Dataset schema (each record below lists its fields in this column order; ranges are observed min–max):

| Column | Type | Observed values |
| --- | --- | --- |
| url | string | length 51–54 |
| repository_url | string | 1 value |
| labels_url | string | length 65–68 |
| comments_url | string | length 60–63 |
| events_url | string | length 58–61 |
| html_url | string | length 39–44 |
| id | int64 | 1.78B–2.82B |
| node_id | string | length 18–19 |
| number | int64 | 1–8.69k |
| title | string | length 1–382 |
| user | dict | |
| labels | list | length 0–5 |
| state | string | 2 values |
| locked | bool | 1 class |
| assignee | dict | |
| assignees | list | length 0–2 |
| milestone | null | |
| comments | int64 | 0–323 |
| created_at | timestamp[s] | |
| updated_at | timestamp[s] | |
| closed_at | timestamp[s] | |
| author_association | string | 4 values |
| sub_issues_summary | dict | |
| active_lock_reason | null | |
| draft | bool | 2 classes |
| pull_request | dict | |
| body | string | length 2–118k |
| closed_by | dict | |
| reactions | dict | |
| timeline_url | string | length 60–63 |
| performed_via_github_app | null | |
| state_reason | string | 4 values |
| is_pull_request | bool | 2 classes |
https://api.github.com/repos/ollama/ollama/issues/3639
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3639/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3639/comments
https://api.github.com/repos/ollama/ollama/issues/3639/events
https://github.com/ollama/ollama/issues/3639
2,242,233,698
I_kwDOJ0Z1Ps6FpcVi
3,639
macOS not saving OLLAMA_HOST=0.0.0.0 across reboots
{ "login": "gwthompson", "id": 177971, "node_id": "MDQ6VXNlcjE3Nzk3MQ==", "avatar_url": "https://avatars.githubusercontent.com/u/177971?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gwthompson", "html_url": "https://github.com/gwthompson", "followers_url": "https://api.github.com/users/gwthompson/followers", "following_url": "https://api.github.com/users/gwthompson/following{/other_user}", "gists_url": "https://api.github.com/users/gwthompson/gists{/gist_id}", "starred_url": "https://api.github.com/users/gwthompson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gwthompson/subscriptions", "organizations_url": "https://api.github.com/users/gwthompson/orgs", "repos_url": "https://api.github.com/users/gwthompson/repos", "events_url": "https://api.github.com/users/gwthompson/events{/privacy}", "received_events_url": "https://api.github.com/users/gwthompson/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6677279472, "node_id": "LA_kwDOJ0Z1Ps8AAAABjf8y8A", "url": "https://api.github.com/repos/ollama/ollama/labels/macos", "name": "macos", "color": "E2DBC0", "default": false, "description": "" } ]
open
false
null
[]
null
8
2024-04-14T15:57:39
2024-11-06T17:36:06
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

When I run `launchctl setenv OLLAMA_HOST "0.0.0.0"` and restart the Ollama app, everything works as expected and I can access the API from other devices on my network. However, when I reboot my Mac, OLLAMA_HOST reverts to 127.0.0.1 and I have to run `launchctl setenv OLLAMA_HOST "0.0.0.0"` again. (A persistence sketch follows this record.)

### What did you expect to see?

After running `launchctl setenv OLLAMA_HOST "0.0.0.0"` and restarting my Mac, I expected OLLAMA_HOST to still be set to 0.0.0.0.

### Steps to reproduce

1. Run `launchctl setenv OLLAMA_HOST "0.0.0.0"`
2. Restart the Ollama app
3. Check the logs to make sure they say "Listening on [::]:11434 (version 0.1.31)"
4. Reboot the Mac
5. Check the logs again; they now say "Listening on 127.0.0.1:11434 (version 0.1.31)"

### Are there any recent changes that introduced the issue?

_No response_

### OS
macOS

### Architecture
arm64

### Platform
_No response_

### Ollama version
0.1.31

### GPU
Apple

### GPU info
_No response_

### CPU
Apple

### Other software
_No response_
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3639/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3639/timeline
null
null
false
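The behavior above is expected from macOS itself: `launchctl setenv` only affects the running launchd session and is not persisted across reboots. Below is a minimal sketch of the commonly suggested workaround, a per-user LaunchAgent that re-applies the variable at every login. The label `ollama.host` and the plist file name are illustrative choices, not an Ollama convention, and the Ollama app must start after the agent has run.

```bash
# Write a per-user LaunchAgent that re-applies OLLAMA_HOST at each login.
cat > ~/Library/LaunchAgents/ollama.host.plist <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
  <key>Label</key><string>ollama.host</string>
  <key>ProgramArguments</key>
  <array>
    <string>/bin/launchctl</string>
    <string>setenv</string>
    <string>OLLAMA_HOST</string>
    <string>0.0.0.0</string>
  </array>
  <key>RunAtLoad</key><true/>
</dict>
</plist>
EOF
# Load it once; launchd then runs it automatically at every login.
launchctl load ~/Library/LaunchAgents/ollama.host.plist
```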
https://api.github.com/repos/ollama/ollama/issues/7792
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7792/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7792/comments
https://api.github.com/repos/ollama/ollama/issues/7792/events
https://github.com/ollama/ollama/issues/7792
2,682,300,269
I_kwDOJ0Z1Ps6f4Kdt
7,792
Mistral Large instruct template
{ "login": "nicho2", "id": 11471811, "node_id": "MDQ6VXNlcjExNDcxODEx", "avatar_url": "https://avatars.githubusercontent.com/u/11471811?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nicho2", "html_url": "https://github.com/nicho2", "followers_url": "https://api.github.com/users/nicho2/followers", "following_url": "https://api.github.com/users/nicho2/following{/other_user}", "gists_url": "https://api.github.com/users/nicho2/gists{/gist_id}", "starred_url": "https://api.github.com/users/nicho2/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nicho2/subscriptions", "organizations_url": "https://api.github.com/users/nicho2/orgs", "repos_url": "https://api.github.com/users/nicho2/repos", "events_url": "https://api.github.com/users/nicho2/events{/privacy}", "received_events_url": "https://api.github.com/users/nicho2/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2024-11-22T08:18:55
2024-11-23T21:03:49
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

Hello, Ollama does not seem to apply the correct template for Mistral Large: https://huggingface.co/mistralai/Mistral-Large-Instruct-2411#basic-instruct-template-v7

Mistral's v7 template defines a [SYSTEM_PROMPT] token that Ollama does not apply:

<s>[SYSTEM_PROMPT] <system prompt>[/SYSTEM_PROMPT][INST] <user message>[/INST] <assistant response></s>[INST] <user message>[/INST]

Example (a template-override sketch follows this record):

<details> <summary>API messages</summary>

{ "model": "mistral-large:123b", "messages": [ { "role": "system", "content": "\nTu es un assistant dédié à supporter un utilisateur dans sa compréhension des données issues des espaces d'un bâtiment. \n\nVotre rôle principal est de répondre précisément aux demandes de l'utilisateur en coordonnant des agents spécialisés, chacun ayant des compétences spécifiques pour récupérer et analyser les données.\n\nPour chaque demande de l'utilisateur, tu réfléchis à quels assistants spécialisés tu dois faire appel. Il est possible que tu doives faire appel à plusieurs assistants à la suite , pour cela tu dois faire un plan de la meilleure démarche pour répondre au besoin du client. Tu sollicites l'utilisateur pour demander des informations complémentaires si sa demande n'est pas complète.\n\nTu délègues la tâche à l'assistant spécialisé approprié en invoquant l'outil correspondant. \n\nSeuls les assistants spécialisés sont autorisés à le faire pour l'utilisateur.\n\nL'utilisateur n'est pas au courant des différents assistants spécialisés, ne les mentionnez donc pas ; Déléguez simplement discrètement via des appels de fonction.\n\nPour répondre aux requêtes de l'utilisateur, tu devras vérifier qu'il a le droit d'accéder aux données de l'espace concerné.\n\nTu ne peut faire appel qu'à un seul agent à la fois.\n\n\n\nCurrent user:\n<User>\n{'user_id': '3'}\n</User> \nCurrent time: 2024-11-22 09:01:08.995806.\n", "images": [] }, { "role": "user", "content": "quelles sont les statistiques au workcafe ce matin", "images": [] } ], "tools": [ { "type": "function", "function": { "name": "tavily_search_results_json", "description": "A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events. 
Input should be a search query.", "parameters": { "properties": { "query": { "description": "search query to look up", "type": "string" } }, "required": [ "query" ], "type": "object" } } }, { "type": "function", "function": { "name": "ToAccesRightAgent", "description": "Transfert du travail à un agent spécialisé dans la gestion des droits d'accès.", "parameters": { "properties": { "request": { "description": "Toutes les questions concernant les droits d'accès de l'utilisateur aux différents espaces du bâtiment.", "type": "string" }, "username": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "description": "le login utilisateur" }, "user_id": { "description": "le user_id de l'utilisateur", "type": "string" } }, "required": [ "request", "username", "user_id" ], "type": "object" } } }, { "type": "function", "function": { "name": "ToDatasFetchingAgent", "description": "Transfert du travail à un agent spécialisé dans la récupération des données.", "parameters": { "properties": { "request": { "description": "Toutes les questions nécessitant la récupération des données d'espaces du bâtiment.", "type": "string" } }, "required": [ "request" ], "type": "object" } } }, { "type": "function", "function": { "name": "ToKpiPoliciesAgent", "description": "Transfert du travail à un agent spécialisé dans la récupération de règles permettant l'interprétation des données.", "parameters": { "properties": { "request": { "description": "Toutes les questions liées à l'interprétation des données d'espaces du bâtiment.", "type": "string" } }, "required": [ "request" ], "type": "object" } } } ], "stream": false, "format": "", "options": { "mirostat": null, "mirostat_eta": null, "mirostat_tau": null, "num_ctx": null, "num_gpu": null, "num_thread": null, "num_predict": null, "repeat_last_n": null, "repeat_penalty": null, "temperature": 0.3, "seed": null, "stop": null, "tfs_z": null, "top_k": null, "top_p": null }, "keep_alive": null } </details> <details> <summary>Prompt Ollama</summary> ~time=2024-11-22T08:01:22.589Z level=DEBUG source=routes.go:1465 msg="chat request" images=0 prompt="[AVAILABLE_TOOLS] [{\"type\":\"function\",\"function\":{\"name\":\"tavily_search_results_json\",\"description\":\"A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events. 
Input should be a search query.\",\"parameters\":{\"type\":\"object\",\"required\":[\"query\"],\"properties\":{\"query\":{\"type\":\"string\",\"description\":\"search query to look up\"}}}}},{\"type\":\"function\",\"function\":{\"name\":\"ToAccesRightAgent\",\"description\":\"Transfert du travail à un agent spécialisé dans la gestion des droits d'accès.\",\"parameters\":{\"type\":\"object\",\"required\":[\"request\",\"username\",\"user_id\"],\"properties\":{\"request\":{\"type\":\"string\",\"description\":\"Toutes les questions concernant les droits d'accès de l'utilisateur aux différents espaces du bâtiment.\"},\"user_id\":{\"type\":\"string\",\"description\":\"le user_id de l'utilisateur\"},\"username\":{\"type\":\"\",\"description\":\"le login utilisateur\"}}}}},{\"type\":\"function\",\"function\":{\"name\":\"ToDatasFetchingAgent\",\"description\":\"Transfert du travail à un agent spécialisé dans la récupération des données.\",\"parameters\":{\"type\":\"object\",\"required\":[\"request\"],\"properties\":{\"request\":{\"type\":\"string\",\"description\":\"Toutes les questions nécessitant la récupération des données d'espaces du bâtiment.\"}}}}},{\"type\":\"function\",\"function\":{\"name\":\"ToKpiPoliciesAgent\",\"description\":\"Transfert du travail à un agent spécialisé dans la récupération de règles permettant l'interprétation des données.\",\"parameters\":{\"type\":\"object\",\"required\":[\"request\"],\"properties\":{\"request\":{\"type\":\"string\",\"description\":\"Toutes les questions liées à l'interprétation des données d'espaces du bâtiment.\"}}}}}] [/AVAILABLE_TOOLS] [INST] Tu es un assistant dédié à supporter un utilisateur dans sa compréhension des données issues des espaces d'un bâtiment. \n\nVotre rôle principal est de répondre précisément aux demandes de l'utilisateur en coordonnant des agents spécialisés, chacun ayant des compétences spécifiques pour récupérer et analyser les données.\n\nPour chaque demande de l'utilisateur, tu réfléchis à quels assistants spécialisés tu dois faire appel. Il est possible que tu doives faire appel à plusieurs assistants à la suite , pour cela tu dois faire un plan de la meilleure démarche pour répondre au besoin du client. Tu sollicites l'utilisateur pour demander des informations complémentaires si sa demande n'est pas complète.\n\nTu délègues la tâche à l'assistant spécialisé approprié en invoquant l'outil correspondant. \n\nSeuls les assistants spécialisés sont autorisés à le faire pour l'utilisateur.\n\nL'utilisateur n'est pas au courant des différents assistants spécialisés, ne les mentionnez donc pas ; Déléguez simplement discrètement via des appels de fonction.\n\nPour répondre aux requêtes de l'utilisateur, tu devras vérifier qu'il a le droit d'accéder aux données de l'espace concerné.\n\nTu ne peut faire appel qu'à un seul agent à la fois.\n\n\n\nCurrent user:\n<User>\n{'user_id': '3'}\n</User> \nCurrent time: 2024-11-22 09:01:08.995806.\n\nquelles sont les statistiques au workcafe ce matin [/INST]\n" </details> ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.4.3
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7792/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7792/timeline
null
null
false
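For reference, a model's chat template can be overridden per model with a Modelfile. The sketch below shows what a v7-style system-prompt wrapper could look like, assuming the simplified `.System`/`.Prompt` template variables and deliberately ignoring tool definitions and multi-turn history, which the stock template also has to handle. It is illustrative only, not the template that ships with the model.

```bash
cat > Modelfile <<'EOF'
FROM mistral-large:123b
TEMPLATE """{{ if .System }}[SYSTEM_PROMPT] {{ .System }}[/SYSTEM_PROMPT]{{ end }}[INST] {{ .Prompt }}[/INST]"""
EOF
# Build a local variant that wraps the system prompt in Mistral's v7 markers.
# The <s> BOS token is typically inserted by the tokenizer, so it is omitted here.
ollama create mistral-large-v7 -f Modelfile
```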
https://api.github.com/repos/ollama/ollama/issues/1309
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1309/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1309/comments
https://api.github.com/repos/ollama/ollama/issues/1309/events
https://github.com/ollama/ollama/issues/1309
2,015,536,052
I_kwDOJ0Z1Ps54IqO0
1,309
[WSL2] CUDA error 222: the provided PTX was compiled with an unsupported toolchain.
{ "login": "fxrobin", "id": 16342334, "node_id": "MDQ6VXNlcjE2MzQyMzM0", "avatar_url": "https://avatars.githubusercontent.com/u/16342334?v=4", "gravatar_id": "", "url": "https://api.github.com/users/fxrobin", "html_url": "https://github.com/fxrobin", "followers_url": "https://api.github.com/users/fxrobin/followers", "following_url": "https://api.github.com/users/fxrobin/following{/other_user}", "gists_url": "https://api.github.com/users/fxrobin/gists{/gist_id}", "starred_url": "https://api.github.com/users/fxrobin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fxrobin/subscriptions", "organizations_url": "https://api.github.com/users/fxrobin/orgs", "repos_url": "https://api.github.com/users/fxrobin/repos", "events_url": "https://api.github.com/users/fxrobin/events{/privacy}", "received_events_url": "https://api.github.com/users/fxrobin/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 6430601766, "node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg", "url": "https://api.github.com/repos/ollama/ollama/labels/nvidia", "name": "nvidia", "color": "8CDB00", "default": false, "description": "Issues relating to Nvidia GPUs and CUDA" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
6
2023-11-29T00:02:01
2024-03-12T16:18:02
2024-03-12T16:17:58
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
On Windows WSL2, with the CUDA Toolkit and the NVIDIA Container Toolkit installed, I'm facing this issue when running the official Docker image:

```
ollama-ollama-1 | 2023/11/29 00:36:04 llama.go:292: 3676 MB VRAM available, loading up to 21 GPU layers
ollama-ollama-1 | 2023/11/29 00:36:04 llama.go:421: starting llama runner
ollama-ollama-1 | 2023/11/29 00:36:04 llama.go:479: waiting for llama runner to start responding
ollama-ollama-1 | ggml_init_cublas: GGML_CUDA_FORCE_MMQ: no
ollama-ollama-1 | ggml_init_cublas: CUDA_USE_TENSOR_CORES: yes
ollama-ollama-1 | ggml_init_cublas: found 1 CUDA devices:
ollama-ollama-1 |   Device 0: NVIDIA RTX A1000 Laptop GPU, compute capability 8.6
ollama-ollama-1 |
ollama-ollama-1 | CUDA error 222 at /go/src/github.com/jmorganca/ollama/llm/llama.cpp/gguf/ggml-cuda.cu:5965: the provided PTX was compiled with an unsupported toolchain.
ollama-ollama-1 | current device: 0
ollama-ollama-1 | 2023/11/29 00:36:04 llama.go:436: 222 at /go/src/github.com/jmorganca/ollama/llm/llama.cpp/gguf/ggml-cuda.cu:5965: the provided PTX was compiled with an unsupported toolchain.
ollama-ollama-1 | current device: 0
ollama-ollama-1 | 2023/11/29 00:36:04 llama.go:444: error starting llama runner: llama runner process has terminated
ollama-ollama-1 | 2023/11/29 00:36:04 llama.go:510: llama runner stopped successfully
ollama-ollama-1 | 2023/11/29 00:36:04 llama.go:421: starting llama runner
ollama-ollama-1 | 2023/11/29 00:36:04 llama.go:479: waiting for llama runner to start responding
ollama-ollama-1 | {"timestamp":1701218164,"level":"WARNING","function":"server_params_parse","line":2035,"message":"Not compiled with GPU offload support, --n-gpu-layers option will be ignored. See main README.md for information on enabling GPU BLAS support","n_gpu_layers":-1}
ollama-ollama-1 | {"timestamp":1701218164,"level":"INFO","function":"main","line":2534,"message":"build info","build":375,"commit":"9656026"}
ollama-ollama-1 | {"timestamp":1701218164,"level":"INFO","function":"main","line":2537,"message":"system info","n_threads":12,"n_threads_batch":-1,"total_threads":24,"system_info":"AVX = 1 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | "}
ollama-ollama-1 | llama_model_loader: loaded meta data with 18 key-value pairs and 196 tensors from /root/.ollama/models/blobs/sha256:305c4103a989d3f8ac457f912af30f32693f20dcffe1495e18c2ed7b5596b2d1 (version GGUF V2)
```

So Ollama is not using my GPU. When I check whether Docker can use my GPU, it seems OK:

```
$ docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi
Tue Nov 28 23:56:24 2023
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 515.91       Driver Version: 517.89       CUDA Version: 11.7     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  NVIDIA RTX A100...  On   | 00000000:01:00.0  On |                  N/A |
| N/A   38C    P8     3W /  N/A |    323MiB /  4096MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
|    0   N/A  N/A        22    G   /Xwayland                           N/A    |
+-----------------------------------------------------------------------------+
```

On Ollama startup, there is no warning about the GPU being inaccessible:

```
ollama-ollama-1 | 2023/11/29 00:07:32 images.go:784: total blobs: 15
ollama-ollama-1 | 2023/11/29 00:07:32 images.go:791: total unused blobs removed: 0
ollama-ollama-1 | 2023/11/29 00:07:32 routes.go:777: Listening on [::]:11434 (version 0.1.12)
```

Here is my distribution:

```
$ uname -a
Linux FRLFK0635009890 5.15.90.1-microsoft-standard-WSL2 #1 SMP Fri Jan 27 02:56:13 UTC 2023 x86_64 x86_64 x86_64 GNU/Linux
$ lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description:    Ubuntu 22.04.2 LTS
Release:        22.04
Codename:       jammy
```

Models:

```
root@de433da63a97:/# ollama list
NAME               ID            SIZE    MODIFIED
codellama:latest   8fdf8f752f6e  3.8 GB  51 minutes ago
codeup:latest      54289661f7a9  7.4 GB  39 minutes ago
falcon:latest      4280f7257e73  4.2 GB  34 minutes ago
```

When I look at the source code of `ggml-cuda.cu`:

```
for (int id = 0; id < g_device_count; ++id) {
    CUDA_CHECK(ggml_cuda_set_device(id));

    // create cuda streams
    for (int is = 0; is < MAX_STREAMS; ++is) {
        CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams[id][is], cudaStreamNonBlocking));
    }

    // create cublas handle
    CUBLAS_CHECK(cublasCreate(&g_cublas_handles[id]));
    CUBLAS_CHECK(cublasSetMathMode(g_cublas_handles[id], CUBLAS_TF32_TENSOR_OP_MATH));
}
```

The error is raised by `CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams[id][is], cudaStreamNonBlocking));` in the for loop. (A driver/toolkit version-check sketch follows this record.)
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1309/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1309/timeline
null
completed
false
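CUDA error 222 typically means the PTX embedded in the binary was built with a newer CUDA toolkit than the installed driver understands. A quick way to check for that mismatch on a WSL2 host, where the CUDA driver is supplied by the Windows NVIDIA driver rather than anything installed inside the distro:

```bash
# The driver advertises the highest CUDA version it can execute; in the
# nvidia-smi output above this is "CUDA Version: 11.7".
nvidia-smi | grep "CUDA Version"
# If the Ollama image was built against a newer toolkit than that, PTX
# loading fails with error 222. Updating the *Windows* NVIDIA driver
# (https://www.nvidia.com/Download/index.aspx) raises the supported
# version; then re-check from inside a container:
docker run --rm --gpus all ubuntu nvidia-smi | grep "CUDA Version"
```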
https://api.github.com/repos/ollama/ollama/issues/2825
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2825/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2825/comments
https://api.github.com/repos/ollama/ollama/issues/2825/events
https://github.com/ollama/ollama/issues/2825
2,160,283,652
I_kwDOJ0Z1Ps6Aw1AE
2,825
CPU does not have AVX or AVX2, disabling GPU support.
{ "login": "mingyue0094", "id": 63558866, "node_id": "MDQ6VXNlcjYzNTU4ODY2", "avatar_url": "https://avatars.githubusercontent.com/u/63558866?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mingyue0094", "html_url": "https://github.com/mingyue0094", "followers_url": "https://api.github.com/users/mingyue0094/followers", "following_url": "https://api.github.com/users/mingyue0094/following{/other_user}", "gists_url": "https://api.github.com/users/mingyue0094/gists{/gist_id}", "starred_url": "https://api.github.com/users/mingyue0094/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mingyue0094/subscriptions", "organizations_url": "https://api.github.com/users/mingyue0094/orgs", "repos_url": "https://api.github.com/users/mingyue0094/repos", "events_url": "https://api.github.com/users/mingyue0094/events{/privacy}", "received_events_url": "https://api.github.com/users/mingyue0094/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
4
2024-02-29T03:32:54
2024-03-01T17:45:47
2024-03-01T17:45:46
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I can use the GPU from PyTorch, but when using Ollama the log below is displayed. Can GPU support be enabled when the CPU "does not have AVX or AVX2"? (A flag-check sketch follows this record.)

```
time=2024-02-29T11:21:58.722+08:00 level=INFO source=images.go:710 msg="total blobs: 5"
time=2024-02-29T11:21:58.752+08:00 level=INFO source=images.go:717 msg="total unused blobs removed: 0"
time=2024-02-29T11:21:58.755+08:00 level=INFO source=routes.go:1019 msg="Listening on 127.0.0.1:11434 (version 0.1.27)"
time=2024-02-29T11:21:58.755+08:00 level=INFO source=payload_common.go:107 msg="Extracting dynamic libraries..."
time=2024-02-29T11:21:59.004+08:00 level=INFO source=payload_common.go:146 msg="Dynamic LLM libraries [cuda_v11.3 cpu_avx cpu cpu_avx2]"
time=2024-02-29T11:22:11.585+08:00 level=INFO source=gpu.go:94 msg="Detecting GPU type"
time=2024-02-29T11:22:11.585+08:00 level=INFO source=gpu.go:265 msg="Searching for GPU management library nvml.dll"
time=2024-02-29T11:22:11.627+08:00 level=INFO source=gpu.go:311 msg="Discovered GPU libraries: [c:\\Windows\\System32\\nvml.dll C:\\WINDOWS\\system32\\nvml.dll]"
time=2024-02-29T11:22:11.649+08:00 level=INFO source=gpu.go:99 msg="Nvidia GPU detected"
time=2024-02-29T11:22:11.650+08:00 level=INFO source=cpu_common.go:18 msg="CPU does not have vector extensions"
time=2024-02-29T11:22:11.650+08:00 level=WARN source=gpu.go:128 msg="CPU does not have AVX or AVX2, disabling GPU support."
time=2024-02-29T11:22:11.650+08:00 level=INFO source=cpu_common.go:18 msg="CPU does not have vector extensions"
time=2024-02-29T11:22:11.650+08:00 level=WARN source=gpu.go:128 msg="CPU does not have AVX or AVX2, disabling GPU support."
time=2024-02-29T11:22:11.650+08:00 level=INFO source=llm.go:77 msg="GPU not available, falling back to CPU"
```
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2825/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2825/timeline
null
completed
false
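The warning comes from Ollama's CPU feature probe: at the time, the GPU-enabled builds were compiled with AVX instructions, so a CPU (or a hypervisor's virtual CPU) that does not expose AVX also disabled GPU offload. A quick check of what the CPU actually exposes, on Linux or WSL on the same machine (Sysinternals Coreinfo reports the equivalent on Windows):

```bash
# List the AVX-family flags the kernel sees; empty output means the CPU
# (or the VM's virtual CPU) exposes no AVX at all, and this Ollama build
# will fall back to CPU inference.
grep -ow 'avx\|avx2\|avx512f' /proc/cpuinfo | sort -u
```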
https://api.github.com/repos/ollama/ollama/issues/7195
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7195/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7195/comments
https://api.github.com/repos/ollama/ollama/issues/7195/events
https://github.com/ollama/ollama/issues/7195
2,584,627,655
I_kwDOJ0Z1Ps6aDknH
7,195
How to set up a local ollama.com/library service
{ "login": "czhcc", "id": 4754730, "node_id": "MDQ6VXNlcjQ3NTQ3MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4754730?v=4", "gravatar_id": "", "url": "https://api.github.com/users/czhcc", "html_url": "https://github.com/czhcc", "followers_url": "https://api.github.com/users/czhcc/followers", "following_url": "https://api.github.com/users/czhcc/following{/other_user}", "gists_url": "https://api.github.com/users/czhcc/gists{/gist_id}", "starred_url": "https://api.github.com/users/czhcc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/czhcc/subscriptions", "organizations_url": "https://api.github.com/users/czhcc/orgs", "repos_url": "https://api.github.com/users/czhcc/repos", "events_url": "https://api.github.com/users/czhcc/events{/privacy}", "received_events_url": "https://api.github.com/users/czhcc/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-10-14T03:19:11
2024-10-16T00:04:30
2024-10-16T00:04:29
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

Can a local ollama.com/library-style service be set up? When my Modelfile contains

FROM http://19.18.5.127/temp/myqwen7b.gguf

I get the error

Error: pull model manifest: Get "http://19.18.5.127/v2/temp/myqwen7b.gguf/manifests/latest": EOF

How can I provide a local pull service? (A local-import sketch follows this record.)

### OS
_No response_

### GPU
_No response_

### CPU
_No response_

### Ollama version
_No response_
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7195/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7195/timeline
null
completed
false
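For context on the error above: `FROM <http-url>` in a Modelfile is parsed as a registry model reference, which is why Ollama probes the `/v2/.../manifests/latest` registry endpoint and fails against a plain file server. Hosting the full registry API locally is a much bigger project; a sketch of the simpler route is to download the GGUF and import it from a local path, which Modelfiles support directly:

```bash
# Fetch the GGUF from the local web server, then import it by file path.
curl -fLO http://19.18.5.127/temp/myqwen7b.gguf
cat > Modelfile <<'EOF'
FROM ./myqwen7b.gguf
EOF
ollama create myqwen7b -f Modelfile
ollama run myqwen7b
```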
https://api.github.com/repos/ollama/ollama/issues/6662
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6662/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6662/comments
https://api.github.com/repos/ollama/ollama/issues/6662/events
https://github.com/ollama/ollama/pull/6662
2,508,748,864
PR_kwDOJ0Z1Ps56k9iG
6,662
Revert "Detect running in a container"
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-09-05T21:20:37
2024-09-05T21:26:01
2024-09-05T21:26:00
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6662", "html_url": "https://github.com/ollama/ollama/pull/6662", "diff_url": "https://github.com/ollama/ollama/pull/6662.diff", "patch_url": "https://github.com/ollama/ollama/pull/6662.patch", "merged_at": "2024-09-05T21:26:00" }
Reverts ollama/ollama#6495. It turns out this doesn't actually work on many platforms, so it doesn't serve much purpose.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6662/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6662/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/10
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/10/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/10/comments
https://api.github.com/repos/ollama/ollama/issues/10/events
https://github.com/ollama/ollama/pull/10
1,779,294,455
PR_kwDOJ0Z1Ps5UKP1q
10
add with symlink
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-06-28T16:26:42
2023-06-30T18:54:25
2023-06-30T18:54:22
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/10", "html_url": "https://github.com/ollama/ollama/pull/10", "diff_url": "https://github.com/ollama/ollama/pull/10.diff", "patch_url": "https://github.com/ollama/ollama/pull/10.patch", "merged_at": null }
null
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/10/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/10/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8045
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8045/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8045/comments
https://api.github.com/repos/ollama/ollama/issues/8045/events
https://github.com/ollama/ollama/issues/8045
2,732,812,651
I_kwDOJ0Z1Ps6i42lr
8,045
Ollama run hf.co - Error 401: Invalid username or password
{ "login": "bengrau", "id": 62591521, "node_id": "MDQ6VXNlcjYyNTkxNTIx", "avatar_url": "https://avatars.githubusercontent.com/u/62591521?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bengrau", "html_url": "https://github.com/bengrau", "followers_url": "https://api.github.com/users/bengrau/followers", "following_url": "https://api.github.com/users/bengrau/following{/other_user}", "gists_url": "https://api.github.com/users/bengrau/gists{/gist_id}", "starred_url": "https://api.github.com/users/bengrau/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bengrau/subscriptions", "organizations_url": "https://api.github.com/users/bengrau/orgs", "repos_url": "https://api.github.com/users/bengrau/repos", "events_url": "https://api.github.com/users/bengrau/events{/privacy}", "received_events_url": "https://api.github.com/users/bengrau/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-12-11T12:12:35
2025-01-02T14:23:32
2024-12-20T22:13:03
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

I am using a private model on Hugging Face and trying to run it like this:

```
huggingface-cli login --token hf_xxx
ollama run hf.co/BGR/Llama-3.2-1B-I-p:latest
```

However, I get this error from Ollama:

```
pulling manifest
Error: pull model manifest: 401: {"error":"Invalid username or password."}
```

Has anyone experienced this issue or does anyone know how to solve it? (A key-setup sketch follows this record.)

### OS
Linux

### GPU
Nvidia

### CPU
AMD

### Ollama version
ollama version is 0.5.1
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8045/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8045/timeline
null
completed
false
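A note on the 401 above: Ollama does not authenticate to hf.co with the `huggingface-cli` token. Hugging Face's Ollama integration instead recognizes the Ollama installation's SSH public key, which you register under your account's SSH keys. A sketch, assuming a default install layout (key paths may differ on other setups):

```bash
# Print Ollama's public key and add it at https://huggingface.co/settings/keys
cat ~/.ollama/id_ed25519.pub                         # per-user install
sudo cat /usr/share/ollama/.ollama/id_ed25519.pub    # Linux systemd service
# Once the key is registered, the private pull should authenticate:
ollama run hf.co/BGR/Llama-3.2-1B-I-p:latest
```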
https://api.github.com/repos/ollama/ollama/issues/6597
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6597/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6597/comments
https://api.github.com/repos/ollama/ollama/issues/6597/events
https://github.com/ollama/ollama/issues/6597
2,501,614,040
I_kwDOJ0Z1Ps6VG5nY
6,597
RPi with armhf architecture support
{ "login": "alecrimi", "id": 16406658, "node_id": "MDQ6VXNlcjE2NDA2NjU4", "avatar_url": "https://avatars.githubusercontent.com/u/16406658?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alecrimi", "html_url": "https://github.com/alecrimi", "followers_url": "https://api.github.com/users/alecrimi/followers", "following_url": "https://api.github.com/users/alecrimi/following{/other_user}", "gists_url": "https://api.github.com/users/alecrimi/gists{/gist_id}", "starred_url": "https://api.github.com/users/alecrimi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alecrimi/subscriptions", "organizations_url": "https://api.github.com/users/alecrimi/orgs", "repos_url": "https://api.github.com/users/alecrimi/repos", "events_url": "https://api.github.com/users/alecrimi/events{/privacy}", "received_events_url": "https://api.github.com/users/alecrimi/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 7700262114, "node_id": "LA_kwDOJ0Z1Ps8AAAAByvis4g", "url": "https://api.github.com/repos/ollama/ollama/labels/build", "name": "build", "color": "006b75", "default": false, "description": "Issues relating to building ollama from source" } ]
open
false
null
[]
null
1
2024-09-02T21:48:04
2024-11-04T19:20:15
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

I followed every guide I could find online (curl with your install.sh, Docker, and the snap package) but could not manage to install Ollama, and it is not clear how to compile the code from source (there is no configure file). The main issue is that everything has been prepared for arm64 and not for the armhf architecture, which I need for my current display. Is there a way to install the arm64 build on an armhf system? (An architecture-check sketch follows this record.)

### OS
Linux

### GPU
_No response_

### CPU
Other

### Ollama version
any
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6597/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6597/timeline
null
null
false
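Ollama only publishes 64-bit ARM builds, so the first thing to establish is whether the board can run arm64 code at all. A diagnostic sketch follows; the `/boot/config.txt` note is specific to Raspberry Pi OS and worth double-checking for your image:

```bash
uname -m                   # armv7l => 32-bit kernel: arm64 binaries cannot run
dpkg --print-architecture  # armhf  => 32-bit userland
# On Raspberry Pi OS a 64-bit kernel can often be enabled (arm_64bit=1 in
# /boot/config.txt) while keeping the armhf userland, but Ollama's arm64
# binary also needs 64-bit libraries, so in practice a 64-bit OS image or
# an arm64 container is the workable route; there is no armhf build.
```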
https://api.github.com/repos/ollama/ollama/issues/7100
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7100/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7100/comments
https://api.github.com/repos/ollama/ollama/issues/7100/events
https://github.com/ollama/ollama/issues/7100
2,565,820,022
I_kwDOJ0Z1Ps6Y7052
7,100
mixtral:8x22b model does not work with system prompt only
{ "login": "gakugaku", "id": 14232275, "node_id": "MDQ6VXNlcjE0MjMyMjc1", "avatar_url": "https://avatars.githubusercontent.com/u/14232275?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gakugaku", "html_url": "https://github.com/gakugaku", "followers_url": "https://api.github.com/users/gakugaku/followers", "following_url": "https://api.github.com/users/gakugaku/following{/other_user}", "gists_url": "https://api.github.com/users/gakugaku/gists{/gist_id}", "starred_url": "https://api.github.com/users/gakugaku/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gakugaku/subscriptions", "organizations_url": "https://api.github.com/users/gakugaku/orgs", "repos_url": "https://api.github.com/users/gakugaku/repos", "events_url": "https://api.github.com/users/gakugaku/events{/privacy}", "received_events_url": "https://api.github.com/users/gakugaku/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
2
2024-10-04T08:46:37
2024-10-24T05:08:19
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

The `mixtral:8x22b-instruct` model does not work correctly when only the system prompt is provided. In such cases, an empty prompt is sent, leading to irrelevant output. This behavior may be related to the internal handling of prompts or recent changes made in the system prompt handling, as referenced in #4228. (A workaround sketch follows this record.)

mixtral:8x22b-instruct template: https://ollama.com/library/mixtral:8x22b-instruct/blobs/138b3322e0da

Mixtral 8x22B template in docs: https://github.com/ollama/ollama/blob/main/docs/template.md#mistral

#### Steps to Reproduce

1. Input the following system prompt only into the `mixtral:8x22b-instruct` model.

```bash
curl http://localhost:11434/api/chat -d '{ "model": "mixtral:8x22b-instruct-v0.1-q4_K_M", "stream": false, "messages": [ { "role": "system", "content": "Hello, I am Ollama. I am here to help you with your questions. What would you like to know?\n\nWhat is the capital of Japan?" } ] }'
```

2. The Ollama log shows that the `prompt` field is empty: `prompt=\"\"\r\n`

```bash
{"log":"time=2024-10-04T08:03:17.462Z level=DEBUG source=routes.go:1417 msg=\"chat request\" images=0 prompt=\"\"\r\n","stream":"stdout","time":"2024-10-04T08:03:17.462471779Z"}
```

3. The output is unrelated to the input content.

```bash
{"model":"mixtral:8x22b-instruct-v0.1-q4_K_M","created_at":"2024-10-04T08:03:40.224484198Z","message":{"role":"assistant","content":",\n.\nA new study by the University of Maryland and Johns Hopkins Medicine has found ..."}
```

#### Expected Behavior

The `mixtral:8x22b-instruct` model should work with a system prompt only, similar to other models like `gemma2:27b`.

#### Actual Results

mixtral:8x22b-instruct, system prompt only: NG

<details><summary>Results</summary>
<p>

Input:

```bash
curl http://localhost:11434/api/chat -d '{ "model": "mixtral:8x22b-instruct-v0.1-q4_K_M", "stream": false, "messages": [ { "role": "system", "content": "Hello, I am Ollama. I am here to help you with your questions. What would you like to know?\n\nWhat is the capital of Japan?" } ] }'
```

Ollama log:

```bash
{"log":"time=2024-10-04T08:03:17.462Z level=DEBUG source=routes.go:1417 msg=\"chat request\" images=0 prompt=\"\"\r\n","stream":"stdout","time":"2024-10-04T08:03:17.462471779Z"}
```

Output:

```bash
{"model":"mixtral:8x22b-instruct-v0.1-q4_K_M","created_at":"2024-10-04T08:03:40.224484198Z","message":{"role":"assistant","content":",\n.\nA new study by the University of Maryland and Johns Hopkins Medicine has found ..."}
```

This result is not related to the input.

</p>
</details>

mixtral:8x22b-instruct, system prompt + user prompt: OK

<details><summary>Results</summary>
<p>

Input:

```bash
curl http://localhost:11434/api/chat -d '{ "model": "mixtral:8x22b-instruct-v0.1-q4_K_M", "stream": false, "messages": [ { "role": "system", "content": "Hello, I am Ollama. I am here to help you with your questions. What would you like to know?" }, { "role": "user", "content": "What is the capital of Japan?" } ] }'
```

Ollama log:

```bash
{"log":"time=2024-10-04T08:08:04.464Z level=DEBUG source=routes.go:1417 msg=\"chat request\" images=0 prompt=\"[INST] Hello, I am Ollama. I am here to help you with your questions. What would you like to know?\\n\\nWhat is the capital of Japan?[/INST]\"\r\n","stream":"stdout","time":"2024-10-04T08:08:04.464656724Z"}
```

Output:

```bash
{"model":"mixtral:8x22b-instruct-v0.1-q4_K_M","created_at":"2024-10-04T08:08:06.219305367Z","message":{"role":"assistant","content":" The capital of Japan is Tokyo. It's also the country's largest city and one of the world's most populous metropolitan areas."},"done_reason":"stop","done":true,"total_duration":1853131385,"load_duration":11283091,"prompt_eval_count":38,"prompt_eval_duration":348092000,"eval_count":32,"eval_duration":1361906000}
```

</p>
</details>

Other model example `gemma2:27b`, system prompt only: OK

<details><summary>Results</summary>
<p>

Input:

```bash
curl http://localhost:11434/api/chat -d '{ "model": "gemma2:27b-instruct-q4_K_M", "stream": false, "messages": [ { "role": "system", "content": "Hello, I am Ollama. I am here to help you with your questions. What would you like to know?\n\nWhat is the capital of Japan?" } ] }'
```

Ollama log:

```bash
{"log":"time=2024-10-04T08:12:01.403Z level=DEBUG source=routes.go:1417 msg=\"chat request\" images=0 prompt=\"\u003cstart_of_turn\u003euser\\nHello, I am Ollama. I am here to help you with your questions. What would you like to know?\\n\\nWhat is the capital of Japan? \u003cend_of_turn\u003e\\n\u003cstart_of_turn\u003emodel\\n\"\r\n","stream":"stdout","time":"2024-10-04T08:12:01.40376108Z"}
```

Output:

```bash
{"model":"gemma2:27b-instruct-q4_K_M","created_at":"2024-10-04T08:10:22.460284079Z","message":{"role":"assistant","content":"The capital of Japan is **Tokyo**. 🏯 \n"},"done_reason":"stop","done":true,"total_duration":6504495197,"load_duration":6057933332,"prompt_eval_count":42,"prompt_eval_duration":66685000,"eval_count":13,"eval_duration":376322000}
```

</p>
</details>

#### Potentially Related Issues

- https://github.com/ollama/ollama/issues/5547
- https://github.com/ollama/ollama/issues/6176

### OS
Linux

### GPU
Nvidia

### CPU
Intel

### Ollama version
0.3.12
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7100/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7100/timeline
null
reopened
false
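Until the template handles system-only requests, a workaround consistent with the working case documented above is to send the same text as a user turn rather than a system turn:

```bash
curl http://localhost:11434/api/chat -d '{
  "model": "mixtral:8x22b-instruct-v0.1-q4_K_M",
  "stream": false,
  "messages": [
    { "role": "user", "content": "Hello, I am Ollama. I am here to help you with your questions. What would you like to know?\n\nWhat is the capital of Japan?" }
  ]
}'
```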
https://api.github.com/repos/ollama/ollama/issues/629
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/629/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/629/comments
https://api.github.com/repos/ollama/ollama/issues/629/events
https://github.com/ollama/ollama/pull/629
1,916,664,927
PR_kwDOJ0Z1Ps5bZf4X
629
Update modelfile.md to reflect the usage of num_gpu.
{ "login": "aaroncoffey", "id": 3649791, "node_id": "MDQ6VXNlcjM2NDk3OTE=", "avatar_url": "https://avatars.githubusercontent.com/u/3649791?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aaroncoffey", "html_url": "https://github.com/aaroncoffey", "followers_url": "https://api.github.com/users/aaroncoffey/followers", "following_url": "https://api.github.com/users/aaroncoffey/following{/other_user}", "gists_url": "https://api.github.com/users/aaroncoffey/gists{/gist_id}", "starred_url": "https://api.github.com/users/aaroncoffey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aaroncoffey/subscriptions", "organizations_url": "https://api.github.com/users/aaroncoffey/orgs", "repos_url": "https://api.github.com/users/aaroncoffey/repos", "events_url": "https://api.github.com/users/aaroncoffey/events{/privacy}", "received_events_url": "https://api.github.com/users/aaroncoffey/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-09-28T04:09:38
2023-09-28T14:21:21
2023-09-28T14:21:21
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/629", "html_url": "https://github.com/ollama/ollama/pull/629", "diff_url": "https://github.com/ollama/ollama/pull/629.diff", "patch_url": "https://github.com/ollama/ollama/pull/629.patch", "merged_at": "2023-09-28T14:21:21" }
The current docs for the parameter num_gpu are inaccurate for Linux (a usage sketch follows this record). Ref: https://github.com/jmorganca/ollama/issues/618
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/629/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/629/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/1722
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1722/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1722/comments
https://api.github.com/repos/ollama/ollama/issues/1722/events
https://github.com/ollama/ollama/issues/1722
2,056,612,248
I_kwDOJ0Z1Ps56lWmY
1,722
How to update a model in a timely manner?
{ "login": "PriyaranjanMaratheDish", "id": 133165012, "node_id": "U_kgDOB-_v1A", "avatar_url": "https://avatars.githubusercontent.com/u/133165012?v=4", "gravatar_id": "", "url": "https://api.github.com/users/PriyaranjanMaratheDish", "html_url": "https://github.com/PriyaranjanMaratheDish", "followers_url": "https://api.github.com/users/PriyaranjanMaratheDish/followers", "following_url": "https://api.github.com/users/PriyaranjanMaratheDish/following{/other_user}", "gists_url": "https://api.github.com/users/PriyaranjanMaratheDish/gists{/gist_id}", "starred_url": "https://api.github.com/users/PriyaranjanMaratheDish/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PriyaranjanMaratheDish/subscriptions", "organizations_url": "https://api.github.com/users/PriyaranjanMaratheDish/orgs", "repos_url": "https://api.github.com/users/PriyaranjanMaratheDish/repos", "events_url": "https://api.github.com/users/PriyaranjanMaratheDish/events{/privacy}", "received_events_url": "https://api.github.com/users/PriyaranjanMaratheDish/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
5
2023-12-26T18:17:45
2024-03-12T22:00:18
2024-03-12T22:00:18
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Here is what I am trying to do: 1) Create a custom Ollama model by giving it data exported from Snowflake database tables. The data in the Snowflake tables is already in a Golden Format. I have additional follow-up questions on my requirement: A) Instead of creating the model using -f (a file with data exported from the Snowflake database), can I create a model using the results of a Snowflake query execution? B) How can I update this model in a timely manner, so that my results stay consistent with newly generated data? TIA.
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1722/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/1722/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7205
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7205/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7205/comments
https://api.github.com/repos/ollama/ollama/issues/7205/events
https://github.com/ollama/ollama/pull/7205
2,587,277,772
PR_kwDOJ0Z1Ps5-moX0
7,205
Clear screen when `/clear` command is used in interactive mode
{ "login": "suyogdahal", "id": 41914389, "node_id": "MDQ6VXNlcjQxOTE0Mzg5", "avatar_url": "https://avatars.githubusercontent.com/u/41914389?v=4", "gravatar_id": "", "url": "https://api.github.com/users/suyogdahal", "html_url": "https://github.com/suyogdahal", "followers_url": "https://api.github.com/users/suyogdahal/followers", "following_url": "https://api.github.com/users/suyogdahal/following{/other_user}", "gists_url": "https://api.github.com/users/suyogdahal/gists{/gist_id}", "starred_url": "https://api.github.com/users/suyogdahal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/suyogdahal/subscriptions", "organizations_url": "https://api.github.com/users/suyogdahal/orgs", "repos_url": "https://api.github.com/users/suyogdahal/repos", "events_url": "https://api.github.com/users/suyogdahal/events{/privacy}", "received_events_url": "https://api.github.com/users/suyogdahal/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-10-14T23:37:08
2024-11-04T17:48:11
2024-11-04T17:48:11
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7205", "html_url": "https://github.com/ollama/ollama/pull/7205", "diff_url": "https://github.com/ollama/ollama/pull/7205.diff", "patch_url": "https://github.com/ollama/ollama/pull/7205.patch", "merged_at": null }
Use ANSI escape codes to clear the terminal and reset the cursor's position with the `/clear` command.
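As a rough illustration of the approach this PR describes, here is a minimal Go sketch (not the PR's actual code; the `clearScreen` helper name is hypothetical) that clears the terminal and homes the cursor with ANSI escape codes:

```go
package main

import "fmt"

// clearScreen emits the ANSI "erase display" sequence ("\033[2J") followed
// by the "cursor home" sequence ("\033[H"), which clears the visible screen
// and moves the cursor to the top-left corner.
func clearScreen() {
	fmt.Print("\033[2J\033[H")
}

func main() {
	clearScreen()
	fmt.Println("screen cleared")
}
```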
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7205/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7205/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2614
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2614/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2614/comments
https://api.github.com/repos/ollama/ollama/issues/2614/events
https://github.com/ollama/ollama/issues/2614
2,144,424,882
I_kwDOJ0Z1Ps5_0VOy
2,614
AutoModelForCausalLM and .ollama/models
{ "login": "Demirrr", "id": 13405667, "node_id": "MDQ6VXNlcjEzNDA1NjY3", "avatar_url": "https://avatars.githubusercontent.com/u/13405667?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Demirrr", "html_url": "https://github.com/Demirrr", "followers_url": "https://api.github.com/users/Demirrr/followers", "following_url": "https://api.github.com/users/Demirrr/following{/other_user}", "gists_url": "https://api.github.com/users/Demirrr/gists{/gist_id}", "starred_url": "https://api.github.com/users/Demirrr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Demirrr/subscriptions", "organizations_url": "https://api.github.com/users/Demirrr/orgs", "repos_url": "https://api.github.com/users/Demirrr/repos", "events_url": "https://api.github.com/users/Demirrr/events{/privacy}", "received_events_url": "https://api.github.com/users/Demirrr/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-02-20T13:47:54
2025-01-06T19:37:55
2024-02-20T18:52:43
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Can we create an instance of `AutoModelForCausalLM` from the language models downloaded to `~/.ollama/models`? This would make fine-tuning, and then using the fine-tuned model via Ollama, easier. ```python from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "mistralai/Mixtral-8x7B-v0.1" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) ```
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2614/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2614/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5395
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5395/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5395/comments
https://api.github.com/repos/ollama/ollama/issues/5395/events
https://github.com/ollama/ollama/issues/5395
2,382,431,501
I_kwDOJ0Z1Ps6OAQUN
5,395
CUBLAS_STATUS_ALLOC_FAILED with deepseek-coder-v2:16b
{ "login": "hgourvest", "id": 1659652, "node_id": "MDQ6VXNlcjE2NTk2NTI=", "avatar_url": "https://avatars.githubusercontent.com/u/1659652?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hgourvest", "html_url": "https://github.com/hgourvest", "followers_url": "https://api.github.com/users/hgourvest/followers", "following_url": "https://api.github.com/users/hgourvest/following{/other_user}", "gists_url": "https://api.github.com/users/hgourvest/gists{/gist_id}", "starred_url": "https://api.github.com/users/hgourvest/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hgourvest/subscriptions", "organizations_url": "https://api.github.com/users/hgourvest/orgs", "repos_url": "https://api.github.com/users/hgourvest/repos", "events_url": "https://api.github.com/users/hgourvest/events{/privacy}", "received_events_url": "https://api.github.com/users/hgourvest/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6430601766, "node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg", "url": "https://api.github.com/repos/ollama/ollama/labels/nvidia", "name": "nvidia", "color": "8CDB00", "default": false, "description": "Issues relating to Nvidia GPUs and CUDA" }, { "id": 6849881759, "node_id": "LA_kwDOJ0Z1Ps8AAAABmEjmnw", "url": "https://api.github.com/repos/ollama/ollama/labels/memory", "name": "memory", "color": "5017EA", "default": false, "description": "" } ]
open
false
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
11
2024-06-30T20:30:04
2025-01-26T15:24:21
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? When running deepseek-coder-v2:16b on an NVIDIA GeForce RTX 3080 Laptop GPU, I get this crash report: ``` Error: llama runner process has terminated: signal: aborted (core dumped) CUDA error: CUBLAS_STATUS_ALLOC_FAILED current device: 0, in function cublas_handle at /go/src/github.com/ollama/ollama/llm/llama.cpp/ggml-cuda/common.cuh:826 cublasCreate_v2(&cublas_handles[device]) GGML_ASSERT: /go/src/github.com/ollama/ollama/llm/llama.cpp/ggml-cuda.cu:100: !"CUDA error" ``` If I run the `16b-lite-instruct-q8_0` version, it works just fine. ### OS Linux ### GPU Nvidia, AMD ### CPU AMD ### Ollama version 0.1.48
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5395/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5395/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/1142
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1142/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1142/comments
https://api.github.com/repos/ollama/ollama/issues/1142/events
https://github.com/ollama/ollama/issues/1142
1,995,407,192
I_kwDOJ0Z1Ps52739Y
1,142
Add support for llamacpp min_p sampler
{ "login": "JoseConseco", "id": 13521338, "node_id": "MDQ6VXNlcjEzNTIxMzM4", "avatar_url": "https://avatars.githubusercontent.com/u/13521338?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JoseConseco", "html_url": "https://github.com/JoseConseco", "followers_url": "https://api.github.com/users/JoseConseco/followers", "following_url": "https://api.github.com/users/JoseConseco/following{/other_user}", "gists_url": "https://api.github.com/users/JoseConseco/gists{/gist_id}", "starred_url": "https://api.github.com/users/JoseConseco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JoseConseco/subscriptions", "organizations_url": "https://api.github.com/users/JoseConseco/orgs", "repos_url": "https://api.github.com/users/JoseConseco/repos", "events_url": "https://api.github.com/users/JoseConseco/events{/privacy}", "received_events_url": "https://api.github.com/users/JoseConseco/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
3
2023-11-15T19:28:55
2024-07-27T21:37:42
2024-07-27T21:37:41
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
https://github.com/ggerganov/llama.cpp/pull/3841 ![obraz](https://github.com/jmorganca/ollama/assets/13521338/26509c9f-31a1-4544-8d8b-f3418e73a06c) It is supposed to give better results than top_k and top_p. I tried adding a min_p parameter to the llama options, but it was unrecognized.
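For readers unfamiliar with the sampler, here is a minimal Go sketch of the min-p idea from the linked llama.cpp PR: keep only tokens whose probability is at least `min_p` times that of the most likely token. This is an illustration only, not ollama's or llama.cpp's actual sampling code, and the `minPFilter` name is made up:

```go
package main

import (
	"fmt"
	"sort"
)

// minPFilter returns the indices of tokens whose probability is at least
// minP times the probability of the most likely token, sorted from most
// to least probable.
func minPFilter(probs []float64, minP float64) []int {
	best := 0.0
	for _, p := range probs {
		if p > best {
			best = p
		}
	}
	threshold := minP * best
	var kept []int
	for i, p := range probs {
		if p >= threshold {
			kept = append(kept, i)
		}
	}
	sort.Slice(kept, func(a, b int) bool { return probs[kept[a]] > probs[kept[b]] })
	return kept
}

func main() {
	probs := []float64{0.5, 0.2, 0.15, 0.1, 0.05}
	// With min_p = 0.3, the cutoff is 0.3 * 0.5 = 0.15, so only the
	// first three tokens survive: [0 1 2].
	fmt.Println(minPFilter(probs, 0.3))
}
```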
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1142/reactions", "total_count": 15, "+1": 12, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 3 }
https://api.github.com/repos/ollama/ollama/issues/1142/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2555
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2555/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2555/comments
https://api.github.com/repos/ollama/ollama/issues/2555/events
https://github.com/ollama/ollama/issues/2555
2,139,747,300
I_kwDOJ0Z1Ps5_ifPk
2,555
`EOF` error on `/api/chat` or `/api/generate`
{ "login": "saamerm", "id": 8262287, "node_id": "MDQ6VXNlcjgyNjIyODc=", "avatar_url": "https://avatars.githubusercontent.com/u/8262287?v=4", "gravatar_id": "", "url": "https://api.github.com/users/saamerm", "html_url": "https://github.com/saamerm", "followers_url": "https://api.github.com/users/saamerm/followers", "following_url": "https://api.github.com/users/saamerm/following{/other_user}", "gists_url": "https://api.github.com/users/saamerm/gists{/gist_id}", "starred_url": "https://api.github.com/users/saamerm/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/saamerm/subscriptions", "organizations_url": "https://api.github.com/users/saamerm/orgs", "repos_url": "https://api.github.com/users/saamerm/repos", "events_url": "https://api.github.com/users/saamerm/events{/privacy}", "received_events_url": "https://api.github.com/users/saamerm/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
40
2024-02-17T02:11:48
2024-04-15T22:26:31
2024-04-15T22:26:30
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
* Upon running `ollama run dolphin-phi` on Linux (it works fine on a Mac), I get this error: `Error: Post "http://127.0.0.1:11434/api/chat": EOF`. * It seems to have installed successfully, so it looks like something goes wrong when the server starts. * I tried adding --v for a more verbose view of the issue, but that didn't help. * Any ideas what I can do to debug? I have a feeling that the error originates from [the Chat function of api/client.go](https://github.com/ollama/ollama/blob/f9fd08040be10bf3d944b642dff86020474cede6/api/client.go#L227), which gets called by [loadModel in cmd/interactive.go](https://github.com/ollama/ollama/blob/f9fd08040be10bf3d944b642dff86020474cede6/cmd/interactive.go#L59), which gets called by `generateInteractive()` in the same file, which is itself called by the [RunHandler in cmd/cmd.go](https://github.com/ollama/ollama/blob/f9fd08040be10bf3d944b642dff86020474cede6/cmd/cmd.go#L212). Within that Chat() function, I'm guessing the issue comes from the `stream()` function in the same file, but I can't tell what line it might be originating from.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2555/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2555/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8564
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8564/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8564/comments
https://api.github.com/repos/ollama/ollama/issues/8564/events
https://github.com/ollama/ollama/issues/8564
2,809,178,195
I_kwDOJ0Z1Ps6ncKhT
8,564
Error: server metal not listed in available servers map
{ "login": "felix021", "id": 367085, "node_id": "MDQ6VXNlcjM2NzA4NQ==", "avatar_url": "https://avatars.githubusercontent.com/u/367085?v=4", "gravatar_id": "", "url": "https://api.github.com/users/felix021", "html_url": "https://github.com/felix021", "followers_url": "https://api.github.com/users/felix021/followers", "following_url": "https://api.github.com/users/felix021/following{/other_user}", "gists_url": "https://api.github.com/users/felix021/gists{/gist_id}", "starred_url": "https://api.github.com/users/felix021/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/felix021/subscriptions", "organizations_url": "https://api.github.com/users/felix021/orgs", "repos_url": "https://api.github.com/users/felix021/repos", "events_url": "https://api.github.com/users/felix021/events{/privacy}", "received_events_url": "https://api.github.com/users/felix021/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
2
2025-01-24T11:11:50
2025-01-26T02:26:54
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I downloaded Ollama today on my MacBook (Apple M3 Pro, macOS Sonoma 14.3 23D56) and tried to run deepseek-r1:8b, but Ollama failed with this error: > $ ollama run deepseek-r1:8b > Error: [0] server metal not listed in available servers map[] P.S. I can run this model with llama-cli on the same device. ### OS macOS ### GPU Apple ### CPU Apple ### Ollama version 0.5.7
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8564/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8564/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/7507
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7507/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7507/comments
https://api.github.com/repos/ollama/ollama/issues/7507/events
https://github.com/ollama/ollama/issues/7507
2,635,203,425
I_kwDOJ0Z1Ps6dEgNh
7,507
OLLAMA_VERSION for pre-release doesn't work
{ "login": "ExposedCat", "id": 44642024, "node_id": "MDQ6VXNlcjQ0NjQyMDI0", "avatar_url": "https://avatars.githubusercontent.com/u/44642024?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ExposedCat", "html_url": "https://github.com/ExposedCat", "followers_url": "https://api.github.com/users/ExposedCat/followers", "following_url": "https://api.github.com/users/ExposedCat/following{/other_user}", "gists_url": "https://api.github.com/users/ExposedCat/gists{/gist_id}", "starred_url": "https://api.github.com/users/ExposedCat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ExposedCat/subscriptions", "organizations_url": "https://api.github.com/users/ExposedCat/orgs", "repos_url": "https://api.github.com/users/ExposedCat/repos", "events_url": "https://api.github.com/users/ExposedCat/events{/privacy}", "received_events_url": "https://api.github.com/users/ExposedCat/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396220, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA", "url": "https://api.github.com/repos/ollama/ollama/labels/question", "name": "question", "color": "d876e3", "default": true, "description": "General questions" }, { "id": 5755339642, "node_id": "LA_kwDOJ0Z1Ps8AAAABVwuDeg", "url": "https://api.github.com/repos/ollama/ollama/labels/linux", "name": "linux", "color": "516E70", "default": false, "description": "" }, { "id": 6678628138, "node_id": "LA_kwDOJ0Z1Ps8AAAABjhPHKg", "url": "https://api.github.com/repos/ollama/ollama/labels/install", "name": "install", "color": "E0B88D", "default": false, "description": "" } ]
closed
false
null
[]
null
2
2024-11-05T11:33:14
2024-11-05T16:33:52
2024-11-05T16:33:37
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? According to the docs, this should download even pre-release versions: ```bash curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=X.Y.Z sh ``` However, it fails with a `404` for `0.4.0`, which is a pre-release version (the latest stable version works). ### OS Linux ### GPU AMD ### CPU AMD ### Ollama version 0.4.0
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7507/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7507/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7325
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7325/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7325/comments
https://api.github.com/repos/ollama/ollama/issues/7325/events
https://github.com/ollama/ollama/pull/7325
2,606,736,159
PR_kwDOJ0Z1Ps5_hmCU
7,325
added ollamarama-matrix to community integrations
{ "login": "h1ddenpr0cess20", "id": 127710567, "node_id": "U_kgDOB5y1Zw", "avatar_url": "https://avatars.githubusercontent.com/u/127710567?v=4", "gravatar_id": "", "url": "https://api.github.com/users/h1ddenpr0cess20", "html_url": "https://github.com/h1ddenpr0cess20", "followers_url": "https://api.github.com/users/h1ddenpr0cess20/followers", "following_url": "https://api.github.com/users/h1ddenpr0cess20/following{/other_user}", "gists_url": "https://api.github.com/users/h1ddenpr0cess20/gists{/gist_id}", "starred_url": "https://api.github.com/users/h1ddenpr0cess20/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/h1ddenpr0cess20/subscriptions", "organizations_url": "https://api.github.com/users/h1ddenpr0cess20/orgs", "repos_url": "https://api.github.com/users/h1ddenpr0cess20/repos", "events_url": "https://api.github.com/users/h1ddenpr0cess20/events{/privacy}", "received_events_url": "https://api.github.com/users/h1ddenpr0cess20/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-10-22T23:26:47
2024-11-22T01:49:30
2024-11-22T01:49:30
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7325", "html_url": "https://github.com/ollama/ollama/pull/7325", "diff_url": "https://github.com/ollama/ollama/pull/7325.diff", "patch_url": "https://github.com/ollama/ollama/pull/7325.patch", "merged_at": "2024-11-22T01:49:30" }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7325/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7325/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/537
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/537/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/537/comments
https://api.github.com/repos/ollama/ollama/issues/537/events
https://github.com/ollama/ollama/pull/537
1,899,174,019
PR_kwDOJ0Z1Ps5aewPf
537
fix error on upload chunk
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-09-15T22:59:52
2023-09-16T00:48:40
2023-09-16T00:48:40
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/537", "html_url": "https://github.com/ollama/ollama/pull/537", "diff_url": "https://github.com/ollama/ollama/pull/537.diff", "patch_url": "https://github.com/ollama/ollama/pull/537.patch", "merged_at": "2023-09-16T00:48:40" }
null
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/537/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/537/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/1350
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1350/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1350/comments
https://api.github.com/repos/ollama/ollama/issues/1350/events
https://github.com/ollama/ollama/pull/1350
2,021,761,364
PR_kwDOJ0Z1Ps5g8h4X
1,350
make linewrap still work when the terminal width has changed
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-12-02T00:18:40
2023-12-04T22:14:57
2023-12-04T22:14:56
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/1350", "html_url": "https://github.com/ollama/ollama/pull/1350", "diff_url": "https://github.com/ollama/ollama/pull/1350.diff", "patch_url": "https://github.com/ollama/ollama/pull/1350.patch", "merged_at": "2023-12-04T22:14:56" }
null
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1350/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1350/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4188
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4188/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4188/comments
https://api.github.com/repos/ollama/ollama/issues/4188/events
https://github.com/ollama/ollama/pull/4188
2,279,827,184
PR_kwDOJ0Z1Ps5ulNf2
4,188
Use our bundled libraries (CUDA) instead of the host library
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-05-06T00:47:21
2024-05-06T21:41:16
2024-05-06T21:41:05
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4188", "html_url": "https://github.com/ollama/ollama/pull/4188", "diff_url": "https://github.com/ollama/ollama/pull/4188.diff", "patch_url": "https://github.com/ollama/ollama/pull/4188.patch", "merged_at": "2024-05-06T21:41:05" }
Trying to live off the land for CUDA libraries was not the right strategy. We need to use the version we compiled against to ensure things work properly. This is most likely going to break Jetson v11 systems, but it turns out the change to favor host CUDA libraries is breaking things for quite a few users.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4188/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4188/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8076
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8076/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8076/comments
https://api.github.com/repos/ollama/ollama/issues/8076/events
https://github.com/ollama/ollama/pull/8076
2,736,874,840
PR_kwDOJ0Z1Ps6FEhL9
8,076
api: return structured error on unauthorized push
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-12-12T21:02:29
2024-12-19T01:42:09
2024-12-19T01:42:08
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/8076", "html_url": "https://github.com/ollama/ollama/pull/8076", "diff_url": "https://github.com/ollama/ollama/pull/8076.diff", "patch_url": "https://github.com/ollama/ollama/pull/8076.patch", "merged_at": null }
This commit implements a structured error response system for the Ollama API, replacing ad-hoc error handling and string parsing with proper error types and codes. The key changes include: 1. Creation of a new `errors.go` file defining structured error types and codes 2. Introduction of `ErrorResponse` struct with standardized fields for error messages, codes, and additional data 3. Migration of error types from `errtypes` package to the `api` package 4. Removal of regex-based error message parsing in favor of structured data The structured error approach is better for several reasons: 1. For the specific case of unknown Ollama keys: - Previously, the code had to parse error messages using regex to extract the SSH key - Now, the key is properly passed in the error response's data field - This eliminates brittle string parsing and potential false matches - Makes it easier to handle different keys (user vs service) consistently 2. For API clients in general: - Error codes allow programmatic error handling without string matching - Additional context can be passed via the data field without breaking existing clients - Error messages can be localized or modified without breaking error handling logic - Provides a consistent error format across all API endpoints - Makes it easier to document and version API errors - Reduces the likelihood of subtle bugs from error message parsing This change makes the API more maintainable and reliable while providing a better developer experience for API consumers. If this change is accepted I can go through and change the rest of the responses to use the API error type. It remains backwards compatible by keeping the same `error` field in the response for the message.
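As a rough sketch of the structured error shape this PR describes: only the message/code/data fields come from the description above, and the exact field names, JSON tags, and error code are assumptions rather than the merged API.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ErrorResponse sketches the structured error payload described in this PR.
type ErrorResponse struct {
	Err  string         `json:"error"`          // legacy message field, kept for backwards compatibility
	Code string         `json:"code,omitempty"` // stable, machine-readable error identifier
	Data map[string]any `json:"data,omitempty"` // extra context, e.g. the offending SSH key
}

// Error lets ErrorResponse satisfy Go's error interface.
func (e ErrorResponse) Error() string { return e.Err }

func main() {
	// Hypothetical unauthorized-push error: the key travels in Data
	// instead of being regex-parsed out of the message string.
	resp := ErrorResponse{
		Err:  "unauthorized: unknown ollama key",
		Code: "unknown_key",
		Data: map[string]any{"key": "ssh-ed25519 AAAA... (example)"},
	}
	b, _ := json.Marshal(resp)
	fmt.Println(string(b))
}
```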
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8076/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8076/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2047
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2047/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2047/comments
https://api.github.com/repos/ollama/ollama/issues/2047/events
https://github.com/ollama/ollama/issues/2047
2,088,134,294
I_kwDOJ0Z1Ps58dmaW
2,047
ollama run stable-code
{ "login": "JiangZongKang", "id": 22634440, "node_id": "MDQ6VXNlcjIyNjM0NDQw", "avatar_url": "https://avatars.githubusercontent.com/u/22634440?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JiangZongKang", "html_url": "https://github.com/JiangZongKang", "followers_url": "https://api.github.com/users/JiangZongKang/followers", "following_url": "https://api.github.com/users/JiangZongKang/following{/other_user}", "gists_url": "https://api.github.com/users/JiangZongKang/gists{/gist_id}", "starred_url": "https://api.github.com/users/JiangZongKang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JiangZongKang/subscriptions", "organizations_url": "https://api.github.com/users/JiangZongKang/orgs", "repos_url": "https://api.github.com/users/JiangZongKang/repos", "events_url": "https://api.github.com/users/JiangZongKang/events{/privacy}", "received_events_url": "https://api.github.com/users/JiangZongKang/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-01-18T11:56:31
2024-02-07T01:10:30
2024-02-07T01:10:30
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
The command does not produce any response when executed on a Mac. ![CleanShot 2024-01-18 at 19 56 17@2x](https://github.com/jmorganca/ollama/assets/22634440/f423f706-10b1-496a-bb8e-50a85afbea6b)
{ "login": "JiangZongKang", "id": 22634440, "node_id": "MDQ6VXNlcjIyNjM0NDQw", "avatar_url": "https://avatars.githubusercontent.com/u/22634440?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JiangZongKang", "html_url": "https://github.com/JiangZongKang", "followers_url": "https://api.github.com/users/JiangZongKang/followers", "following_url": "https://api.github.com/users/JiangZongKang/following{/other_user}", "gists_url": "https://api.github.com/users/JiangZongKang/gists{/gist_id}", "starred_url": "https://api.github.com/users/JiangZongKang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JiangZongKang/subscriptions", "organizations_url": "https://api.github.com/users/JiangZongKang/orgs", "repos_url": "https://api.github.com/users/JiangZongKang/repos", "events_url": "https://api.github.com/users/JiangZongKang/events{/privacy}", "received_events_url": "https://api.github.com/users/JiangZongKang/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2047/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2047/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2866
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2866/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2866/comments
https://api.github.com/repos/ollama/ollama/issues/2866/events
https://github.com/ollama/ollama/pull/2866
2,163,669,751
PR_kwDOJ0Z1Ps5obHs7
2,866
chore: update readme, add open-webui
{ "login": "longregen", "id": 114724657, "node_id": "U_kgDOBtaPMQ", "avatar_url": "https://avatars.githubusercontent.com/u/114724657?v=4", "gravatar_id": "", "url": "https://api.github.com/users/longregen", "html_url": "https://github.com/longregen", "followers_url": "https://api.github.com/users/longregen/followers", "following_url": "https://api.github.com/users/longregen/following{/other_user}", "gists_url": "https://api.github.com/users/longregen/gists{/gist_id}", "starred_url": "https://api.github.com/users/longregen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/longregen/subscriptions", "organizations_url": "https://api.github.com/users/longregen/orgs", "repos_url": "https://api.github.com/users/longregen/repos", "events_url": "https://api.github.com/users/longregen/events{/privacy}", "received_events_url": "https://api.github.com/users/longregen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-03-01T15:44:59
2024-03-09T22:24:46
2024-03-09T22:24:46
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2866", "html_url": "https://github.com/ollama/ollama/pull/2866", "diff_url": "https://github.com/ollama/ollama/pull/2866.diff", "patch_url": "https://github.com/ollama/ollama/pull/2866.patch", "merged_at": null }
After testing most of these suggested frontends, "Open WebUI", formerly "ollama-webui", looks like the best open option for amateurs looking to self-host a frontend similar to OpenAI's ChatGPT interface.
{ "login": "longregen", "id": 114724657, "node_id": "U_kgDOBtaPMQ", "avatar_url": "https://avatars.githubusercontent.com/u/114724657?v=4", "gravatar_id": "", "url": "https://api.github.com/users/longregen", "html_url": "https://github.com/longregen", "followers_url": "https://api.github.com/users/longregen/followers", "following_url": "https://api.github.com/users/longregen/following{/other_user}", "gists_url": "https://api.github.com/users/longregen/gists{/gist_id}", "starred_url": "https://api.github.com/users/longregen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/longregen/subscriptions", "organizations_url": "https://api.github.com/users/longregen/orgs", "repos_url": "https://api.github.com/users/longregen/repos", "events_url": "https://api.github.com/users/longregen/events{/privacy}", "received_events_url": "https://api.github.com/users/longregen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2866/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2866/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5444
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5444/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5444/comments
https://api.github.com/repos/ollama/ollama/issues/5444/events
https://github.com/ollama/ollama/issues/5444
2,387,196,468
I_kwDOJ0Z1Ps6OSbo0
5,444
Ollama on Mac does not free up space / what is the equivalent of /usr/share/ollama/.ollama/models
{ "login": "tomaszstachera", "id": 61825692, "node_id": "MDQ6VXNlcjYxODI1Njky", "avatar_url": "https://avatars.githubusercontent.com/u/61825692?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tomaszstachera", "html_url": "https://github.com/tomaszstachera", "followers_url": "https://api.github.com/users/tomaszstachera/followers", "following_url": "https://api.github.com/users/tomaszstachera/following{/other_user}", "gists_url": "https://api.github.com/users/tomaszstachera/gists{/gist_id}", "starred_url": "https://api.github.com/users/tomaszstachera/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tomaszstachera/subscriptions", "organizations_url": "https://api.github.com/users/tomaszstachera/orgs", "repos_url": "https://api.github.com/users/tomaszstachera/repos", "events_url": "https://api.github.com/users/tomaszstachera/events{/privacy}", "received_events_url": "https://api.github.com/users/tomaszstachera/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-07-02T21:25:47
2024-07-03T20:44:08
2024-07-03T20:44:08
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I've run `ollama run llama3:70b` on a Mac, and the CLI pulled 40GB of data that is not stored in ~/.ollama. `ollama list` shows no models. Where is the data, and how do I clean it up? ### OS macOS ### GPU _No response_ ### CPU _No response_ ### Ollama version 0.1.48
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5444/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5444/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7707
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7707/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7707/comments
https://api.github.com/repos/ollama/ollama/issues/7707/events
https://github.com/ollama/ollama/pull/7707
2,666,149,630
PR_kwDOJ0Z1Ps6CKcMn
7,707
Update README.md
{ "login": "adarshM84", "id": 95633830, "node_id": "U_kgDOBbNBpg", "avatar_url": "https://avatars.githubusercontent.com/u/95633830?v=4", "gravatar_id": "", "url": "https://api.github.com/users/adarshM84", "html_url": "https://github.com/adarshM84", "followers_url": "https://api.github.com/users/adarshM84/followers", "following_url": "https://api.github.com/users/adarshM84/following{/other_user}", "gists_url": "https://api.github.com/users/adarshM84/gists{/gist_id}", "starred_url": "https://api.github.com/users/adarshM84/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/adarshM84/subscriptions", "organizations_url": "https://api.github.com/users/adarshM84/orgs", "repos_url": "https://api.github.com/users/adarshM84/repos", "events_url": "https://api.github.com/users/adarshM84/events{/privacy}", "received_events_url": "https://api.github.com/users/adarshM84/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-11-17T16:51:57
2024-11-20T18:42:56
2024-11-20T18:42:56
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7707", "html_url": "https://github.com/ollama/ollama/pull/7707", "diff_url": "https://github.com/ollama/ollama/pull/7707.diff", "patch_url": "https://github.com/ollama/ollama/pull/7707.patch", "merged_at": "2024-11-20T18:42:56" }
This Chrome extension lets users interact with Ollama through a UI: they can download and delete models, among many other features.
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7707/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7707/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/6527
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6527/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6527/comments
https://api.github.com/repos/ollama/ollama/issues/6527/events
https://github.com/ollama/ollama/issues/6527
2,489,702,822
I_kwDOJ0Z1Ps6UZdmm
6,527
stella_en_400M_v5 model request
{ "login": "raymond-infinitecode", "id": 4714784, "node_id": "MDQ6VXNlcjQ3MTQ3ODQ=", "avatar_url": "https://avatars.githubusercontent.com/u/4714784?v=4", "gravatar_id": "", "url": "https://api.github.com/users/raymond-infinitecode", "html_url": "https://github.com/raymond-infinitecode", "followers_url": "https://api.github.com/users/raymond-infinitecode/followers", "following_url": "https://api.github.com/users/raymond-infinitecode/following{/other_user}", "gists_url": "https://api.github.com/users/raymond-infinitecode/gists{/gist_id}", "starred_url": "https://api.github.com/users/raymond-infinitecode/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/raymond-infinitecode/subscriptions", "organizations_url": "https://api.github.com/users/raymond-infinitecode/orgs", "repos_url": "https://api.github.com/users/raymond-infinitecode/repos", "events_url": "https://api.github.com/users/raymond-infinitecode/events{/privacy}", "received_events_url": "https://api.github.com/users/raymond-infinitecode/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
4
2024-08-27T15:22:55
2024-11-16T20:45:03
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Please add support for https://hf.rst.im/dunzhang/stella_en_400M_v5, since https://ollama.com/Losspost/stella_en_1.5b_v5 already exists.
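For anyone landing here, a usage sketch with the 1.5B community build referenced above; the 400M model would presumably be used the same way once converted. The request mirrors the `/api/embed` examples elsewhere in this repo:

```bash
# Pull the existing 1.5B build from ollama.com
ollama pull Losspost/stella_en_1.5b_v5

# Request embeddings through the /api/embed endpoint
curl http://localhost:11434/api/embed \
  -H "Content-Type: application/json" \
  -d '{"model": "Losspost/stella_en_1.5b_v5", "input": "Hello, world"}'
```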
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6527/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6527/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/8298
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8298/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8298/comments
https://api.github.com/repos/ollama/ollama/issues/8298/events
https://github.com/ollama/ollama/pull/8298
2,768,088,834
PR_kwDOJ0Z1Ps6GsIFr
8,298
api: remove unused create fields
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2025-01-03T19:49:23
2025-01-03T20:04:00
2025-01-03T20:03:58
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/8298", "html_url": "https://github.com/ollama/ollama/pull/8298", "diff_url": "https://github.com/ollama/ollama/pull/8298.diff", "patch_url": "https://github.com/ollama/ollama/pull/8298.patch", "merged_at": "2025-01-03T20:03:58" }
These fields are deprecated, and specifying them no longer does anything. The other deprecated fields still work, but these do not, so they don't match our existing pattern; remove them.
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8298/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8298/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8334
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8334/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8334/comments
https://api.github.com/repos/ollama/ollama/issues/8334/events
https://github.com/ollama/ollama/pull/8334
2,772,419,184
PR_kwDOJ0Z1Ps6G6oQR
8,334
readme: add Reins to community integrations
{ "login": "ibrahimcetin", "id": 33904390, "node_id": "MDQ6VXNlcjMzOTA0Mzkw", "avatar_url": "https://avatars.githubusercontent.com/u/33904390?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ibrahimcetin", "html_url": "https://github.com/ibrahimcetin", "followers_url": "https://api.github.com/users/ibrahimcetin/followers", "following_url": "https://api.github.com/users/ibrahimcetin/following{/other_user}", "gists_url": "https://api.github.com/users/ibrahimcetin/gists{/gist_id}", "starred_url": "https://api.github.com/users/ibrahimcetin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ibrahimcetin/subscriptions", "organizations_url": "https://api.github.com/users/ibrahimcetin/orgs", "repos_url": "https://api.github.com/users/ibrahimcetin/repos", "events_url": "https://api.github.com/users/ibrahimcetin/events{/privacy}", "received_events_url": "https://api.github.com/users/ibrahimcetin/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
0
2025-01-07T10:05:06
2025-01-07T10:05:06
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/8334", "html_url": "https://github.com/ollama/ollama/pull/8334", "diff_url": "https://github.com/ollama/ollama/pull/8334.diff", "patch_url": "https://github.com/ollama/ollama/pull/8334.patch", "merged_at": null }
null
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8334/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8334/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8547
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8547/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8547/comments
https://api.github.com/repos/ollama/ollama/issues/8547/events
https://github.com/ollama/ollama/issues/8547
2,806,414,251
I_kwDOJ0Z1Ps6nRnur
8,547
deepseek-r1 `qwen` variants use a new pre-tokenizer, which is not implemented in the llama.cpp version used
{ "login": "sealad886", "id": 155285242, "node_id": "U_kgDOCUF2-g", "avatar_url": "https://avatars.githubusercontent.com/u/155285242?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sealad886", "html_url": "https://github.com/sealad886", "followers_url": "https://api.github.com/users/sealad886/followers", "following_url": "https://api.github.com/users/sealad886/following{/other_user}", "gists_url": "https://api.github.com/users/sealad886/gists{/gist_id}", "starred_url": "https://api.github.com/users/sealad886/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sealad886/subscriptions", "organizations_url": "https://api.github.com/users/sealad886/orgs", "repos_url": "https://api.github.com/users/sealad886/repos", "events_url": "https://api.github.com/users/sealad886/events{/privacy}", "received_events_url": "https://api.github.com/users/sealad886/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
0
2025-01-23T09:42:59
2025-01-23T09:43:17
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? The newly supported `deepseek-r1` model variants with `distill-qwen` in the name use a new pre-tokenizer. Support for it has been added to the latest llama.cpp (I'm not sure whether in a release or only on the main branch). The llama.cpp backend that Ollama uses should be updated to support it, since the `default` pre-tokenizer behaves very differently from the bespoke one. ### OS Linux, macOS, Windows, Docker, WSL2 ### GPU _No response_ ### CPU _No response_ ### Ollama version 0.5.7
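A quick way to confirm which pre-tokenizer a pulled blob declares, sketched under two assumptions: that the `gguf` Python package (llama.cpp's gguf-py tooling, which ships a `gguf-dump` script) is available, and that you locate the model's sha256 blob yourself (the path below is a placeholder):

```bash
# llama.cpp's gguf-py tooling ships a gguf-dump console script
pip install gguf

# Placeholder path: substitute the actual sha256 blob for the pulled model
BLOB=/usr/share/ollama/.ollama/models/blobs/sha256-XXXX

# Runners that don't recognize this value fall back to the `default` pre-tokenizer
gguf-dump "$BLOB" | grep tokenizer.ggml.pre
```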
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8547/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8547/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/5713
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5713/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5713/comments
https://api.github.com/repos/ollama/ollama/issues/5713/events
https://github.com/ollama/ollama/pull/5713
2,409,876,869
PR_kwDOJ0Z1Ps51crga
5,713
server: return empty slice on empty `/api/embed` request
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-07-16T00:19:47
2024-07-16T00:39:46
2024-07-16T00:39:45
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5713", "html_url": "https://github.com/ollama/ollama/pull/5713", "diff_url": "https://github.com/ollama/ollama/pull/5713.diff", "patch_url": "https://github.com/ollama/ollama/pull/5713.patch", "merged_at": "2024-07-16T00:39:45" }
Before: ``` curl http://localhost:11434/api/embed \ -H "Content-Type: application/json" \ -d '{ "input": "", "model": "all-minilm" }' {"model":"all-minilm"} ``` After: ``` curl http://localhost:11434/api/embed \ -H "Content-Type: application/json" \ -d '{ "input": "", "model": "all-minilm" }' {"model":"all-minilm","embeddings":[]} ```
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5713/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5713/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4725
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4725/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4725/comments
https://api.github.com/repos/ollama/ollama/issues/4725/events
https://github.com/ollama/ollama/pull/4725
2,326,020,262
PR_kwDOJ0Z1Ps5xB8GC
4,725
Make examples/go-chat iterative
{ "login": "w84miracle", "id": 1922754, "node_id": "MDQ6VXNlcjE5MjI3NTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/1922754?v=4", "gravatar_id": "", "url": "https://api.github.com/users/w84miracle", "html_url": "https://github.com/w84miracle", "followers_url": "https://api.github.com/users/w84miracle/followers", "following_url": "https://api.github.com/users/w84miracle/following{/other_user}", "gists_url": "https://api.github.com/users/w84miracle/gists{/gist_id}", "starred_url": "https://api.github.com/users/w84miracle/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/w84miracle/subscriptions", "organizations_url": "https://api.github.com/users/w84miracle/orgs", "repos_url": "https://api.github.com/users/w84miracle/repos", "events_url": "https://api.github.com/users/w84miracle/events{/privacy}", "received_events_url": "https://api.github.com/users/w84miracle/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
1
2024-05-30T15:54:58
2024-06-05T12:47:12
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4725", "html_url": "https://github.com/ollama/ollama/pull/4725", "diff_url": "https://github.com/ollama/ollama/pull/4725.diff", "patch_url": "https://github.com/ollama/ollama/pull/4725.patch", "merged_at": null }
Aligned with the other languages' chat examples by making it iterative.
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4725/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4725/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4672
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4672/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4672/comments
https://api.github.com/repos/ollama/ollama/issues/4672/events
https://github.com/ollama/ollama/pull/4672
2,320,025,163
PR_kwDOJ0Z1Ps5wtcbf
4,672
Add OllamaSpring Project to Readme
{ "login": "CrazyNeil", "id": 5747549, "node_id": "MDQ6VXNlcjU3NDc1NDk=", "avatar_url": "https://avatars.githubusercontent.com/u/5747549?v=4", "gravatar_id": "", "url": "https://api.github.com/users/CrazyNeil", "html_url": "https://github.com/CrazyNeil", "followers_url": "https://api.github.com/users/CrazyNeil/followers", "following_url": "https://api.github.com/users/CrazyNeil/following{/other_user}", "gists_url": "https://api.github.com/users/CrazyNeil/gists{/gist_id}", "starred_url": "https://api.github.com/users/CrazyNeil/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/CrazyNeil/subscriptions", "organizations_url": "https://api.github.com/users/CrazyNeil/orgs", "repos_url": "https://api.github.com/users/CrazyNeil/repos", "events_url": "https://api.github.com/users/CrazyNeil/events{/privacy}", "received_events_url": "https://api.github.com/users/CrazyNeil/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-05-28T02:55:13
2024-05-28T02:58:27
2024-05-28T02:58:27
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4672", "html_url": "https://github.com/ollama/ollama/pull/4672", "diff_url": "https://github.com/ollama/ollama/pull/4672.diff", "patch_url": "https://github.com/ollama/ollama/pull/4672.patch", "merged_at": "2024-05-28T02:58:27" }
Add OllamaSpring Project to Readme
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4672/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4672/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3263
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3263/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3263/comments
https://api.github.com/repos/ollama/ollama/issues/3263/events
https://github.com/ollama/ollama/issues/3263
2,196,880,108
I_kwDOJ0Z1Ps6C8brs
3,263
Add MiniCPM 2B model
{ "login": "GavinBF", "id": 18061367, "node_id": "MDQ6VXNlcjE4MDYxMzY3", "avatar_url": "https://avatars.githubusercontent.com/u/18061367?v=4", "gravatar_id": "", "url": "https://api.github.com/users/GavinBF", "html_url": "https://github.com/GavinBF", "followers_url": "https://api.github.com/users/GavinBF/followers", "following_url": "https://api.github.com/users/GavinBF/following{/other_user}", "gists_url": "https://api.github.com/users/GavinBF/gists{/gist_id}", "starred_url": "https://api.github.com/users/GavinBF/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/GavinBF/subscriptions", "organizations_url": "https://api.github.com/users/GavinBF/orgs", "repos_url": "https://api.github.com/users/GavinBF/repos", "events_url": "https://api.github.com/users/GavinBF/events{/privacy}", "received_events_url": "https://api.github.com/users/GavinBF/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
1
2024-03-20T07:49:11
2024-06-09T17:11:38
2024-06-09T17:11:38
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What model would you like? Can we add the MiniCPM model? https://github.com/OpenBMB/MiniCPM https://huggingface.co/collections/openbmb/minicpm-2b-65d48bf958302b9fd25b698f
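While waiting for an official build, community GGUF conversions can usually be imported directly via a Modelfile. A sketch, with a hypothetical file name:

```bash
# A Modelfile's FROM accepts a local GGUF file
cat > Modelfile <<'EOF'
FROM ./minicpm-2b-dpo.Q4_K_M.gguf
EOF

ollama create minicpm-2b -f Modelfile
ollama run minicpm-2b
```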
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3263/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3263/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6197
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6197/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6197/comments
https://api.github.com/repos/ollama/ollama/issues/6197/events
https://github.com/ollama/ollama/issues/6197
2,450,613,273
I_kwDOJ0Z1Ps6SEWQZ
6,197
'FROM' is not recognized as an internal or external command, operable program or batch file.
{ "login": "LaksLaksman", "id": 152250473, "node_id": "U_kgDOCRMoaQ", "avatar_url": "https://avatars.githubusercontent.com/u/152250473?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LaksLaksman", "html_url": "https://github.com/LaksLaksman", "followers_url": "https://api.github.com/users/LaksLaksman/followers", "following_url": "https://api.github.com/users/LaksLaksman/following{/other_user}", "gists_url": "https://api.github.com/users/LaksLaksman/gists{/gist_id}", "starred_url": "https://api.github.com/users/LaksLaksman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LaksLaksman/subscriptions", "organizations_url": "https://api.github.com/users/LaksLaksman/orgs", "repos_url": "https://api.github.com/users/LaksLaksman/repos", "events_url": "https://api.github.com/users/LaksLaksman/events{/privacy}", "received_events_url": "https://api.github.com/users/LaksLaksman/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
0
2024-08-06T11:10:52
2024-08-06T11:14:44
2024-08-06T11:14:44
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
*'FROM' is not recognized as an internal or external command,* C:\Users\LaksmanP>FROM llama3.1 PARAMETER temperature 1 'FROM' is not recognized as an internal or external command, operable program or batch file. This message appears when I try to set a parameter after pulling the model. ### OS Windows ### GPU Intel ### CPU Intel ### Ollama version 0.3.3
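For the record, `FROM` and `PARAMETER` are Modelfile directives, not shell commands, which is why cmd.exe rejects them. The expected flow is roughly the following (POSIX-shell sketch; on Windows, write the Modelfile with a text editor instead of the heredoc):

```bash
# Put the directives in a file named Modelfile...
cat > Modelfile <<'EOF'
FROM llama3.1
PARAMETER temperature 1
EOF

# ...then build and run a model from it
ollama create my-llama -f Modelfile   # "my-llama" is a placeholder name
ollama run my-llama

# Alternatively, inside an interactive `ollama run llama3.1` session:
#   /set parameter temperature 1
```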
{ "login": "LaksLaksman", "id": 152250473, "node_id": "U_kgDOCRMoaQ", "avatar_url": "https://avatars.githubusercontent.com/u/152250473?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LaksLaksman", "html_url": "https://github.com/LaksLaksman", "followers_url": "https://api.github.com/users/LaksLaksman/followers", "following_url": "https://api.github.com/users/LaksLaksman/following{/other_user}", "gists_url": "https://api.github.com/users/LaksLaksman/gists{/gist_id}", "starred_url": "https://api.github.com/users/LaksLaksman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LaksLaksman/subscriptions", "organizations_url": "https://api.github.com/users/LaksLaksman/orgs", "repos_url": "https://api.github.com/users/LaksLaksman/repos", "events_url": "https://api.github.com/users/LaksLaksman/events{/privacy}", "received_events_url": "https://api.github.com/users/LaksLaksman/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6197/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6197/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5909
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5909/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5909/comments
https://api.github.com/repos/ollama/ollama/issues/5909/events
https://github.com/ollama/ollama/issues/5909
2,427,434,752
I_kwDOJ0Z1Ps6Qr7cA
5,909
" Error: json: cannot unmarshal array into Go struct field Params.eos_token_id of type int " while importing llama 3.1 8B safetensor model from huggingface
{ "login": "SadeghPouriyanZadeh", "id": 74629673, "node_id": "MDQ6VXNlcjc0NjI5Njcz", "avatar_url": "https://avatars.githubusercontent.com/u/74629673?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SadeghPouriyanZadeh", "html_url": "https://github.com/SadeghPouriyanZadeh", "followers_url": "https://api.github.com/users/SadeghPouriyanZadeh/followers", "following_url": "https://api.github.com/users/SadeghPouriyanZadeh/following{/other_user}", "gists_url": "https://api.github.com/users/SadeghPouriyanZadeh/gists{/gist_id}", "starred_url": "https://api.github.com/users/SadeghPouriyanZadeh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SadeghPouriyanZadeh/subscriptions", "organizations_url": "https://api.github.com/users/SadeghPouriyanZadeh/orgs", "repos_url": "https://api.github.com/users/SadeghPouriyanZadeh/repos", "events_url": "https://api.github.com/users/SadeghPouriyanZadeh/events{/privacy}", "received_events_url": "https://api.github.com/users/SadeghPouriyanZadeh/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "joshyan1", "id": 76125168, "node_id": "MDQ6VXNlcjc2MTI1MTY4", "avatar_url": "https://avatars.githubusercontent.com/u/76125168?v=4", "gravatar_id": "", "url": "https://api.github.com/users/joshyan1", "html_url": "https://github.com/joshyan1", "followers_url": "https://api.github.com/users/joshyan1/followers", "following_url": "https://api.github.com/users/joshyan1/following{/other_user}", "gists_url": "https://api.github.com/users/joshyan1/gists{/gist_id}", "starred_url": "https://api.github.com/users/joshyan1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/joshyan1/subscriptions", "organizations_url": "https://api.github.com/users/joshyan1/orgs", "repos_url": "https://api.github.com/users/joshyan1/repos", "events_url": "https://api.github.com/users/joshyan1/events{/privacy}", "received_events_url": "https://api.github.com/users/joshyan1/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "joshyan1", "id": 76125168, "node_id": "MDQ6VXNlcjc2MTI1MTY4", "avatar_url": "https://avatars.githubusercontent.com/u/76125168?v=4", "gravatar_id": "", "url": "https://api.github.com/users/joshyan1", "html_url": "https://github.com/joshyan1", "followers_url": "https://api.github.com/users/joshyan1/followers", "following_url": "https://api.github.com/users/joshyan1/following{/other_user}", "gists_url": "https://api.github.com/users/joshyan1/gists{/gist_id}", "starred_url": "https://api.github.com/users/joshyan1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/joshyan1/subscriptions", "organizations_url": "https://api.github.com/users/joshyan1/orgs", "repos_url": "https://api.github.com/users/joshyan1/repos", "events_url": "https://api.github.com/users/joshyan1/events{/privacy}", "received_events_url": "https://api.github.com/users/joshyan1/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
12
2024-07-24T12:15:53
2024-11-21T12:38:57
2024-09-02T00:19:05
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? ## What is the problem? I was importing the llama 3.1 8B model from huggingface (`meta-llama/Meta-Llama-3.1-8B-Instruct`) using `ollama create -f Modelfile`, but I got this error: `Error: json: cannot unmarshal array into Go struct field Params.eos_token_id of type int` I found the shallow cause of the error and fixed it naively, but I'd like to know why the error happened and what the better solution is. ## What I get from the error The Go struct expects an `int` for `eos_token_id`, but the `config.json` and `generation_config.json` files contain `"eos_token_id": [128001, 128008, 128009]`. This type mismatch produces the error. ## What is my naive solution? I changed the value to `"eos_token_id": 128001`, without a concrete reason for choosing `128001` over the others, and it solved the problem: a model is created. ## Why did this error happen? What is the correct solution? I don't know. I'd appreciate an explanation. ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.2.8
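A workaround sketch, assuming `jq` is installed and the commands run from the directory holding the downloaded checkpoint. Note that `128009` is `<|eot_id|>`, the token Llama 3's chat template actually stops on (the GGUF metadata elsewhere in this repo sets `tokenizer.ggml.eos_token_id = 128009`), so it is arguably a better pick than `128001`; that choice is an assumption about intent, not an official fix:

```bash
# Collapse the eos_token_id array to one int so the importer's Go struct parses it
for f in config.json generation_config.json; do
  jq '.eos_token_id = 128009' "$f" > "$f.tmp" && mv "$f.tmp" "$f"
done

ollama create llama31-import -f Modelfile   # "llama31-import" is a placeholder name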
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5909/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5909/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8200
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8200/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8200/comments
https://api.github.com/repos/ollama/ollama/issues/8200/events
https://github.com/ollama/ollama/issues/8200
2,754,208,755
I_kwDOJ0Z1Ps6kKePz
8,200
Ollama hangs when running llama3.2 and llama3.2:1b
{ "login": "pr0fsmith", "id": 54153368, "node_id": "MDQ6VXNlcjU0MTUzMzY4", "avatar_url": "https://avatars.githubusercontent.com/u/54153368?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pr0fsmith", "html_url": "https://github.com/pr0fsmith", "followers_url": "https://api.github.com/users/pr0fsmith/followers", "following_url": "https://api.github.com/users/pr0fsmith/following{/other_user}", "gists_url": "https://api.github.com/users/pr0fsmith/gists{/gist_id}", "starred_url": "https://api.github.com/users/pr0fsmith/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pr0fsmith/subscriptions", "organizations_url": "https://api.github.com/users/pr0fsmith/orgs", "repos_url": "https://api.github.com/users/pr0fsmith/repos", "events_url": "https://api.github.com/users/pr0fsmith/events{/privacy}", "received_events_url": "https://api.github.com/users/pr0fsmith/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
3
2024-12-21T16:25:10
2025-01-13T01:45:06
2025-01-13T01:45:06
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? After a while of using Ollama, the LLM becomes completely unresponsive and there's no CPU or GPU usage during that time. This happens with LLAMA3.2 and LLAMA3.2:1B. Here are the logs. ` Dec 21 00:37:03 olivi ollama[627]: [GIN] 2024/12/21 - 00:37:03 | 200 | 816.217µs | 172.17.0.2 | GET "/api/tags" Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: loaded meta data with 30 key-value pairs and 147 tensors from /usr/share/ollama/.ollama/models/blobs/sha256-74701a8c35f6c8d9a4b91f3f3497643001d63e0c7a84e085bed452548fa88d45 (version GGUF V3 (latest)) Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 0: general.architecture str = llama Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 1: general.type str = model Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 2: general.name str = Llama 3.2 1B Instruct Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 3: general.finetune str = Instruct Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 4: general.basename str = Llama-3.2 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 5: general.size_label str = 1B Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 6: general.tags arr[str,6] = ["facebook", "meta", "pytorch", "llam... Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 7: general.languages arr[str,8] = ["en", "de", "fr", "it", "pt", "hi", ... Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 8: llama.block_count u32 = 16 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 9: llama.context_length u32 = 131072 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 10: llama.embedding_length u32 = 2048 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 11: llama.feed_forward_length u32 = 8192 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 12: llama.attention.head_count u32 = 32 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 13: llama.attention.head_count_kv u32 = 8 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 14: llama.rope.freq_base f32 = 500000.000000 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 15: llama.attention.layer_norm_rms_epsilon f32 = 0.000010 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 16: llama.attention.key_length u32 = 64 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 17: llama.attention.value_length u32 = 64 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 18: general.file_type u32 = 7 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 19: llama.vocab_size u32 = 128256 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 20: llama.rope.dimension_count u32 = 64 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 21: tokenizer.ggml.model str = gpt2 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 22: tokenizer.ggml.pre str = llama-bpe Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 23: tokenizer.ggml.tokens arr[str,128256] = ["!", "\"", "#", "$", "%", "&", "'", ... Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 24: tokenizer.ggml.token_type arr[i32,128256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ... Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 25: tokenizer.ggml.merges arr[str,280147] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "... 
Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 26: tokenizer.ggml.bos_token_id u32 = 128000 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 27: tokenizer.ggml.eos_token_id u32 = 128009 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 28: tokenizer.chat_template str = {{- bos_token }}\n{%- if custom_tools ... Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - kv 29: general.quantization_version u32 = 2 Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - type f32: 34 tensors Dec 21 00:37:04 olivi ollama[627]: llama_model_loader: - type q8_0: 113 tensors Dec 21 00:37:04 olivi ollama[627]: llm_load_vocab: special tokens cache size = 256 Dec 21 00:37:04 olivi ollama[627]: llm_load_vocab: token to piece cache size = 0.7999 MB Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: format = GGUF V3 (latest) Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: arch = llama Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: vocab type = BPE Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: n_vocab = 128256 Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: n_merges = 280147 Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: vocab_only = 1 Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: model type = ?B Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: model ftype = all F32 Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: model params = 1.24 B Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: model size = 1.22 GiB (8.50 BPW) Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: general.name = Llama 3.2 1B Instruct Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: BOS token = 128000 '<|begin_of_text|>' Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: EOS token = 128009 '<|eot_id|>' Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: EOT token = 128009 '<|eot_id|>' Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: EOM token = 128008 '<|eom_id|>' Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: LF token = 128 'Ä' Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: EOG token = 128008 '<|eom_id|>' Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: EOG token = 128009 '<|eot_id|>' Dec 21 00:37:04 olivi ollama[627]: llm_load_print_meta: max token length = 256 Dec 21 00:37:04 olivi ollama[627]: llama_model_load: vocab only - skipping tensors Dec 21 06:08:13 olivi ollama[627]: [GIN] 2024/12/21 - 06:08:13 | 200 | 5h31m9s | 172.17.0.2 | POST "/api/chat" Dec 21 07:28:02 olivi ollama[627]: [GIN] 2024/12/21 - 07:28:02 | 200 | 10h12m29s | 172.17.0.2 | POST "/api/chat" Dec 21 11:09:13 olivi systemd[1]: Stopping ollama.service - Ollama Service... Dec 21 11:09:13 olivi systemd[1]: ollama.service: Deactivated successfully. Dec 21 11:09:13 olivi systemd[1]: Stopped ollama.service - Ollama Service. Dec 21 11:09:13 olivi systemd[1]: ollama.service: Consumed 4min 38.830s CPU time. Dec 21 11:09:13 olivi systemd[1]: Started ollama.service - Ollama Service. 
Dec 21 11:09:13 olivi ollama[34220]: 2024/12/21 11:09:13 routes.go:1259: INFO server config env="map[CUDA_VISIBLE_DEVICES: GPU_DEVICE_ORDINAL: HIP_VISIBLE_DEVICES: HSA_OVERRIDE_GFX_VERSION: HTTPS_PROXY: HTTP_PROXY: NO_PROXY: OLLAMA_DEBUG:false OLLAMA_FLASH_ATTENTION:false OLLAMA_GPU_OVERHEAD:0 OLLAMA_HOST:http://0.0.0.0:11434 OLLAMA_INTEL_GPU:false OLLAMA_KEEP_ALIVE:5m0s OLLAMA_KV_CACHE_TYPE: OLLAMA_LLM_LIBRARY: OLLAMA_LOAD_TIMEOUT:5m0s OLLAMA_MAX_LOADED_MODELS:0 OLLAMA_MAX_QUEUE:512 OLLAMA_MODELS:/usr/share/ollama/.ollama/models OLLAMA_MULTIUSER_CACHE:false OLLAMA_NOHISTORY:false OLLAMA_NOPRUNE:false OLLAMA_NUM_PARALLEL:0 OLLAMA_ORIGINS:[http://localhost https://localhost http://localhost:* https://localhost:* http://127.0.0.1 https://127.0.0.1 http://127.0.0.1:* https://127.0.0.1:* http://0.0.0.0 https://0.0.0.0 http://0.0.0.0:* https://0.0.0.0:* app://* file://* tauri://* vscode-webview://*] OLLAMA_SCHED_SPREAD:false ROCR_VISIBLE_DEVICES: http_proxy: https_proxy: no_proxy:]" Dec 21 11:09:13 olivi ollama[34220]: time=2024-12-21T11:09:13.418-05:00 level=INFO source=images.go:757 msg="total blobs: 14" Dec 21 11:09:13 olivi ollama[34220]: time=2024-12-21T11:09:13.418-05:00 level=INFO source=images.go:764 msg="total unused blobs removed: 0" Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached. Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] [WARNING] Running in "debug" mode. Switch to "release" mode in production. Dec 21 11:09:13 olivi ollama[34220]: - using env: export GIN_MODE=release Dec 21 11:09:13 olivi ollama[34220]: - using code: gin.SetMode(gin.ReleaseMode) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /api/pull --> github.com/ollama/ollama/server.(*Server).PullHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /api/generate --> github.com/ollama/ollama/server.(*Server).GenerateHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /api/chat --> github.com/ollama/ollama/server.(*Server).ChatHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /api/embed --> github.com/ollama/ollama/server.(*Server).EmbedHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /api/embeddings --> github.com/ollama/ollama/server.(*Server).EmbeddingsHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /api/create --> github.com/ollama/ollama/server.(*Server).CreateHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /api/push --> github.com/ollama/ollama/server.(*Server).PushHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /api/copy --> github.com/ollama/ollama/server.(*Server).CopyHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] DELETE /api/delete --> github.com/ollama/ollama/server.(*Server).DeleteHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /api/show --> github.com/ollama/ollama/server.(*Server).ShowHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /api/blobs/:digest --> github.com/ollama/ollama/server.(*Server).CreateBlobHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] HEAD /api/blobs/:digest --> github.com/ollama/ollama/server.(*Server).HeadBlobHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] GET /api/ps --> github.com/ollama/ollama/server.(*Server).PsHandler-fm (5 handlers) Dec 21 11:09:13 olivi 
ollama[34220]: [GIN-debug] POST /v1/chat/completions --> github.com/ollama/ollama/server.(*Server).ChatHandler-fm (6 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /v1/completions --> github.com/ollama/ollama/server.(*Server).GenerateHandler-fm (6 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] POST /v1/embeddings --> github.com/ollama/ollama/server.(*Server).EmbedHandler-fm (6 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] GET /v1/models --> github.com/ollama/ollama/server.(*Server).ListHandler-fm (6 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] GET /v1/models/:model --> github.com/ollama/ollama/server.(*Server).ShowHandler-fm (6 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] GET / --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func1 (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] GET /api/tags --> github.com/ollama/ollama/server.(*Server).ListHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] GET /api/version --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func2 (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] HEAD / --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func1 (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] HEAD /api/tags --> github.com/ollama/ollama/server.(*Server).ListHandler-fm (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: [GIN-debug] HEAD /api/version --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func2 (5 handlers) Dec 21 11:09:13 olivi ollama[34220]: time=2024-12-21T11:09:13.419-05:00 level=INFO source=routes.go:1310 msg="Listening on [::]:11434 (version 0.5.4)" Dec 21 11:09:13 olivi ollama[34220]: time=2024-12-21T11:09:13.419-05:00 level=INFO source=routes.go:1339 msg="Dynamic LLM libraries" runners="[cpu cpu_avx cpu_avx2 cuda_v11_avx cuda_v12_avx rocm_avx]" Dec 21 11:09:13 olivi ollama[34220]: time=2024-12-21T11:09:13.419-05:00 level=INFO source=gpu.go:226 msg="looking for compatible GPUs" Dec 21 11:09:13 olivi ollama[34220]: time=2024-12-21T11:09:13.799-05:00 level=INFO source=types.go:131 msg="inference compute" id=GPU-20ba82ab-bf3d-11ef-bf4a-9e2501b9f7cf library=cuda variant=v12 compute=6.1 driver=12.2 name="GRID P4-4Q" total="4.0 GiB" available="2.9 GiB" Dec 21 11:09:23 olivi ollama[34220]: [GIN] 2024/12/21 - 11:09:23 | 200 | 62.241µs | 127.0.0.1 | HEAD "/" Dec 21 11:09:23 olivi ollama[34220]: [GIN] 2024/12/21 - 11:09:23 | 200 | 26.245321ms | 127.0.0.1 | POST "/api/show" Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.246-05:00 level=INFO source=sched.go:714 msg="new model will fit in available VRAM in single GPU, loading" model=/usr/share/ollama/.ollama/models/blobs/sha256-74701a8c35f6c8d9a4b91f3f3497643001d63e0c7a84e085bed452548fa88d45 gpu=GPU-20ba82ab-bf3d-11ef-bf4a-9e2501b9f7cf parallel=4 available=3091042304 required="2.5 GiB" Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.588-05:00 level=INFO source=server.go:104 msg="system memory" total="15.2 GiB" free="13.6 GiB" free_swap="975.0 MiB" Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.588-05:00 level=INFO source=memory.go:356 msg="offload to cuda" layers.requested=-1 layers.model=17 layers.offload=17 layers.split="" memory.available="[2.9 GiB]" memory.gpu_overhead="0 B" memory.required.full="2.5 GiB" memory.required.partial="2.5 GiB" memory.required.kv="256.0 MiB" memory.required.allocations="[2.5 GiB]" memory.weights.total="1.2 GiB" memory.weights.repeating="976.1 MiB" 
memory.weights.nonrepeating="266.2 MiB" memory.graph.full="544.0 MiB" memory.graph.partial="554.3 MiB" Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.589-05:00 level=INFO source=server.go:376 msg="starting llama server" cmd="/usr/local/lib/ollama/runners/cuda_v12_avx/ollama_llama_server runner --model /usr/share/ollama/.ollama/models/blobs/sha256-74701a8c35f6c8d9a4b91f3f3497643001d63e0c7a84e085bed452548fa88d45 --ctx-size 8192 --batch-size 512 --n-gpu-layers 17 --threads 16 --parallel 4 --port 45211" Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.590-05:00 level=INFO source=sched.go:449 msg="loaded runners" count=1 Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.590-05:00 level=INFO source=server.go:555 msg="waiting for llama runner to start responding" Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.590-05:00 level=INFO source=server.go:589 msg="waiting for server to become available" status="llm server error" Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.633-05:00 level=INFO source=runner.go:945 msg="starting go runner" Dec 21 11:09:24 olivi ollama[34220]: ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no Dec 21 11:09:24 olivi ollama[34220]: ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no Dec 21 11:09:24 olivi ollama[34220]: ggml_cuda_init: found 1 CUDA devices: Dec 21 11:09:24 olivi ollama[34220]: Device 0: GRID P4-4Q, compute capability 6.1, VMM: no Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.644-05:00 level=INFO source=runner.go:946 msg=system info="CUDA : ARCHS = 600,610,620,700,720,750,800,860,870,890,900 | USE_GRAPHS = 1 | PEER_MAX_BATCH_SIZE = 128 | CPU : SSE3 = 1 | SSSE3 = 1 | AVX = 1 | LLAMAFILE = 1 | AARCH64_REPACK = 1 | cgo(gcc)" threads=16 Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.644-05:00 level=INFO source=.:0 msg="Server listening on 127.0.0.1:45211" Dec 21 11:09:24 olivi ollama[34220]: time=2024-12-21T11:09:24.842-05:00 level=INFO source=server.go:589 msg="waiting for server to become available" status="llm server loading model" Dec 21 11:09:24 olivi ollama[34220]: llama_load_model_from_file: using device CUDA0 (GRID P4-4Q) - 2947 MiB free Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: loaded meta data with 30 key-value pairs and 147 tensors from /usr/share/ollama/.ollama/models/blobs/sha256-74701a8c35f6c8d9a4b91f3f3497643001d63e0c7a84e085bed452548fa88d45 (version GGUF V3 (latest)) Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 0: general.architecture str = llama Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 1: general.type str = model Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 2: general.name str = Llama 3.2 1B Instruct Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 3: general.finetune str = Instruct Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 4: general.basename str = Llama-3.2 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 5: general.size_label str = 1B Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 6: general.tags arr[str,6] = ["facebook", "meta", "pytorch", "llam... Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 7: general.languages arr[str,8] = ["en", "de", "fr", "it", "pt", "hi", ... 
Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 8: llama.block_count u32 = 16 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 9: llama.context_length u32 = 131072 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 10: llama.embedding_length u32 = 2048 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 11: llama.feed_forward_length u32 = 8192 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 12: llama.attention.head_count u32 = 32 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 13: llama.attention.head_count_kv u32 = 8 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 14: llama.rope.freq_base f32 = 500000.000000 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 15: llama.attention.layer_norm_rms_epsilon f32 = 0.000010 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 16: llama.attention.key_length u32 = 64 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 17: llama.attention.value_length u32 = 64 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 18: general.file_type u32 = 7 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 19: llama.vocab_size u32 = 128256 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 20: llama.rope.dimension_count u32 = 64 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 21: tokenizer.ggml.model str = gpt2 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 22: tokenizer.ggml.pre str = llama-bpe Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 23: tokenizer.ggml.tokens arr[str,128256] = ["!", "\"", "#", "$", "%", "&", "'", ... Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 24: tokenizer.ggml.token_type arr[i32,128256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ... Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 25: tokenizer.ggml.merges arr[str,280147] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "... Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 26: tokenizer.ggml.bos_token_id u32 = 128000 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 27: tokenizer.ggml.eos_token_id u32 = 128009 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 28: tokenizer.chat_template str = {{- bos_token }}\n{%- if custom_tools ... 
Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - kv 29: general.quantization_version u32 = 2 Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - type f32: 34 tensors Dec 21 11:09:25 olivi ollama[34220]: llama_model_loader: - type q8_0: 113 tensors Dec 21 11:09:25 olivi ollama[34220]: llm_load_vocab: special tokens cache size = 256 Dec 21 11:09:25 olivi ollama[34220]: llm_load_vocab: token to piece cache size = 0.7999 MB Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: format = GGUF V3 (latest) Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: arch = llama Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: vocab type = BPE Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_vocab = 128256 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_merges = 280147 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: vocab_only = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_ctx_train = 131072 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_embd = 2048 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_layer = 16 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_head = 32 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_head_kv = 8 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_rot = 64 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_swa = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_embd_head_k = 64 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_embd_head_v = 64 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_gqa = 4 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_embd_k_gqa = 512 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_embd_v_gqa = 512 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: f_norm_eps = 0.0e+00 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: f_norm_rms_eps = 1.0e-05 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: f_clamp_kqv = 0.0e+00 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: f_max_alibi_bias = 0.0e+00 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: f_logit_scale = 0.0e+00 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_ff = 8192 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_expert = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_expert_used = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: causal attn = 1 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: pooling type = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: rope type = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: rope scaling = linear Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: freq_base_train = 500000.0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: freq_scale_train = 1 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: n_ctx_orig_yarn = 131072 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: rope_finetuned = unknown Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: ssm_d_conv = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: ssm_d_inner = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: ssm_d_state = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: ssm_dt_rank = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: ssm_dt_b_c_rms = 0 Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: model type = 1B Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: model ftype = Q8_0 
Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: model params = 1.24 B Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: model size = 1.22 GiB (8.50 BPW) Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: general.name = Llama 3.2 1B Instruct Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: BOS token = 128000 '<|begin_of_text|>' Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: EOS token = 128009 '<|eot_id|>' Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: EOT token = 128009 '<|eot_id|>' Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: EOM token = 128008 '<|eom_id|>' Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: LF token = 128 'Ä' Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: EOG token = 128008 '<|eom_id|>' Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: EOG token = 128009 '<|eot_id|>' Dec 21 11:09:25 olivi ollama[34220]: llm_load_print_meta: max token length = 256 Dec 21 11:09:26 olivi ollama[34220]: llm_load_tensors: offloading 16 repeating layers to GPU Dec 21 11:09:26 olivi ollama[34220]: llm_load_tensors: offloading output layer to GPU Dec 21 11:09:26 olivi ollama[34220]: llm_load_tensors: offloaded 17/17 layers to GPU Dec 21 11:09:26 olivi ollama[34220]: llm_load_tensors: CPU_Mapped model buffer size = 266.16 MiB Dec 21 11:09:26 olivi ollama[34220]: llm_load_tensors: CUDA0 model buffer size = 1252.41 MiB Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: n_seq_max = 4 Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: n_ctx = 8192 Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: n_ctx_per_seq = 2048 Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: n_batch = 2048 Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: n_ubatch = 512 Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: flash_attn = 0 Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: freq_base = 500000.0 Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: freq_scale = 1 Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: n_ctx_per_seq (2048) < n_ctx_train (131072) -- the full capacity of the model will not be utilized Dec 21 11:09:44 olivi ollama[34220]: llama_kv_cache_init: CUDA0 KV buffer size = 256.00 MiB Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: KV self size = 256.00 MiB, K (f16): 128.00 MiB, V (f16): 128.00 MiB Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: CUDA_Host output buffer size = 1.99 MiB Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: CUDA0 compute buffer size = 544.00 MiB Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: CUDA_Host compute buffer size = 20.01 MiB Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: graph nodes = 518 Dec 21 11:09:44 olivi ollama[34220]: llama_new_context_with_model: graph splits = 2 Dec 21 11:09:44 olivi ollama[34220]: time=2024-12-21T11:09:44.687-05:00 level=INFO source=server.go:594 msg="llama runner started in 20.10 seconds" Dec 21 11:09:44 olivi ollama[34220]: [GIN] 2024/12/21 - 11:09:44 | 200 | 20.862331914s | 127.0.0.1 | POST "/api/generate" ` ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.5.4
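Editor's note: the `n_ctx_per_seq (2048) < n_ctx_train (131072)` warning in the log above comes from Ollama's default context window, not from the model itself. A minimal sketch of raising it per request via the `num_ctx` option of `/api/generate`; the model name and context size below are illustrative assumptions:

```python
import requests

# Override the default context window for a single /api/generate call.
# "llama3.2:1b" and num_ctx=8192 are illustrative values, not from the report.
resp = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "llama3.2:1b",
        "prompt": "Summarize the server log above.",
        "stream": False,
        "options": {"num_ctx": 8192},
    },
    timeout=300,
)
print(resp.json()["response"])
```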
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8200/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8200/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3550
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3550/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3550/comments
https://api.github.com/repos/ollama/ollama/issues/3550/events
https://github.com/ollama/ollama/issues/3550
2,232,717,804
I_kwDOJ0Z1Ps6FFJHs
3,550
ollama serve cannot detect GPU
{ "login": "g-makerr", "id": 71173795, "node_id": "MDQ6VXNlcjcxMTczNzk1", "avatar_url": "https://avatars.githubusercontent.com/u/71173795?v=4", "gravatar_id": "", "url": "https://api.github.com/users/g-makerr", "html_url": "https://github.com/g-makerr", "followers_url": "https://api.github.com/users/g-makerr/followers", "following_url": "https://api.github.com/users/g-makerr/following{/other_user}", "gists_url": "https://api.github.com/users/g-makerr/gists{/gist_id}", "starred_url": "https://api.github.com/users/g-makerr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/g-makerr/subscriptions", "organizations_url": "https://api.github.com/users/g-makerr/orgs", "repos_url": "https://api.github.com/users/g-makerr/repos", "events_url": "https://api.github.com/users/g-makerr/events{/privacy}", "received_events_url": "https://api.github.com/users/g-makerr/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6430601766, "node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg", "url": "https://api.github.com/repos/ollama/ollama/labels/nvidia", "name": "nvidia", "color": "8CDB00", "default": false, "description": "Issues relating to Nvidia GPUs and CUDA" }, { "id": 6677745918, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgZQ_g", "url": "https://api.github.com/repos/ollama/ollama/labels/gpu", "name": "gpu", "color": "76C49E", "default": false, "description": "" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
8
2024-04-09T06:45:11
2024-05-01T17:52:34
2024-04-13T16:11:27
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I run `ollama serve`, but it reports "no GPU detected" and "[cudart] error looking up CUDART GPU memory: cudart device memory info lookup failure 2". There were no such problems four days ago. ![Screenshot 2024-04-09 144347](https://github.com/ollama/ollama/assets/71173795/b9c853a7-e4be-42ec-b4b1-967b7bcdf219) ### What did you expect to see? GPU detected & in use ### Steps to reproduce _No response_ ### Are there any recent changes that introduced the issue? _No response_ ### OS Linux ### Architecture amd64, x86 ### Platform _No response_ ### Ollama version 0.1.30 ### GPU Nvidia ### GPU info ![Screenshot 2024-04-09 164724](https://github.com/ollama/ollama/assets/71173795/fcb0ae91-bbd2-4de9-86d7-b1a45b103a47) ### CPU _No response_ ### Other software _No response_
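Editor's note: for "no GPU detected" reports like this one, a useful first check is whether the driver stack itself can see the GPU; if the query below fails, the problem is below Ollama. A hedged diagnostic sketch (nothing here is Ollama-specific):

```python
import shutil
import subprocess

# If nvidia-smi is missing or errors out, the NVIDIA driver/CUDA runtime is
# broken, which would explain the "cudart device memory info lookup failure".
if shutil.which("nvidia-smi") is None:
    print("nvidia-smi not found: NVIDIA driver likely not installed")
else:
    result = subprocess.run(
        ["nvidia-smi", "--query-gpu=name,driver_version,memory.total",
         "--format=csv"],
        capture_output=True, text=True,
    )
    print(result.stdout or result.stderr)
```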
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3550/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3550/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2722
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2722/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2722/comments
https://api.github.com/repos/ollama/ollama/issues/2722/events
https://github.com/ollama/ollama/issues/2722
2,152,180,592
I_kwDOJ0Z1Ps6AR6tw
2,722
How can I specify the context window size using OpenAI compatible API?
{ "login": "egoist", "id": 8784712, "node_id": "MDQ6VXNlcjg3ODQ3MTI=", "avatar_url": "https://avatars.githubusercontent.com/u/8784712?v=4", "gravatar_id": "", "url": "https://api.github.com/users/egoist", "html_url": "https://github.com/egoist", "followers_url": "https://api.github.com/users/egoist/followers", "following_url": "https://api.github.com/users/egoist/following{/other_user}", "gists_url": "https://api.github.com/users/egoist/gists{/gist_id}", "starred_url": "https://api.github.com/users/egoist/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/egoist/subscriptions", "organizations_url": "https://api.github.com/users/egoist/orgs", "repos_url": "https://api.github.com/users/egoist/repos", "events_url": "https://api.github.com/users/egoist/events{/privacy}", "received_events_url": "https://api.github.com/users/egoist/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396220, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA", "url": "https://api.github.com/repos/ollama/ollama/labels/question", "name": "question", "color": "d876e3", "default": true, "description": "General questions" } ]
closed
false
null
[]
null
4
2024-02-24T07:52:58
2024-07-23T11:16:12
2024-07-18T22:41:39
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I wonder if there's a way to specify the context window size when using the OpenAI-compatible API (https://github.com/ollama/ollama/blob/main/docs/openai.md).
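Editor's note: the OpenAI-compatible endpoint does not accept `num_ctx` directly; the documented workaround is to bake the context size into a derived model via a Modelfile and then address that model through `/v1`. A sketch assuming the `openai` Python package and an illustrative derived-model name:

```python
from openai import OpenAI

# Assumes a derived model was created beforehand, e.g.:
#   Modelfile:  FROM llama3
#               PARAMETER num_ctx 8192
#   Shell:      ollama create llama3-8k -f Modelfile
client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")
reply = client.chat.completions.create(
    model="llama3-8k",  # illustrative name for the derived model
    messages=[{"role": "user", "content": "Hello with a larger context window"}],
)
print(reply.choices[0].message.content)
```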
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2722/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2722/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2939
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2939/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2939/comments
https://api.github.com/repos/ollama/ollama/issues/2939/events
https://github.com/ollama/ollama/issues/2939
2,169,809,269
I_kwDOJ0Z1Ps6BVKl1
2,939
Model Request : WhiteRabbitNeo 33B v1.5
{ "login": "ligmaSec", "id": 87036992, "node_id": "MDQ6VXNlcjg3MDM2OTky", "avatar_url": "https://avatars.githubusercontent.com/u/87036992?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ligmaSec", "html_url": "https://github.com/ligmaSec", "followers_url": "https://api.github.com/users/ligmaSec/followers", "following_url": "https://api.github.com/users/ligmaSec/following{/other_user}", "gists_url": "https://api.github.com/users/ligmaSec/gists{/gist_id}", "starred_url": "https://api.github.com/users/ligmaSec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ligmaSec/subscriptions", "organizations_url": "https://api.github.com/users/ligmaSec/orgs", "repos_url": "https://api.github.com/users/ligmaSec/repos", "events_url": "https://api.github.com/users/ligmaSec/events{/privacy}", "received_events_url": "https://api.github.com/users/ligmaSec/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2024-03-05T17:46:08
2024-03-07T09:18:51
2024-03-06T23:46:50
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
https://huggingface.co/WhiteRabbitNeo/WhiteRabbitNeo-33B-v1.5
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2939/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2939/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6711
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6711/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6711/comments
https://api.github.com/repos/ollama/ollama/issues/6711/events
https://github.com/ollama/ollama/issues/6711
2,513,781,256
I_kwDOJ0Z1Ps6V1UII
6,711
Can I stop then start a "pull" when the LLM is not completely downloaded?
{ "login": "bulrush15", "id": 7031486, "node_id": "MDQ6VXNlcjcwMzE0ODY=", "avatar_url": "https://avatars.githubusercontent.com/u/7031486?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bulrush15", "html_url": "https://github.com/bulrush15", "followers_url": "https://api.github.com/users/bulrush15/followers", "following_url": "https://api.github.com/users/bulrush15/following{/other_user}", "gists_url": "https://api.github.com/users/bulrush15/gists{/gist_id}", "starred_url": "https://api.github.com/users/bulrush15/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bulrush15/subscriptions", "organizations_url": "https://api.github.com/users/bulrush15/orgs", "repos_url": "https://api.github.com/users/bulrush15/repos", "events_url": "https://api.github.com/users/bulrush15/events{/privacy}", "received_events_url": "https://api.github.com/users/bulrush15/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
3
2024-09-09T12:07:12
2024-09-09T13:59:49
2024-09-09T13:59:49
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I'm on Windows 11 with an Nvidia GeForce RTX 3060 video card. I've already used ollama with a smaller LLM that I pulled. I started a pull of a large LLM, mistral-large. It seems it would take 3 hours on my PC and it's really slowing down my network. So I stopped the download/pull. 1. If I restart the pull will Ollama save the contents that were already downloaded? 2. Or do I have to start the pull from the beginning again? 3. Do I have to delete the downloaded portion of the LLM first before restarting the pull? Thanks. ### OS Windows ### GPU Nvidia ### CPU AMD ### Ollama version 0.3.9
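Editor's note: `ollama pull` downloads are resumable, so an interrupted pull continues from the layers already completed rather than starting over, and nothing needs to be deleted first. A minimal sketch of watching that layer-by-layer progress through the REST API; the model name is taken from the report:

```python
import json
import requests

# /api/pull streams one JSON status object per line; re-running this after an
# interruption resumes from completed layers instead of re-downloading them.
with requests.post(
    "http://localhost:11434/api/pull",
    json={"model": "mistral-large"},
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if line:
            status = json.loads(line)
            print(status.get("status"),
                  status.get("completed"), status.get("total"))
```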
{ "login": "bulrush15", "id": 7031486, "node_id": "MDQ6VXNlcjcwMzE0ODY=", "avatar_url": "https://avatars.githubusercontent.com/u/7031486?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bulrush15", "html_url": "https://github.com/bulrush15", "followers_url": "https://api.github.com/users/bulrush15/followers", "following_url": "https://api.github.com/users/bulrush15/following{/other_user}", "gists_url": "https://api.github.com/users/bulrush15/gists{/gist_id}", "starred_url": "https://api.github.com/users/bulrush15/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bulrush15/subscriptions", "organizations_url": "https://api.github.com/users/bulrush15/orgs", "repos_url": "https://api.github.com/users/bulrush15/repos", "events_url": "https://api.github.com/users/bulrush15/events{/privacy}", "received_events_url": "https://api.github.com/users/bulrush15/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6711/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6711/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8595
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8595/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8595/comments
https://api.github.com/repos/ollama/ollama/issues/8595/events
https://github.com/ollama/ollama/issues/8595
2,811,649,642
I_kwDOJ0Z1Ps6nll5q
8,595
Train Ollama models using custom data
{ "login": "samrudha01codespace", "id": 144599345, "node_id": "U_kgDOCJ5pMQ", "avatar_url": "https://avatars.githubusercontent.com/u/144599345?v=4", "gravatar_id": "", "url": "https://api.github.com/users/samrudha01codespace", "html_url": "https://github.com/samrudha01codespace", "followers_url": "https://api.github.com/users/samrudha01codespace/followers", "following_url": "https://api.github.com/users/samrudha01codespace/following{/other_user}", "gists_url": "https://api.github.com/users/samrudha01codespace/gists{/gist_id}", "starred_url": "https://api.github.com/users/samrudha01codespace/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/samrudha01codespace/subscriptions", "organizations_url": "https://api.github.com/users/samrudha01codespace/orgs", "repos_url": "https://api.github.com/users/samrudha01codespace/repos", "events_url": "https://api.github.com/users/samrudha01codespace/events{/privacy}", "received_events_url": "https://api.github.com/users/samrudha01codespace/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
2
2025-01-26T16:24:59
2025-01-28T21:32:55
2025-01-28T21:32:55
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Can users train the small Ollama models using their own datasets?
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8595/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8595/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8053
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8053/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8053/comments
https://api.github.com/repos/ollama/ollama/issues/8053/events
https://github.com/ollama/ollama/issues/8053
2,733,979,055
I_kwDOJ0Z1Ps6i9TWv
8,053
Documentation enhancement Idea - AWS Fargate Infra Implementation
{ "login": "mcam10", "id": 42009541, "node_id": "MDQ6VXNlcjQyMDA5NTQx", "avatar_url": "https://avatars.githubusercontent.com/u/42009541?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mcam10", "html_url": "https://github.com/mcam10", "followers_url": "https://api.github.com/users/mcam10/followers", "following_url": "https://api.github.com/users/mcam10/following{/other_user}", "gists_url": "https://api.github.com/users/mcam10/gists{/gist_id}", "starred_url": "https://api.github.com/users/mcam10/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mcam10/subscriptions", "organizations_url": "https://api.github.com/users/mcam10/orgs", "repos_url": "https://api.github.com/users/mcam10/repos", "events_url": "https://api.github.com/users/mcam10/events{/privacy}", "received_events_url": "https://api.github.com/users/mcam10/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
0
2024-12-11T20:48:40
2024-12-11T20:48:40
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I was able to get Ollama up and running on [Fargate](https://aws.amazon.com/fargate/) using [copilot cli](https://aws.github.io/copilot-cli/) as a sandbox/test environment. Would this be helpful for the community?
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8053/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8053/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/6581
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6581/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6581/comments
https://api.github.com/repos/ollama/ollama/issues/6581/events
https://github.com/ollama/ollama/pull/6581
2,498,968,493
PR_kwDOJ0Z1Ps56Dygx
6,581
Add findutils to base images
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-08-31T17:32:32
2024-08-31T20:22:16
2024-08-31T17:40:05
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6581", "html_url": "https://github.com/ollama/ollama/pull/6581", "diff_url": "https://github.com/ollama/ollama/pull/6581.diff", "patch_url": "https://github.com/ollama/ollama/pull/6581.patch", "merged_at": "2024-08-31T17:40:05" }
This caused missing internal files
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6581/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6581/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5053
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5053/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5053/comments
https://api.github.com/repos/ollama/ollama/issues/5053/events
https://github.com/ollama/ollama/pull/5053
2,354,356,727
PR_kwDOJ0Z1Ps5yiRUi
5,053
feat: implemented a model export cli command
{ "login": "JerrettDavis", "id": 2610199, "node_id": "MDQ6VXNlcjI2MTAxOTk=", "avatar_url": "https://avatars.githubusercontent.com/u/2610199?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JerrettDavis", "html_url": "https://github.com/JerrettDavis", "followers_url": "https://api.github.com/users/JerrettDavis/followers", "following_url": "https://api.github.com/users/JerrettDavis/following{/other_user}", "gists_url": "https://api.github.com/users/JerrettDavis/gists{/gist_id}", "starred_url": "https://api.github.com/users/JerrettDavis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JerrettDavis/subscriptions", "organizations_url": "https://api.github.com/users/JerrettDavis/orgs", "repos_url": "https://api.github.com/users/JerrettDavis/repos", "events_url": "https://api.github.com/users/JerrettDavis/events{/privacy}", "received_events_url": "https://api.github.com/users/JerrettDavis/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
1
2024-06-15T01:05:02
2024-08-14T15:18:39
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
true
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5053", "html_url": "https://github.com/ollama/ollama/pull/5053", "diff_url": "https://github.com/ollama/ollama/pull/5053.diff", "patch_url": "https://github.com/ollama/ollama/pull/5053.patch", "merged_at": null }
First pass at solving #335. Converted the bash script provided by [supersonictw](https://github.com/supersonictw) to golang. Export a model by running `ollama export <model> <output>`. For example `ollama export llama3:latest llama-backup`.
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5053/reactions", "total_count": 3, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 1, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5053/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7502
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7502/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7502/comments
https://api.github.com/repos/ollama/ollama/issues/7502/events
https://github.com/ollama/ollama/issues/7502
2,634,308,518
I_kwDOJ0Z1Ps6dBFum
7,502
ollama-server does not run the large model for a period of time. When running the large model again, an error message is displayed:
{ "login": "GreatStep", "id": 3817997, "node_id": "MDQ6VXNlcjM4MTc5OTc=", "avatar_url": "https://avatars.githubusercontent.com/u/3817997?v=4", "gravatar_id": "", "url": "https://api.github.com/users/GreatStep", "html_url": "https://github.com/GreatStep", "followers_url": "https://api.github.com/users/GreatStep/followers", "following_url": "https://api.github.com/users/GreatStep/following{/other_user}", "gists_url": "https://api.github.com/users/GreatStep/gists{/gist_id}", "starred_url": "https://api.github.com/users/GreatStep/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/GreatStep/subscriptions", "organizations_url": "https://api.github.com/users/GreatStep/orgs", "repos_url": "https://api.github.com/users/GreatStep/repos", "events_url": "https://api.github.com/users/GreatStep/events{/privacy}", "received_events_url": "https://api.github.com/users/GreatStep/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
3
2024-11-05T03:14:30
2024-11-07T03:26:51
2024-11-05T16:34:46
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? After ollama-server has not run the large model for a period of time, running the model again displays an error: `server cpu not listed in available servers map[]`. Every time I restart Ollama, everything returns to normal. I have seen many users encounter similar situations online, and they were all solved by restarting. Is this a bug? Any help? ### OS Linux ### GPU Nvidia ### CPU _No response_ ### Ollama version 0.1.45
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7502/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7502/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2257
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2257/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2257/comments
https://api.github.com/repos/ollama/ollama/issues/2257/events
https://github.com/ollama/ollama/issues/2257
2,106,141,111
I_kwDOJ0Z1Ps59iSm3
2,257
[ask] Where can I see the version of llama.cpp used for each version of ollama?
{ "login": "iddar", "id": 199103, "node_id": "MDQ6VXNlcjE5OTEwMw==", "avatar_url": "https://avatars.githubusercontent.com/u/199103?v=4", "gravatar_id": "", "url": "https://api.github.com/users/iddar", "html_url": "https://github.com/iddar", "followers_url": "https://api.github.com/users/iddar/followers", "following_url": "https://api.github.com/users/iddar/following{/other_user}", "gists_url": "https://api.github.com/users/iddar/gists{/gist_id}", "starred_url": "https://api.github.com/users/iddar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/iddar/subscriptions", "organizations_url": "https://api.github.com/users/iddar/orgs", "repos_url": "https://api.github.com/users/iddar/repos", "events_url": "https://api.github.com/users/iddar/events{/privacy}", "received_events_url": "https://api.github.com/users/iddar/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-01-29T18:15:20
2024-02-02T00:08:44
2024-02-02T00:08:44
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I think it would be good to include the version of llama.cpp used in each Ollama release's notes, to make the new features it brings visible.
{ "login": "iddar", "id": 199103, "node_id": "MDQ6VXNlcjE5OTEwMw==", "avatar_url": "https://avatars.githubusercontent.com/u/199103?v=4", "gravatar_id": "", "url": "https://api.github.com/users/iddar", "html_url": "https://github.com/iddar", "followers_url": "https://api.github.com/users/iddar/followers", "following_url": "https://api.github.com/users/iddar/following{/other_user}", "gists_url": "https://api.github.com/users/iddar/gists{/gist_id}", "starred_url": "https://api.github.com/users/iddar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/iddar/subscriptions", "organizations_url": "https://api.github.com/users/iddar/orgs", "repos_url": "https://api.github.com/users/iddar/repos", "events_url": "https://api.github.com/users/iddar/events{/privacy}", "received_events_url": "https://api.github.com/users/iddar/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2257/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2257/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8606
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8606/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8606/comments
https://api.github.com/repos/ollama/ollama/issues/8606/events
https://github.com/ollama/ollama/issues/8606
2,812,491,291
I_kwDOJ0Z1Ps6nozYb
8,606
Why doesn't my ollama use GPU
{ "login": "baotianxia", "id": 68735021, "node_id": "MDQ6VXNlcjY4NzM1MDIx", "avatar_url": "https://avatars.githubusercontent.com/u/68735021?v=4", "gravatar_id": "", "url": "https://api.github.com/users/baotianxia", "html_url": "https://github.com/baotianxia", "followers_url": "https://api.github.com/users/baotianxia/followers", "following_url": "https://api.github.com/users/baotianxia/following{/other_user}", "gists_url": "https://api.github.com/users/baotianxia/gists{/gist_id}", "starred_url": "https://api.github.com/users/baotianxia/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/baotianxia/subscriptions", "organizations_url": "https://api.github.com/users/baotianxia/orgs", "repos_url": "https://api.github.com/users/baotianxia/repos", "events_url": "https://api.github.com/users/baotianxia/events{/privacy}", "received_events_url": "https://api.github.com/users/baotianxia/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
21
2025-01-27T09:27:24
2025-01-28T02:37:10
2025-01-28T02:37:09
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I installed the Nvidia driver with `sudo apt install nvidia-driver-xxx`, and Ollama shows the model as running on the GPU, but my CPU usage is 100% and GPU usage is 0%. ![Image](https://github.com/user-attachments/assets/d9473bd3-953a-4f12-99d2-36420a7645d5) ![Image](https://github.com/user-attachments/assets/53934bc4-d3c2-4e77-83a3-14a57749edca) ![Image](https://github.com/user-attachments/assets/550849ce-12f8-46fb-a04c-4e23244e6742) Ubuntu server 24.04
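Editor's note: a quick way to confirm whether a loaded model actually resides in GPU memory is Ollama's `/api/ps` endpoint; a `size_vram` of 0 means the model fell back entirely to CPU, which matches the 100% CPU / 0% GPU symptom above. A small sketch:

```python
import requests

# /api/ps lists loaded models with their total size and the share in VRAM.
models = requests.get("http://localhost:11434/api/ps").json().get("models", [])
for m in models:
    vram = m.get("size_vram", 0)
    print(f'{m["name"]}: size={m["size"]} size_vram={vram}'
          + ("  <- running on CPU" if vram == 0 else ""))
```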
{ "login": "baotianxia", "id": 68735021, "node_id": "MDQ6VXNlcjY4NzM1MDIx", "avatar_url": "https://avatars.githubusercontent.com/u/68735021?v=4", "gravatar_id": "", "url": "https://api.github.com/users/baotianxia", "html_url": "https://github.com/baotianxia", "followers_url": "https://api.github.com/users/baotianxia/followers", "following_url": "https://api.github.com/users/baotianxia/following{/other_user}", "gists_url": "https://api.github.com/users/baotianxia/gists{/gist_id}", "starred_url": "https://api.github.com/users/baotianxia/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/baotianxia/subscriptions", "organizations_url": "https://api.github.com/users/baotianxia/orgs", "repos_url": "https://api.github.com/users/baotianxia/repos", "events_url": "https://api.github.com/users/baotianxia/events{/privacy}", "received_events_url": "https://api.github.com/users/baotianxia/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8606/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8606/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2171
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2171/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2171/comments
https://api.github.com/repos/ollama/ollama/issues/2171/events
https://github.com/ollama/ollama/issues/2171
2,098,067,157
I_kwDOJ0Z1Ps59DfbV
2,171
Request: Please add `xwincoder` to `ollama.ai`
{ "login": "jukofyork", "id": 69222624, "node_id": "MDQ6VXNlcjY5MjIyNjI0", "avatar_url": "https://avatars.githubusercontent.com/u/69222624?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jukofyork", "html_url": "https://github.com/jukofyork", "followers_url": "https://api.github.com/users/jukofyork/followers", "following_url": "https://api.github.com/users/jukofyork/following{/other_user}", "gists_url": "https://api.github.com/users/jukofyork/gists{/gist_id}", "starred_url": "https://api.github.com/users/jukofyork/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jukofyork/subscriptions", "organizations_url": "https://api.github.com/users/jukofyork/orgs", "repos_url": "https://api.github.com/users/jukofyork/repos", "events_url": "https://api.github.com/users/jukofyork/events{/privacy}", "received_events_url": "https://api.github.com/users/jukofyork/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
0
2024-01-24T11:31:32
2024-01-24T17:28:13
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
There are already 3 variants of `xwinlm` (https://ollama.ai/library/xwinlm) but no `xwincoder` (https://huggingface.co/Xwin-LM/XwinCoder-34B), and it seems to be quite a good coding model from what I've seen so far.
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2171/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2171/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/4356
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4356/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4356/comments
https://api.github.com/repos/ollama/ollama/issues/4356/events
https://github.com/ollama/ollama/pull/4356
2,290,855,984
PR_kwDOJ0Z1Ps5vKGoy
4,356
Refactor parsing model configuration
{ "login": "redouan-rhazouani", "id": 81578195, "node_id": "MDQ6VXNlcjgxNTc4MTk1", "avatar_url": "https://avatars.githubusercontent.com/u/81578195?v=4", "gravatar_id": "", "url": "https://api.github.com/users/redouan-rhazouani", "html_url": "https://github.com/redouan-rhazouani", "followers_url": "https://api.github.com/users/redouan-rhazouani/followers", "following_url": "https://api.github.com/users/redouan-rhazouani/following{/other_user}", "gists_url": "https://api.github.com/users/redouan-rhazouani/gists{/gist_id}", "starred_url": "https://api.github.com/users/redouan-rhazouani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/redouan-rhazouani/subscriptions", "organizations_url": "https://api.github.com/users/redouan-rhazouani/orgs", "repos_url": "https://api.github.com/users/redouan-rhazouani/repos", "events_url": "https://api.github.com/users/redouan-rhazouani/events{/privacy}", "received_events_url": "https://api.github.com/users/redouan-rhazouani/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-05-11T11:46:08
2024-06-13T17:15:24
2024-06-13T17:13:35
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4356", "html_url": "https://github.com/ollama/ollama/pull/4356", "diff_url": "https://github.com/ollama/ollama/pull/4356.diff", "patch_url": "https://github.com/ollama/ollama/pull/4356.patch", "merged_at": null }
null
{ "login": "redouan-rhazouani", "id": 81578195, "node_id": "MDQ6VXNlcjgxNTc4MTk1", "avatar_url": "https://avatars.githubusercontent.com/u/81578195?v=4", "gravatar_id": "", "url": "https://api.github.com/users/redouan-rhazouani", "html_url": "https://github.com/redouan-rhazouani", "followers_url": "https://api.github.com/users/redouan-rhazouani/followers", "following_url": "https://api.github.com/users/redouan-rhazouani/following{/other_user}", "gists_url": "https://api.github.com/users/redouan-rhazouani/gists{/gist_id}", "starred_url": "https://api.github.com/users/redouan-rhazouani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/redouan-rhazouani/subscriptions", "organizations_url": "https://api.github.com/users/redouan-rhazouani/orgs", "repos_url": "https://api.github.com/users/redouan-rhazouani/repos", "events_url": "https://api.github.com/users/redouan-rhazouani/events{/privacy}", "received_events_url": "https://api.github.com/users/redouan-rhazouani/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4356/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4356/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/6427
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6427/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6427/comments
https://api.github.com/repos/ollama/ollama/issues/6427/events
https://github.com/ollama/ollama/pull/6427
2,474,176,554
PR_kwDOJ0Z1Ps54x7Nu
6,427
CI: handle directories during checksum
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-08-19T20:45:58
2024-08-19T20:48:48
2024-08-19T20:48:45
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6427", "html_url": "https://github.com/ollama/ollama/pull/6427", "diff_url": "https://github.com/ollama/ollama/pull/6427.diff", "patch_url": "https://github.com/ollama/ollama/pull/6427.patch", "merged_at": "2024-08-19T20:48:45" }
null
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6427/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6427/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2818
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2818/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2818/comments
https://api.github.com/repos/ollama/ollama/issues/2818/events
https://github.com/ollama/ollama/issues/2818
2,159,956,174
I_kwDOJ0Z1Ps6AvlDO
2,818
DNS `i/o timeout` when running `ollama pull`
{ "login": "sohanasarah", "id": 38297094, "node_id": "MDQ6VXNlcjM4Mjk3MDk0", "avatar_url": "https://avatars.githubusercontent.com/u/38297094?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sohanasarah", "html_url": "https://github.com/sohanasarah", "followers_url": "https://api.github.com/users/sohanasarah/followers", "following_url": "https://api.github.com/users/sohanasarah/following{/other_user}", "gists_url": "https://api.github.com/users/sohanasarah/gists{/gist_id}", "starred_url": "https://api.github.com/users/sohanasarah/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sohanasarah/subscriptions", "organizations_url": "https://api.github.com/users/sohanasarah/orgs", "repos_url": "https://api.github.com/users/sohanasarah/repos", "events_url": "https://api.github.com/users/sohanasarah/events{/privacy}", "received_events_url": "https://api.github.com/users/sohanasarah/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
7
2024-02-28T22:11:15
2024-06-18T11:19:55
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Ollama was working perfectly on my machine, and I had llama2 installed. But now, when I try to install a new model, it gives me the following error: ``` pulling manifest Error: pull model manifest: Get "https://registry.ollama.ai/v2/library/mixtral/manifests/latest": dial tcp: lookup registry.ollama.ai on 172.25.96.1:53: read udp 172.25.107.139:59735->172.25.96.1:53: i/o timeout ``` I tried stopping and restarting the server, and removing the existing model and downloading it again. Nothing works. Any suggestions on how to resolve this?
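Editor's note: this error is the host's DNS resolver timing out (the 172.25.x.x addresses suggest WSL2, whose generated /etc/resolv.conf is a frequent culprit). A small sketch to test name resolution independently of Ollama:

```python
import socket

# If this also hangs or raises, the problem is the machine's DNS setup,
# not Ollama; fixing the resolver (common under WSL2) usually resolves it.
try:
    infos = socket.getaddrinfo("registry.ollama.ai", 443)
    print("resolved:", sorted({info[4][0] for info in infos}))
except socket.gaierror as err:
    print("DNS lookup failed:", err)
```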
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2818/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2818/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/6877
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6877/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6877/comments
https://api.github.com/repos/ollama/ollama/issues/6877/events
https://github.com/ollama/ollama/issues/6877
2,536,128,097
I_kwDOJ0Z1Ps6XKj5h
6,877
OpenAI o1-like Chain-of-thought (CoT) inference workflow
{ "login": "kozuch", "id": 1474153, "node_id": "MDQ6VXNlcjE0NzQxNTM=", "avatar_url": "https://avatars.githubusercontent.com/u/1474153?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kozuch", "html_url": "https://github.com/kozuch", "followers_url": "https://api.github.com/users/kozuch/followers", "following_url": "https://api.github.com/users/kozuch/following{/other_user}", "gists_url": "https://api.github.com/users/kozuch/gists{/gist_id}", "starred_url": "https://api.github.com/users/kozuch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kozuch/subscriptions", "organizations_url": "https://api.github.com/users/kozuch/orgs", "repos_url": "https://api.github.com/users/kozuch/repos", "events_url": "https://api.github.com/users/kozuch/events{/privacy}", "received_events_url": "https://api.github.com/users/kozuch/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
7
2024-09-19T11:56:08
2024-09-23T23:35:55
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Well, I am surprised that the "main" and "great" new feature of the new OpenAI o1 model is actually a "more sophisticated" inference workflow employing something like a Chain-of-thought process. Basically, I understand it as: even a "dumb" model can perform much better when it "thinks more" during inference. The great news they are telling us is that by "thinking more" you can get smarter, which is probably very true for humans as well.

The o1 model is probably trained to come up with its own CoT workflow for any given prompt, but I think it could be interesting to hardcode some kind of workflow that any standard LLM may try to follow during inference. Basically, let the model analyze the prompt from various perspectives first and then judge what type of "inference workflow" it should employ.

The hardcoded workflow could look like this (see the sketch after this list):

1. Prompt is submitted to the model.
2. The model asks itself a couple of hard-coded questions about the prompt, maybe:
   - is it some light conversation (needing soft skills like empathy etc.)
   - does it look like a science problem (math, physics etc.)
   - can the prompt be broken down into subtasks - if yes, the workflow feeds each subtask into the model separately, then combines the results etc.
   - is the problem easy/hard
   - do I have all the information I need (do I need to ask the user for further input/clarification)
3. The workflow would run, maybe in multiple iterations at its various levels, maybe applying some "quality checks" to the answer.
4. The output is presented to the user (the "hidden" thinking may optionally be viewed by the user).

Anyone having the same feelings as I do about the CoT thing? Looks like even a hard-coded process may give some interesting results.
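A minimal sketch of what such a hardcoded workflow could look like, assuming the official `ollama` Python client and a locally pulled `llama3` model (both are assumptions for illustration, not part of the proposal): one pass analyzes the prompt, one pass answers with that analysis as hidden context, and one pass runs a simple quality check.

```python
import ollama  # official Python client, assumed installed; model assumed pulled

MODEL = "llama3"

def ask(messages):
    # Thin wrapper around a single chat call.
    return ollama.chat(model=MODEL, messages=messages)["message"]["content"]

def hardcoded_cot(prompt: str) -> str:
    # Step 2 of the proposal: hard-coded self-questions about the prompt.
    analysis = ask([
        {"role": "system", "content": "Classify the prompt (light conversation or "
         "science problem), split it into subtasks, and list any missing information."},
        {"role": "user", "content": prompt},
    ])
    # Step 3: answer with the hidden analysis as extra context.
    draft = ask([
        {"role": "system", "content": "Use this private analysis while answering:\n" + analysis},
        {"role": "user", "content": prompt},
    ])
    # Step 3 (continued): one simple quality-check iteration.
    return ask([
        {"role": "user", "content": f"Prompt: {prompt}\nDraft answer: {draft}\n"
         "Fix any errors in the draft; otherwise return it unchanged."},
    ])

print(hardcoded_cot("Why is the sky blue?"))  # step 4: only the final answer is shown
```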
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6877/reactions", "total_count": 7, "+1": 7, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6877/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/3559
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3559/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3559/comments
https://api.github.com/repos/ollama/ollama/issues/3559/events
https://github.com/ollama/ollama/pull/3559
2,233,882,790
PR_kwDOJ0Z1Ps5sJ6U-
3,559
ci: use go-version-file
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-04-09T16:50:41
2024-04-09T18:03:19
2024-04-09T18:03:19
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3559", "html_url": "https://github.com/ollama/ollama/pull/3559", "diff_url": "https://github.com/ollama/ollama/pull/3559.diff", "patch_url": "https://github.com/ollama/ollama/pull/3559.patch", "merged_at": "2024-04-09T18:03:18" }
Use `go-version-file` to synchronize Go versions between go.mod and CI.
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3559/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3559/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/772
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/772/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/772/comments
https://api.github.com/repos/ollama/ollama/issues/772/events
https://github.com/ollama/ollama/pull/772
1,940,812,525
PR_kwDOJ0Z1Ps5cra6Y
772
linux: add user to the `ollama` group on install
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2023-10-12T21:22:16
2023-10-23T21:06:32
2023-10-23T21:06:31
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/772", "html_url": "https://github.com/ollama/ollama/pull/772", "diff_url": "https://github.com/ollama/ollama/pull/772.diff", "patch_url": "https://github.com/ollama/ollama/pull/772.patch", "merged_at": "2023-10-23T21:06:31" }
- Run the ollama system service as the current user

Resolves #613
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/772/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/772/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3468
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3468/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3468/comments
https://api.github.com/repos/ollama/ollama/issues/3468/events
https://github.com/ollama/ollama/pull/3468
2,221,684,533
PR_kwDOJ0Z1Ps5rgCYZ
3,468
feat: add NeuralSpeed backend to boost up the inference speed on CPU
{ "login": "ftian1", "id": 16394660, "node_id": "MDQ6VXNlcjE2Mzk0NjYw", "avatar_url": "https://avatars.githubusercontent.com/u/16394660?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ftian1", "html_url": "https://github.com/ftian1", "followers_url": "https://api.github.com/users/ftian1/followers", "following_url": "https://api.github.com/users/ftian1/following{/other_user}", "gists_url": "https://api.github.com/users/ftian1/gists{/gist_id}", "starred_url": "https://api.github.com/users/ftian1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ftian1/subscriptions", "organizations_url": "https://api.github.com/users/ftian1/orgs", "repos_url": "https://api.github.com/users/ftian1/repos", "events_url": "https://api.github.com/users/ftian1/events{/privacy}", "received_events_url": "https://api.github.com/users/ftian1/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-04-03T00:48:34
2024-11-21T09:29:19
2024-11-21T09:29:18
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3468", "html_url": "https://github.com/ollama/ollama/pull/3468", "diff_url": "https://github.com/ollama/ollama/pull/3468.diff", "patch_url": "https://github.com/ollama/ollama/pull/3468.patch", "merged_at": null }
This PR integrates NeuralSpeed as a new backend in Ollama to provide better performance on x86_64 platforms. [NeuralSpeed](https://github.com/intel/neural-speed) is an LLM acceleration library that provides highly efficient GEMM kernels and fusions on AVX/AVX2/AVX512. We can achieve better performance, as shown [here](https://medium.com/@NeuralCompressor/llm-performance-of-intel-extension-for-transformers-f7d061556176).

Command to enable the NeuralSpeed backend:

```
OLLAMA_LLM_LIBRARY=nscpu_avx2 ./ollama
```
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3468/reactions", "total_count": 10, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 6, "rocket": 4, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3468/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4678
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4678/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4678/comments
https://api.github.com/repos/ollama/ollama/issues/4678/events
https://github.com/ollama/ollama/issues/4678
2,320,549,377
I_kwDOJ0Z1Ps6KUMYB
4,678
Please support Baichuan series models
{ "login": "Han-Huaqiao", "id": 41456966, "node_id": "MDQ6VXNlcjQxNDU2OTY2", "avatar_url": "https://avatars.githubusercontent.com/u/41456966?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Han-Huaqiao", "html_url": "https://github.com/Han-Huaqiao", "followers_url": "https://api.github.com/users/Han-Huaqiao/followers", "following_url": "https://api.github.com/users/Han-Huaqiao/following{/other_user}", "gists_url": "https://api.github.com/users/Han-Huaqiao/gists{/gist_id}", "starred_url": "https://api.github.com/users/Han-Huaqiao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Han-Huaqiao/subscriptions", "organizations_url": "https://api.github.com/users/Han-Huaqiao/orgs", "repos_url": "https://api.github.com/users/Han-Huaqiao/repos", "events_url": "https://api.github.com/users/Han-Huaqiao/events{/privacy}", "received_events_url": "https://api.github.com/users/Han-Huaqiao/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
1
2024-05-28T09:16:31
2024-05-30T07:13:19
2024-05-30T07:11:59
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I expected to use Ollama to run the Baichuan-7B-base model, but an error occurred: `Error: Models based on 'BaichuanForCausalLM' are not yet supported.` Please tell me when the Baichuan series models will be supported.
{ "login": "Han-Huaqiao", "id": 41456966, "node_id": "MDQ6VXNlcjQxNDU2OTY2", "avatar_url": "https://avatars.githubusercontent.com/u/41456966?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Han-Huaqiao", "html_url": "https://github.com/Han-Huaqiao", "followers_url": "https://api.github.com/users/Han-Huaqiao/followers", "following_url": "https://api.github.com/users/Han-Huaqiao/following{/other_user}", "gists_url": "https://api.github.com/users/Han-Huaqiao/gists{/gist_id}", "starred_url": "https://api.github.com/users/Han-Huaqiao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Han-Huaqiao/subscriptions", "organizations_url": "https://api.github.com/users/Han-Huaqiao/orgs", "repos_url": "https://api.github.com/users/Han-Huaqiao/repos", "events_url": "https://api.github.com/users/Han-Huaqiao/events{/privacy}", "received_events_url": "https://api.github.com/users/Han-Huaqiao/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4678/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4678/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/3359
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3359/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3359/comments
https://api.github.com/repos/ollama/ollama/issues/3359/events
https://github.com/ollama/ollama/issues/3359
2,207,847,294
I_kwDOJ0Z1Ps6DmRN-
3,359
Ollama Logo
{ "login": "corani", "id": 480775, "node_id": "MDQ6VXNlcjQ4MDc3NQ==", "avatar_url": "https://avatars.githubusercontent.com/u/480775?v=4", "gravatar_id": "", "url": "https://api.github.com/users/corani", "html_url": "https://github.com/corani", "followers_url": "https://api.github.com/users/corani/followers", "following_url": "https://api.github.com/users/corani/following{/other_user}", "gists_url": "https://api.github.com/users/corani/gists{/gist_id}", "starred_url": "https://api.github.com/users/corani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/corani/subscriptions", "organizations_url": "https://api.github.com/users/corani/orgs", "repos_url": "https://api.github.com/users/corani/repos", "events_url": "https://api.github.com/users/corani/events{/privacy}", "received_events_url": "https://api.github.com/users/corani/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-03-26T10:26:21
2024-04-15T19:43:06
2024-04-15T19:43:06
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I was playing with the `llava` model to create image-generation prompts based on an existing image. Starting with the Ollama logo, I made a few iterations between llava and dall-e and ended up with the following result, which I didn't want to keep to myself 😄

![_cc4036ed-af48-4625-a85c-28b9e0b72249](https://github.com/ollama/ollama/assets/480775/2a447015-f2d6-46c2-9aac-0148b7b8746e)

Use it however you see fit!
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3359/reactions", "total_count": 14, "+1": 1, "-1": 0, "laugh": 0, "hooray": 2, "confused": 0, "heart": 8, "rocket": 1, "eyes": 2 }
https://api.github.com/repos/ollama/ollama/issues/3359/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2410
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2410/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2410/comments
https://api.github.com/repos/ollama/ollama/issues/2410/events
https://github.com/ollama/ollama/pull/2410
2,125,053,307
PR_kwDOJ0Z1Ps5mXqrK
2,410
Added Encoding endpoint
{ "login": "suvalaki", "id": 18386930, "node_id": "MDQ6VXNlcjE4Mzg2OTMw", "avatar_url": "https://avatars.githubusercontent.com/u/18386930?v=4", "gravatar_id": "", "url": "https://api.github.com/users/suvalaki", "html_url": "https://github.com/suvalaki", "followers_url": "https://api.github.com/users/suvalaki/followers", "following_url": "https://api.github.com/users/suvalaki/following{/other_user}", "gists_url": "https://api.github.com/users/suvalaki/gists{/gist_id}", "starred_url": "https://api.github.com/users/suvalaki/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/suvalaki/subscriptions", "organizations_url": "https://api.github.com/users/suvalaki/orgs", "repos_url": "https://api.github.com/users/suvalaki/repos", "events_url": "https://api.github.com/users/suvalaki/events{/privacy}", "received_events_url": "https://api.github.com/users/suvalaki/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
0
2024-02-08T12:18:42
2024-02-08T12:23:18
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
true
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2410", "html_url": "https://github.com/ollama/ollama/pull/2410", "diff_url": "https://github.com/ollama/ollama/pull/2410.diff", "patch_url": "https://github.com/ollama/ollama/pull/2410.patch", "merged_at": null }
It seems useful to expose the encoding function of a model that is called by the generate methods, to enable token counting (without running the model end to end).

Some thoughts:
- I'm not sure whether replicating the logic that modifies the prompt (the same as the generate function) is correct here, or whether we should simplify and just look at the raw prompt string.
- I haven't made a similar endpoint for getting the tokens from a chat call. My current thinking is that simplification would be better and I don't necessarily need to replicate all the prompt logic.

I'm interested in feedback and whether folks have any pushback on exposing the function. Following up from https://github.com/ollama/ollama/issues/1345

Btw, the proposal adds a new endpoint, `/api/encode`. At the request level it looks like this:

Input
```json
{
  "model": "mistral:latest",
  "prompt": "Why is the sky blue?"
}
```

Output
```json
{
  "model": "mistral:latest",
  "created_at": "2024-02-05T21:49:44.472893Z",
  "total_duration": 8965307875,
  "load_duration": 8961889917,
  "context": [733, 16289, 28793, 28705, 4315, 349, 272, 7212, 5045, 28804, 733, 28748, 16289, 28793, 13],
  "prompt_eval_count": 15
}
```
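A minimal client-side sketch of how the proposed endpoint could be used for token counting; note that the `/api/encode` route exists only in this proposal (it is not part of released Ollama), and `requests` is an assumed dependency:

```python
import requests

def count_tokens(prompt: str, model: str = "mistral:latest") -> int:
    # Call the /api/encode endpoint proposed in this PR.
    resp = requests.post(
        "http://localhost:11434/api/encode",
        json={"model": model, "prompt": prompt},
        timeout=60,
    )
    resp.raise_for_status()
    body = resp.json()
    # "context" holds the token ids; "prompt_eval_count" is their count.
    return body["prompt_eval_count"]

print(count_tokens("Why is the sky blue?"))  # 15 for the example above
```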
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2410/reactions", "total_count": 4, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 1, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2410/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7381
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7381/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7381/comments
https://api.github.com/repos/ollama/ollama/issues/7381/events
https://github.com/ollama/ollama/issues/7381
2,616,234,948
I_kwDOJ0Z1Ps6b8JPE
7,381
Unrooted Termux install process
{ "login": "b9Joker108", "id": 147242971, "node_id": "U_kgDOCMa_2w", "avatar_url": "https://avatars.githubusercontent.com/u/147242971?v=4", "gravatar_id": "", "url": "https://api.github.com/users/b9Joker108", "html_url": "https://github.com/b9Joker108", "followers_url": "https://api.github.com/users/b9Joker108/followers", "following_url": "https://api.github.com/users/b9Joker108/following{/other_user}", "gists_url": "https://api.github.com/users/b9Joker108/gists{/gist_id}", "starred_url": "https://api.github.com/users/b9Joker108/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/b9Joker108/subscriptions", "organizations_url": "https://api.github.com/users/b9Joker108/orgs", "repos_url": "https://api.github.com/users/b9Joker108/repos", "events_url": "https://api.github.com/users/b9Joker108/events{/privacy}", "received_events_url": "https://api.github.com/users/b9Joker108/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
4
2024-10-27T00:42:22
2024-10-27T14:37:38
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

I am endeavouring to set up an `ollama` server on my unrooted Termux host environment; please refer to: https://github.com/ollama/ollama/issues/7349#issuecomment-2439776813 and https://github.com/ollama/ollama/issues/7292#issuecomment-2439781839

@vpnry @dhiltgen

So, the process is:

## Process

1. git clone --depth 1 https://github.com/ollama/ollama.git
2. cd ollama
3. go generate ./...
4. delete the two lines of code, as per: https://github.com/ollama/ollama/issues/7292#issuecomment-2427773036
5. and save the file.
6. go build .
7. ./ollama serve &
8. and prosper!

Is that the go?

## Device details:

```zsh
❯ termux-info
Termux Variables:
TERMUX_API_VERSION=0.50.1
TERMUX_APK_RELEASE=F_DROID
TERMUX_APP_PACKAGE_MANAGER=apt
TERMUX_APP_PID=3465
TERMUX_IS_DEBUGGABLE_BUILD=0
TERMUX_MAIN_PACKAGE_FORMAT=debian
TERMUX_VERSION=0.118.1
TERMUX__USER_ID=0
Packages CPU architecture:
aarch64
Subscribed repositories:
# sources.list
deb https://packages-cf.termux.dev/apt/termux-main stable main
# sources.list.d/pointless.list
deb https://its-pointless.github.io/files/21 termux extras
# sources.list.d/ivam3-termux-packages.list
deb [trusted=yes arch=all] https://ivam3.github.io/termux-packages stable extras
# x11-repo (sources.list.d/x11.list)
deb https://packages-cf.termux.dev/apt/termux-x11 x11 main
# tur-repo (sources.list.d/tur.list)
deb https://tur.kcubeterm.com tur-packages tur tur-on-device tur-continuous
# root-repo (sources.list.d/root.list)
deb https://packages-cf.termux.dev/apt/termux-root root stable
# sources.list.d/rendiix.list
deb https://rendiix.github.io android-tools termux
Updatable packages:
command-not-found/stable 2.4.0-48 aarch64 [upgradable from: 2.4.0-47]
libgit2/stable 1.8.3 aarch64 [upgradable from: 1.8.2]
termux-tools version:
1.44.1
Android version:
14
Kernel build information:
Linux localhost 5.15.123-android13-8-28577532-abX910XXS4BXG5 #1 SMP PREEMPT Thu Jul 11 02:48:07 UTC 2024 aarch64 Android
Device manufacturer:
samsung
Device model:
SM-X910
LD Variables:
LD_LIBRARY_PATH=:/data/data/com.termux/files/home/.local/lib/ollama:/data/data/com.termux/files/usr/lib:/data/data/com.termux/files/home/install/lib
LD_PRELOAD=/data/data/com.termux/files/usr/lib/libtermux-exec.so
Installed termux plugins:
com.termux.widget versionCode:13
com.termux.x11 versionCode:14
com.termux.api versionCode:51
com.termux.window versionCode:15
com.termux.styling versionCode:1000
```

### OS

Linux

### GPU

Other

### CPU

Other

### Ollama version

_No response_
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7381/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7381/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/2021
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2021/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2021/comments
https://api.github.com/repos/ollama/ollama/issues/2021/events
https://github.com/ollama/ollama/pull/2021
2,084,753,027
PR_kwDOJ0Z1Ps5kPKJD
2,021
Update README.md - Library - Haystack
{ "login": "sachinsachdeva", "id": 7625278, "node_id": "MDQ6VXNlcjc2MjUyNzg=", "avatar_url": "https://avatars.githubusercontent.com/u/7625278?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinsachdeva", "html_url": "https://github.com/sachinsachdeva", "followers_url": "https://api.github.com/users/sachinsachdeva/followers", "following_url": "https://api.github.com/users/sachinsachdeva/following{/other_user}", "gists_url": "https://api.github.com/users/sachinsachdeva/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinsachdeva/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinsachdeva/subscriptions", "organizations_url": "https://api.github.com/users/sachinsachdeva/orgs", "repos_url": "https://api.github.com/users/sachinsachdeva/repos", "events_url": "https://api.github.com/users/sachinsachdeva/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinsachdeva/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-01-16T19:57:08
2024-01-18T21:38:33
2024-01-18T21:38:32
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2021", "html_url": "https://github.com/ollama/ollama/pull/2021", "diff_url": "https://github.com/ollama/ollama/pull/2021.diff", "patch_url": "https://github.com/ollama/ollama/pull/2021.patch", "merged_at": "2024-01-18T21:38:32" }
Updated readme with the web link for haystack ollama integration
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2021/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2021/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4649
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4649/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4649/comments
https://api.github.com/repos/ollama/ollama/issues/4649/events
https://github.com/ollama/ollama/issues/4649
2,317,761,490
I_kwDOJ0Z1Ps6KJjvS
4,649
Settings File In Addition to Environment Flags
{ "login": "chigkim", "id": 22120994, "node_id": "MDQ6VXNlcjIyMTIwOTk0", "avatar_url": "https://avatars.githubusercontent.com/u/22120994?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chigkim", "html_url": "https://github.com/chigkim", "followers_url": "https://api.github.com/users/chigkim/followers", "following_url": "https://api.github.com/users/chigkim/following{/other_user}", "gists_url": "https://api.github.com/users/chigkim/gists{/gist_id}", "starred_url": "https://api.github.com/users/chigkim/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chigkim/subscriptions", "organizations_url": "https://api.github.com/users/chigkim/orgs", "repos_url": "https://api.github.com/users/chigkim/repos", "events_url": "https://api.github.com/users/chigkim/events{/privacy}", "received_events_url": "https://api.github.com/users/chigkim/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
1
2024-05-26T15:21:09
2024-05-31T19:57:59
2024-05-31T19:57:59
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Now there are quite a few features relying on environment variables. Can we have a way to control these features using a settings file, like ~/.ollama/settings.yaml on macOS?

```yaml
OLLAMA_HOST: "0.0.0.0"
OLLAMA_NOHISTORY: true
OLLAMA_FLASH_ATTENTION: true
OLLAMA_NUM_PARALLEL: 4
OLLAMA_MAX_LOADED: 2
```

Thanks!
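A minimal sketch of how such a settings file could be merged with the environment, with environment variables taking precedence so existing setups keep working; the path and key names follow the example above, and PyYAML is an assumed dependency:

```python
import os
from pathlib import Path

import yaml  # assumed dependency: PyYAML

SETTINGS_PATH = Path.home() / ".ollama" / "settings.yaml"  # path from the request above

def load_settings() -> dict:
    """Read the settings file, then let environment variables override it."""
    settings = {}
    if SETTINGS_PATH.exists():
        settings.update(yaml.safe_load(SETTINGS_PATH.read_text()) or {})
    for key in list(settings):
        if key in os.environ:  # an explicit env var wins over the file
            settings[key] = os.environ[key]
    return settings

print(load_settings().get("OLLAMA_HOST", "127.0.0.1"))
```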
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4649/reactions", "total_count": 4, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4649/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4752
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4752/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4752/comments
https://api.github.com/repos/ollama/ollama/issues/4752/events
https://github.com/ollama/ollama/issues/4752
2,328,183,732
I_kwDOJ0Z1Ps6KxUO0
4,752
Multi-GPU and batch management
{ "login": "LaetLanf", "id": 131473617, "node_id": "U_kgDOB9Yg0Q", "avatar_url": "https://avatars.githubusercontent.com/u/131473617?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LaetLanf", "html_url": "https://github.com/LaetLanf", "followers_url": "https://api.github.com/users/LaetLanf/followers", "following_url": "https://api.github.com/users/LaetLanf/following{/other_user}", "gists_url": "https://api.github.com/users/LaetLanf/gists{/gist_id}", "starred_url": "https://api.github.com/users/LaetLanf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LaetLanf/subscriptions", "organizations_url": "https://api.github.com/users/LaetLanf/orgs", "repos_url": "https://api.github.com/users/LaetLanf/repos", "events_url": "https://api.github.com/users/LaetLanf/events{/privacy}", "received_events_url": "https://api.github.com/users/LaetLanf/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
1
2024-05-31T16:19:29
2024-06-02T09:09:29
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello,

I'm confident that a feature enabling multi-GPU optimization and batch management would be beneficial. I may have made a mistake, but I couldn't effectively use the `OLLAMA_NUM_PARALLEL` and `OLLAMA_MAX_LOADED_MODELS` settings to optimize my Linux VM, which has four A100 80GB GPUs, running Llama3:70b-instruct. I finally succeeded in using the 4 GPUs in parallel, thanks to separate Docker containers assigned to different ports. I also used AsyncClient() with asyncio for effective asynchronous operations. In any case, I'm happy to share my code if it might help someone.

# Assign docker containers to GPUs and ports
```
sudo docker run -d --gpus=1 -v ollama:/root/.ollama -p 11435:11434 --name ollama0 ollama/ollama:latest
sudo docker run -d --gpus=2 -v ollama:/root/.ollama -p 11436:11434 --name ollama1 ollama/ollama:latest
sudo docker run -d --gpus=3 -v ollama:/root/.ollama -p 11437:11434 --name ollama2 ollama/ollama:latest
sudo docker run -d --gpus=all -v ollama:/root/.ollama -p 11438:11434 --name ollama3 ollama/ollama:latest
```

# Pull llama3:70b-instruct
```
sudo docker exec -it ollama0 ollama pull llama3:70b-instruct
sudo docker exec -it ollama1 ollama pull llama3:70b-instruct
sudo docker exec -it ollama2 ollama pull llama3:70b-instruct
sudo docker exec -it ollama3 ollama pull llama3:70b-instruct
```

# Python imports
```
import asyncio
import ollama
from ollama import AsyncClient
```

# Chat with Ollama through an asynchronous Python function
```
async def ollama_chat_solo(client, messages, model_name):
    response = await client.chat(model=model_name, messages=messages, keep_alive=-1)
    return response
```

# Batch processing, Ollama client and queue management
```
async def ollama_chat_batches(df, client_pool, sys_instruction, model_name):
    nb_questions = len(df['id_question'])

    # Create an empty queue:
    task_queue = asyncio.Queue()

    # Build and add each task to the queue, one per client in the pool:
    for i in range(0, nb_questions, 4):
        for j in range(len(client_pool)):
            if i + j < nb_questions:
                id_question = df['id_question'][i + j]
                question = df['question'][i + j]
                messages = [
                    {'role': "system", 'content': sys_instruction},
                    {'role': "user", 'content': question}
                ]
                task = asyncio.ensure_future(ollama_chat_solo(client_pool[j], messages, model_name))
                await task_queue.put((id_question, task))

    # Process tasks in the order they were added to the queue
    responses = []
    while not task_queue.empty():
        id_question, task = await task_queue.get()
        response = await task  # Wait for task completion
        if response is not None:
            responses.append((id_question, response))  # Store response with its question ID
    return responses
```

# Calling
```
model_name = 'llama3:70b-instruct'
client_pool = [AsyncClient(host='http://localhost:{}'.format(port)) for port in range(11435, 11439)]
sys_instruction = """You are an expert in geography. Answer the question."""
responses = await ollama_chat_batches(questions_df, client_pool, sys_instruction, model_name)
```
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4752/reactions", "total_count": 5, "+1": 5, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4752/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/6415
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6415/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6415/comments
https://api.github.com/repos/ollama/ollama/issues/6415/events
https://github.com/ollama/ollama/issues/6415
2,472,764,600
I_kwDOJ0Z1Ps6TY2S4
6,415
Feature Request: Adding FalconMamba 7B Instruct in `ollama`
{ "login": "younesbelkada", "id": 49240599, "node_id": "MDQ6VXNlcjQ5MjQwNTk5", "avatar_url": "https://avatars.githubusercontent.com/u/49240599?v=4", "gravatar_id": "", "url": "https://api.github.com/users/younesbelkada", "html_url": "https://github.com/younesbelkada", "followers_url": "https://api.github.com/users/younesbelkada/followers", "following_url": "https://api.github.com/users/younesbelkada/following{/other_user}", "gists_url": "https://api.github.com/users/younesbelkada/gists{/gist_id}", "starred_url": "https://api.github.com/users/younesbelkada/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/younesbelkada/subscriptions", "organizations_url": "https://api.github.com/users/younesbelkada/orgs", "repos_url": "https://api.github.com/users/younesbelkada/repos", "events_url": "https://api.github.com/users/younesbelkada/events{/privacy}", "received_events_url": "https://api.github.com/users/younesbelkada/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
6
2024-08-19T08:25:37
2024-10-13T02:38:10
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
FalconMamba is being added to llama.cpp here: https://github.com/ggerganov/llama.cpp/pull/9074

It would be nice to have the first SSM-based LLM on Ollama!

Instruct weights: https://huggingface.co/tiiuae/falcon-mamba-7b-instruct

GGUF weights: https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6415/reactions", "total_count": 12, "+1": 12, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6415/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/666
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/666/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/666/comments
https://api.github.com/repos/ollama/ollama/issues/666/events
https://github.com/ollama/ollama/issues/666
1,920,870,201
I_kwDOJ0Z1Ps5yfic5
666
Linux Installation `curl` command fails
{ "login": "Shihab-Shahriar", "id": 10344623, "node_id": "MDQ6VXNlcjEwMzQ0NjIz", "avatar_url": "https://avatars.githubusercontent.com/u/10344623?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Shihab-Shahriar", "html_url": "https://github.com/Shihab-Shahriar", "followers_url": "https://api.github.com/users/Shihab-Shahriar/followers", "following_url": "https://api.github.com/users/Shihab-Shahriar/following{/other_user}", "gists_url": "https://api.github.com/users/Shihab-Shahriar/gists{/gist_id}", "starred_url": "https://api.github.com/users/Shihab-Shahriar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Shihab-Shahriar/subscriptions", "organizations_url": "https://api.github.com/users/Shihab-Shahriar/orgs", "repos_url": "https://api.github.com/users/Shihab-Shahriar/repos", "events_url": "https://api.github.com/users/Shihab-Shahriar/events{/privacy}", "received_events_url": "https://api.github.com/users/Shihab-Shahriar/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 5755339642, "node_id": "LA_kwDOJ0Z1Ps8AAAABVwuDeg", "url": "https://api.github.com/repos/ollama/ollama/labels/linux", "name": "linux", "color": "516E70", "default": false, "description": "" } ]
closed
false
null
[]
null
15
2023-10-01T17:10:05
2024-08-01T15:30:00
2024-01-16T22:15:09
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
`curl https://ollama.ai/install.sh | sh`

This leads to:

```
>>> Downloading ollama...
Warning: Failed to open the file /tmp/tmp.hE5cI4TvS7/ollama: No such file or
  0%##O#-#  Warning: directory
curl: (23) Failure writing output to destination
```

Ubuntu 22.04.3 LTS
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/666/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/666/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/7401
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7401/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7401/comments
https://api.github.com/repos/ollama/ollama/issues/7401/events
https://github.com/ollama/ollama/issues/7401
2,618,986,629
I_kwDOJ0Z1Ps6cGpCF
7,401
Configure docker image to start with some models installed
{ "login": "fnacarellidev", "id": 97247063, "node_id": "U_kgDOBcvfVw", "avatar_url": "https://avatars.githubusercontent.com/u/97247063?v=4", "gravatar_id": "", "url": "https://api.github.com/users/fnacarellidev", "html_url": "https://github.com/fnacarellidev", "followers_url": "https://api.github.com/users/fnacarellidev/followers", "following_url": "https://api.github.com/users/fnacarellidev/following{/other_user}", "gists_url": "https://api.github.com/users/fnacarellidev/gists{/gist_id}", "starred_url": "https://api.github.com/users/fnacarellidev/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fnacarellidev/subscriptions", "organizations_url": "https://api.github.com/users/fnacarellidev/orgs", "repos_url": "https://api.github.com/users/fnacarellidev/repos", "events_url": "https://api.github.com/users/fnacarellidev/events{/privacy}", "received_events_url": "https://api.github.com/users/fnacarellidev/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
1
2024-10-28T16:57:35
2024-10-29T15:22:06
2024-10-29T15:22:06
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I think it would be very nice if we had an option to install some models in the build stage of the Ollama Docker image. Right now I have two workarounds to emulate this behaviour:

1. Have an init container that talks to the ollama container and installs the models. Not very good, because I can't cache that, so every time I restart the Kubernetes environment it has to reinstall each model, which might take long depending on the environment.
2. A Dockerfile that starts the server during the build and pulls the models:

```dockerfile
FROM ollama/ollama
RUN nohup bash -c "ollama serve &" && wait4x http http://127.0.0.1:11434 && ollama pull llava && ollama pull llama3
```

However, the 2nd option is not working very well: when I install the models that way, every request takes a long time to finish, and sometimes it doesn't finish at all, it simply hangs. I'm not sure if there's something wrong with the logic; I copied it from issue #957.
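For the init-container route, a minimal sketch of a pull script with a readiness wait, assuming the official `ollama` Python client and an in-cluster service named `ollama` (both are assumptions); pulls are layer-cached, so against a persistent volume a restart mostly verifies existing layers rather than re-downloading them:

```python
import time

from ollama import Client  # official ollama Python client, assumed installed

client = Client(host="http://ollama:11434")  # assumed in-cluster service name
models = ["llava", "llama3"]

# Wait until the server is reachable before pulling anything.
for _ in range(30):
    try:
        client.list()
        break
    except Exception:
        time.sleep(2)

for name in models:
    client.pull(name)  # cheap if the layers are already on the volume
```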
{ "login": "fnacarellidev", "id": 97247063, "node_id": "U_kgDOBcvfVw", "avatar_url": "https://avatars.githubusercontent.com/u/97247063?v=4", "gravatar_id": "", "url": "https://api.github.com/users/fnacarellidev", "html_url": "https://github.com/fnacarellidev", "followers_url": "https://api.github.com/users/fnacarellidev/followers", "following_url": "https://api.github.com/users/fnacarellidev/following{/other_user}", "gists_url": "https://api.github.com/users/fnacarellidev/gists{/gist_id}", "starred_url": "https://api.github.com/users/fnacarellidev/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fnacarellidev/subscriptions", "organizations_url": "https://api.github.com/users/fnacarellidev/orgs", "repos_url": "https://api.github.com/users/fnacarellidev/repos", "events_url": "https://api.github.com/users/fnacarellidev/events{/privacy}", "received_events_url": "https://api.github.com/users/fnacarellidev/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7401/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7401/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3006
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3006/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3006/comments
https://api.github.com/repos/ollama/ollama/issues/3006/events
https://github.com/ollama/ollama/pull/3006
2,176,459,602
PR_kwDOJ0Z1Ps5pGtUx
3,006
Replace assets on server start
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-03-08T17:20:43
2024-03-08T17:26:03
2024-03-08T17:26:02
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3006", "html_url": "https://github.com/ollama/ollama/pull/3006", "diff_url": "https://github.com/ollama/ollama/pull/3006.diff", "patch_url": "https://github.com/ollama/ollama/pull/3006.patch", "merged_at": null }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3006/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3006/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7197
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7197/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7197/comments
https://api.github.com/repos/ollama/ollama/issues/7197/events
https://github.com/ollama/ollama/issues/7197
2,585,716,651
I_kwDOJ0Z1Ps6aHuer
7,197
llama runner process no longer running: -1
{ "login": "Dhruv-1212", "id": 132161275, "node_id": "U_kgDOB-Ce-w", "avatar_url": "https://avatars.githubusercontent.com/u/132161275?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Dhruv-1212", "html_url": "https://github.com/Dhruv-1212", "followers_url": "https://api.github.com/users/Dhruv-1212/followers", "following_url": "https://api.github.com/users/Dhruv-1212/following{/other_user}", "gists_url": "https://api.github.com/users/Dhruv-1212/gists{/gist_id}", "starred_url": "https://api.github.com/users/Dhruv-1212/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Dhruv-1212/subscriptions", "organizations_url": "https://api.github.com/users/Dhruv-1212/orgs", "repos_url": "https://api.github.com/users/Dhruv-1212/repos", "events_url": "https://api.github.com/users/Dhruv-1212/events{/privacy}", "received_events_url": "https://api.github.com/users/Dhruv-1212/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-10-14T11:20:40
2024-10-15T06:22:44
2024-10-15T06:22:44
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

I am trying to run llama3 models but I get this error on both the pip installation and the Linux installation on a server with a Tesla T4 GPU; the same works fine on my local machine.

### OS

Linux

### GPU

Nvidia

### CPU

Intel

### Ollama version

0.3.3
{ "login": "Dhruv-1212", "id": 132161275, "node_id": "U_kgDOB-Ce-w", "avatar_url": "https://avatars.githubusercontent.com/u/132161275?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Dhruv-1212", "html_url": "https://github.com/Dhruv-1212", "followers_url": "https://api.github.com/users/Dhruv-1212/followers", "following_url": "https://api.github.com/users/Dhruv-1212/following{/other_user}", "gists_url": "https://api.github.com/users/Dhruv-1212/gists{/gist_id}", "starred_url": "https://api.github.com/users/Dhruv-1212/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Dhruv-1212/subscriptions", "organizations_url": "https://api.github.com/users/Dhruv-1212/orgs", "repos_url": "https://api.github.com/users/Dhruv-1212/repos", "events_url": "https://api.github.com/users/Dhruv-1212/events{/privacy}", "received_events_url": "https://api.github.com/users/Dhruv-1212/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7197/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7197/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6090
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6090/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6090/comments
https://api.github.com/repos/ollama/ollama/issues/6090/events
https://github.com/ollama/ollama/issues/6090
2,439,044,533
I_kwDOJ0Z1Ps6RYN21
6,090
Ollama seems to not work with long system prompts
{ "login": "austin-starks", "id": 53793927, "node_id": "MDQ6VXNlcjUzNzkzOTI3", "avatar_url": "https://avatars.githubusercontent.com/u/53793927?v=4", "gravatar_id": "", "url": "https://api.github.com/users/austin-starks", "html_url": "https://github.com/austin-starks", "followers_url": "https://api.github.com/users/austin-starks/followers", "following_url": "https://api.github.com/users/austin-starks/following{/other_user}", "gists_url": "https://api.github.com/users/austin-starks/gists{/gist_id}", "starred_url": "https://api.github.com/users/austin-starks/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/austin-starks/subscriptions", "organizations_url": "https://api.github.com/users/austin-starks/orgs", "repos_url": "https://api.github.com/users/austin-starks/repos", "events_url": "https://api.github.com/users/austin-starks/events{/privacy}", "received_events_url": "https://api.github.com/users/austin-starks/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-07-31T03:42:12
2024-07-31T12:43:26
2024-07-31T12:43:26
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

[I typed up the full problem here (with examples)](https://www.reddit.com/r/ollama/comments/1egddvv/getting_ollama_to_work_with_very_long_system/)

TL;DR: if I try to run Ollama with a very long system prompt, it seems to completely ignore it. Happy to provide as much detail as you need to fix this.

### OS

macOS

### GPU

Apple

### CPU

Apple

### Ollama version

0.2.8
{ "login": "austin-starks", "id": 53793927, "node_id": "MDQ6VXNlcjUzNzkzOTI3", "avatar_url": "https://avatars.githubusercontent.com/u/53793927?v=4", "gravatar_id": "", "url": "https://api.github.com/users/austin-starks", "html_url": "https://github.com/austin-starks", "followers_url": "https://api.github.com/users/austin-starks/followers", "following_url": "https://api.github.com/users/austin-starks/following{/other_user}", "gists_url": "https://api.github.com/users/austin-starks/gists{/gist_id}", "starred_url": "https://api.github.com/users/austin-starks/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/austin-starks/subscriptions", "organizations_url": "https://api.github.com/users/austin-starks/orgs", "repos_url": "https://api.github.com/users/austin-starks/repos", "events_url": "https://api.github.com/users/austin-starks/events{/privacy}", "received_events_url": "https://api.github.com/users/austin-starks/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6090/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6090/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4133
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4133/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4133/comments
https://api.github.com/repos/ollama/ollama/issues/4133/events
https://github.com/ollama/ollama/issues/4133
2,278,209,989
I_kwDOJ0Z1Ps6HyrnF
4,133
"which/max" command line options to help with sizing.
{ "login": "bigattichouse", "id": 67535, "node_id": "MDQ6VXNlcjY3NTM1", "avatar_url": "https://avatars.githubusercontent.com/u/67535?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bigattichouse", "html_url": "https://github.com/bigattichouse", "followers_url": "https://api.github.com/users/bigattichouse/followers", "following_url": "https://api.github.com/users/bigattichouse/following{/other_user}", "gists_url": "https://api.github.com/users/bigattichouse/gists{/gist_id}", "starred_url": "https://api.github.com/users/bigattichouse/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bigattichouse/subscriptions", "organizations_url": "https://api.github.com/users/bigattichouse/orgs", "repos_url": "https://api.github.com/users/bigattichouse/repos", "events_url": "https://api.github.com/users/bigattichouse/events{/privacy}", "received_events_url": "https://api.github.com/users/bigattichouse/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
0
2024-05-03T18:27:26
2024-05-03T18:28:06
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Frequently I have to experiment with the various quants available; I'd like a way to check which ones my machine can run instead of downloading and testing each one until I get one that works. This would save us all some bandwidth. `ollama which somemodel` to determine which models I can run. `ollama max somemodel` to choose the largest model from a list that I can run. Not sure how instruct/chat variants might interact, since those are usually after the colon, perhaps with dashes: `ollama which somemodel-instruct`? Or some wildcard: `ollama which somemodel:*instruct*`?
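A hypothetical sketch of what such a command could do, using the documented `/api/tags` endpoint for locally installed models and `psutil` for free memory. A real implementation would also need registry metadata for models not yet downloaded, plus KV-cache and VRAM accounting; this is only an illustration of the idea, not Ollama's actual logic:

```python
# Hypothetical "ollama which / ollama max" logic: list local models whose
# on-disk size fits in currently available RAM, largest first.
import psutil
import requests

free_bytes = psutil.virtual_memory().available
models = requests.get("http://localhost:11434/api/tags").json()["models"]

runnable = sorted(
    (m for m in models if m["size"] < free_bytes),
    key=lambda m: m["size"],
    reverse=True,
)
for m in runnable:
    print(f"{m['name']}: {m['size'] / 2**30:.1f} GiB")
if runnable:
    print("max:", runnable[0]["name"])  # the largest model that fits
```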
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4133/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4133/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/2663
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2663/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2663/comments
https://api.github.com/repos/ollama/ollama/issues/2663/events
https://github.com/ollama/ollama/issues/2663
2,148,213,419
I_kwDOJ0Z1Ps6ACyKr
2,663
gemma crashes ollama
{ "login": "donuts-are-good", "id": 96031819, "node_id": "U_kgDOBblUSw", "avatar_url": "https://avatars.githubusercontent.com/u/96031819?v=4", "gravatar_id": "", "url": "https://api.github.com/users/donuts-are-good", "html_url": "https://github.com/donuts-are-good", "followers_url": "https://api.github.com/users/donuts-are-good/followers", "following_url": "https://api.github.com/users/donuts-are-good/following{/other_user}", "gists_url": "https://api.github.com/users/donuts-are-good/gists{/gist_id}", "starred_url": "https://api.github.com/users/donuts-are-good/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/donuts-are-good/subscriptions", "organizations_url": "https://api.github.com/users/donuts-are-good/orgs", "repos_url": "https://api.github.com/users/donuts-are-good/repos", "events_url": "https://api.github.com/users/donuts-are-good/events{/privacy}", "received_events_url": "https://api.github.com/users/donuts-are-good/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
7
2024-02-22T05:09:20
2024-02-23T03:24:16
2024-02-22T16:05:22
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
![image](https://github.com/ollama/ollama/assets/96031819/58400f74-53e9-4d90-aea6-be291919a6f3)
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2663/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2663/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6658
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6658/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6658/comments
https://api.github.com/repos/ollama/ollama/issues/6658/events
https://github.com/ollama/ollama/pull/6658
2,508,535,448
PR_kwDOJ0Z1Ps56kOem
6,658
openai: support for structured outputs
{ "login": "iscy", "id": 294710, "node_id": "MDQ6VXNlcjI5NDcxMA==", "avatar_url": "https://avatars.githubusercontent.com/u/294710?v=4", "gravatar_id": "", "url": "https://api.github.com/users/iscy", "html_url": "https://github.com/iscy", "followers_url": "https://api.github.com/users/iscy/followers", "following_url": "https://api.github.com/users/iscy/following{/other_user}", "gists_url": "https://api.github.com/users/iscy/gists{/gist_id}", "starred_url": "https://api.github.com/users/iscy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/iscy/subscriptions", "organizations_url": "https://api.github.com/users/iscy/orgs", "repos_url": "https://api.github.com/users/iscy/repos", "events_url": "https://api.github.com/users/iscy/events{/privacy}", "received_events_url": "https://api.github.com/users/iscy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
5
2024-09-05T19:11:35
2024-11-13T15:26:25
2024-11-13T15:26:25
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6658", "html_url": "https://github.com/ollama/ollama/pull/6658", "diff_url": "https://github.com/ollama/ollama/pull/6658.diff", "patch_url": "https://github.com/ollama/ollama/pull/6658.patch", "merged_at": null }
This PR enables the [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) feature available on OpenAI. Using the math reasoning example they have on their website, here's the response of both OpenAI and Ollama using the exact same request:

OpenAI (gpt-4o-2024-08-06)

```
{
  "steps": [
    {
      "explanation": "We start with the equation 8x + 7 = -23. Our goal is to isolate x on one side of the equation.",
      "output": "8x + 7 = -23"
    },
    {
      "explanation": "Subtract 7 from both sides to begin isolating the term with x. This will help us get rid of the constant term on the left side.",
      "output": "8x + 7 - 7 = -23 - 7"
    },
    {
      "explanation": "Simplify both sides of the equation. On the left side, 7 - 7 cancels out, leaving us with 8x. On the right side, -23 - 7 equals -30.",
      "output": "8x = -30"
    },
    {
      "explanation": "Now, divide both sides by 8 to solve for x. This will isolate x completely.",
      "output": "8x / 8 = -30 / 8"
    },
    {
      "explanation": "Simplify the right side of the equation. -30 divided by 8 simplifies to -3.75.",
      "output": "x = -3.75"
    }
  ],
  "final_answer": "x = -3.75"
}
```

Ollama (llama3.1:8b)

```
{
  "steps": [
    {
      "explanation": "The first step is to isolate the term with the variable (in this case, x) on one side of the equation.",
      "output": "Subtract 7 from both sides: 8x + 7 - 7 = -23 - 7"
    },
    {
      "explanation": "This simplifies to:",
      "output": "8x = -30"
    },
    {
      "explanation": "The next step is to get rid of the coefficient (the number being multiplied by x).",
      "output": "Divide both sides by 8: 8x / 8 = -30 / 8"
    },
    {
      "explanation": "This simplifies to:",
      "output": "x = -3.75"
    }
  ],
  "final_answer": "-3.75"
}
```
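For reference, a request in the shape this PR targets, assuming the OpenAI-compatible endpoint at `/v1` accepts the same `response_format` payload as OpenAI's structured outputs (schema matches the example above):

```python
# Structured-outputs request against Ollama's OpenAI-compatible endpoint.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")

schema = {
    "type": "object",
    "properties": {
        "steps": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "explanation": {"type": "string"},
                    "output": {"type": "string"},
                },
                "required": ["explanation", "output"],
            },
        },
        "final_answer": {"type": "string"},
    },
    "required": ["steps", "final_answer"],
}

resp = client.chat.completions.create(
    model="llama3.1:8b",
    messages=[
        {"role": "system", "content": "You are a helpful math tutor."},
        {"role": "user", "content": "Solve 8x + 7 = -23 step by step."},
    ],
    response_format={
        "type": "json_schema",
        "json_schema": {"name": "math_reasoning", "schema": schema},
    },
)
print(resp.choices[0].message.content)
```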
{ "login": "iscy", "id": 294710, "node_id": "MDQ6VXNlcjI5NDcxMA==", "avatar_url": "https://avatars.githubusercontent.com/u/294710?v=4", "gravatar_id": "", "url": "https://api.github.com/users/iscy", "html_url": "https://github.com/iscy", "followers_url": "https://api.github.com/users/iscy/followers", "following_url": "https://api.github.com/users/iscy/following{/other_user}", "gists_url": "https://api.github.com/users/iscy/gists{/gist_id}", "starred_url": "https://api.github.com/users/iscy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/iscy/subscriptions", "organizations_url": "https://api.github.com/users/iscy/orgs", "repos_url": "https://api.github.com/users/iscy/repos", "events_url": "https://api.github.com/users/iscy/events{/privacy}", "received_events_url": "https://api.github.com/users/iscy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6658/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6658/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2797
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2797/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2797/comments
https://api.github.com/repos/ollama/ollama/issues/2797/events
https://github.com/ollama/ollama/issues/2797
2,157,882,000
I_kwDOJ0Z1Ps6AnqqQ
2,797
Please consider supporting Intel GPU ARC A770 (16G)
{ "login": "HelloMorningStar", "id": 46133290, "node_id": "MDQ6VXNlcjQ2MTMzMjkw", "avatar_url": "https://avatars.githubusercontent.com/u/46133290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/HelloMorningStar", "html_url": "https://github.com/HelloMorningStar", "followers_url": "https://api.github.com/users/HelloMorningStar/followers", "following_url": "https://api.github.com/users/HelloMorningStar/following{/other_user}", "gists_url": "https://api.github.com/users/HelloMorningStar/gists{/gist_id}", "starred_url": "https://api.github.com/users/HelloMorningStar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/HelloMorningStar/subscriptions", "organizations_url": "https://api.github.com/users/HelloMorningStar/orgs", "repos_url": "https://api.github.com/users/HelloMorningStar/repos", "events_url": "https://api.github.com/users/HelloMorningStar/events{/privacy}", "received_events_url": "https://api.github.com/users/HelloMorningStar/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 6677491450, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgJu-g", "url": "https://api.github.com/repos/ollama/ollama/labels/intel", "name": "intel", "color": "226E5B", "default": false, "description": "issues relating to Intel GPUs" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
3
2024-02-28T01:05:03
2024-04-15T22:33:39
2024-04-15T22:33:39
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Here is a demo of ARC A770 running llama2: https://www.reddit.com/r/LocalLLaMA/comments/1b0c6u8/llama_2_inference_with_pytorch_on_intel_arc/ The Intel Arc A770 is a powerful graphics card that is well-suited for a variety of tasks, including machine learning. It has 16GB of GDDR6 memory, a 256-bit memory interface, and a boost clock of 2.1 GHz. It also supports ray tracing and XeSS, which can improve performance in games and other applications. The llama2 demo shows how the Intel Arc A770 can be used to accelerate machine learning inference tasks. The demo uses PyTorch, a popular machine learning framework, to run a llama2 model on the Intel Arc A770. The results show that the Intel Arc A770 can achieve significant performance gains over CPUs and other GPUs.
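For context, a rough sketch of the approach in the linked demo (assumed, not copied from it): Intel Extension for PyTorch exposes Arc GPUs as the `xpu` device, so a Hugging Face Llama 2 checkpoint can run on the A770 along these lines:

```python
# Sketch: Llama 2 inference on an Intel Arc GPU via Intel Extension for
# PyTorch, which registers the card as the "xpu" device.
import torch
import intel_extension_for_pytorch as ipex  # registers the "xpu" backend
from transformers import AutoModelForCausalLM, AutoTokenizer

name = "meta-llama/Llama-2-7b-chat-hf"
tok = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.float16).to("xpu")
model = ipex.optimize(model, dtype=torch.float16)  # IPEX kernel optimizations

inputs = tok("Why is the sky blue?", return_tensors="pt").to("xpu")
out = model.generate(**inputs, max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))
```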
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2797/reactions", "total_count": 22, "+1": 14, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 8 }
https://api.github.com/repos/ollama/ollama/issues/2797/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4788
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4788/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4788/comments
https://api.github.com/repos/ollama/ollama/issues/4788/events
https://github.com/ollama/ollama/issues/4788
2,329,745,994
I_kwDOJ0Z1Ps6K3RpK
4,788
Add EventSource format for /api/generate
{ "login": "Vali-98", "id": 137794480, "node_id": "U_kgDOCDaTsA", "avatar_url": "https://avatars.githubusercontent.com/u/137794480?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Vali-98", "html_url": "https://github.com/Vali-98", "followers_url": "https://api.github.com/users/Vali-98/followers", "following_url": "https://api.github.com/users/Vali-98/following{/other_user}", "gists_url": "https://api.github.com/users/Vali-98/gists{/gist_id}", "starred_url": "https://api.github.com/users/Vali-98/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Vali-98/subscriptions", "organizations_url": "https://api.github.com/users/Vali-98/orgs", "repos_url": "https://api.github.com/users/Vali-98/repos", "events_url": "https://api.github.com/users/Vali-98/events{/privacy}", "received_events_url": "https://api.github.com/users/Vali-98/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
1
2024-06-02T16:27:19
2024-07-03T07:03:11
2024-07-03T07:03:10
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? This was tested specifically with `/api/generate` and `react-native-sse`. https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events Stream responses sent by Ollama don't seem to conform to the SSE specification and break when used with EventSource-like libraries. Supporting this format would help frontends and systems that prefer EventSource. ### OS Windows ### GPU Nvidia ### CPU AMD ### Ollama version 0.1.39
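For contrast, the stream Ollama actually emits is newline-delimited JSON (one object per line), not SSE `data: ...` frames terminated by blank lines, which is why EventSource parsers break on it. A minimal sketch that consumes the NDJSON stream directly:

```python
# Consume Ollama's NDJSON stream from /api/generate directly, since it is
# not framed as SSE "data:" events.
import json
import requests

with requests.post(
    "http://localhost:11434/api/generate",
    json={"model": "llama3", "prompt": "Why is the sky blue?"},
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if not line:
            continue
        chunk = json.loads(line)  # one JSON object per line
        print(chunk.get("response", ""), end="", flush=True)
        if chunk.get("done"):
            break
```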
{ "login": "Vali-98", "id": 137794480, "node_id": "U_kgDOCDaTsA", "avatar_url": "https://avatars.githubusercontent.com/u/137794480?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Vali-98", "html_url": "https://github.com/Vali-98", "followers_url": "https://api.github.com/users/Vali-98/followers", "following_url": "https://api.github.com/users/Vali-98/following{/other_user}", "gists_url": "https://api.github.com/users/Vali-98/gists{/gist_id}", "starred_url": "https://api.github.com/users/Vali-98/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Vali-98/subscriptions", "organizations_url": "https://api.github.com/users/Vali-98/orgs", "repos_url": "https://api.github.com/users/Vali-98/repos", "events_url": "https://api.github.com/users/Vali-98/events{/privacy}", "received_events_url": "https://api.github.com/users/Vali-98/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4788/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4788/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8425
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8425/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8425/comments
https://api.github.com/repos/ollama/ollama/issues/8425/events
https://github.com/ollama/ollama/issues/8425
2,788,066,594
I_kwDOJ0Z1Ps6mLoUi
8,425
The models only run on the CPU, not on the GPU
{ "login": "watashiwastar-yun", "id": 188650638, "node_id": "U_kgDOCz6Ujg", "avatar_url": "https://avatars.githubusercontent.com/u/188650638?v=4", "gravatar_id": "", "url": "https://api.github.com/users/watashiwastar-yun", "html_url": "https://github.com/watashiwastar-yun", "followers_url": "https://api.github.com/users/watashiwastar-yun/followers", "following_url": "https://api.github.com/users/watashiwastar-yun/following{/other_user}", "gists_url": "https://api.github.com/users/watashiwastar-yun/gists{/gist_id}", "starred_url": "https://api.github.com/users/watashiwastar-yun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/watashiwastar-yun/subscriptions", "organizations_url": "https://api.github.com/users/watashiwastar-yun/orgs", "repos_url": "https://api.github.com/users/watashiwastar-yun/repos", "events_url": "https://api.github.com/users/watashiwastar-yun/events{/privacy}", "received_events_url": "https://api.github.com/users/watashiwastar-yun/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2025-01-14T18:58:01
2025-01-15T12:07:19
2025-01-15T12:07:19
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Jan 15 01:51:05 root ollama[600447]: time=2025-01-15T01:51:05.970+08:00 level=INFO source=server.go:104 msg="system memory" total="503.7 GiB" free="436.7 GiB" free_swap="228.0 KiB" Jan 15 01:51:05 root ollama[600447]: time=2025-01-15T01:51:05.970+08:00 level=WARN source=config.go:215 msg="invalid environment variable, using default" key=OLLAMA_GPU_OVERHEAD value=0.1 default=0 Jan 15 01:51:05 root ollama[600447]: time=2025-01-15T01:51:05.971+08:00 level=WARN source=config.go:215 msg="invalid environment variable, using default" key=OLLAMA_GPU_OVERHEAD value=0.1 default=0 Jan 15 01:51:05 root ollama[600447]: time=2025-01-15T01:51:05.971+08:00 level=INFO source=memory.go:356 msg="offload to cuda" layers.requested=-1 layers.model=13 layers.offload=13 layers.split="" memory.available="[39.1 GiB]" memory.gpu_overhead="0 B" memory.required.full="809.9 MiB" memory.required.partial="809.9 MiB" memory.required.kv="24.0 MiB" memory.required.allocations="[809.9 MiB]" memory.weights.total="240.1 MiB" memory.weights.repeating="195.4 MiB" memory.weights.nonrepeating="44.7 MiB" memory.graph.full="48.0 MiB" memory.graph.partial="48.0 MiB" Jan 15 01:51:05 root ollama[600447]: time=2025-01-15T01:51:05.971+08:00 level=INFO source=server.go:158 msg="Invalid OLLAMA_LLM_LIBRARY cuda - not found" Jan 15 01:51:05 root ollama[600447]: time=2025-01-15T01:51:05.971+08:00 level=INFO source=server.go:376 msg="starting llama server" cmd="/usr/local/bin/ollama runner --model /home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6 --ctx-size 8192 --batch-size 512 --n-gpu-layers 13 --threads 96 --parallel 1 --port 39513" Jan 15 01:51:05 root ollama[600447]: time=2025-01-15T01:51:05.972+08:00 level=INFO source=sched.go:449 msg="loaded runners" count=2 Jan 15 01:51:05 root ollama[600447]: time=2025-01-15T01:51:05.972+08:00 level=INFO source=server.go:555 msg="waiting for llama runner to start responding" Jan 15 01:51:05 root ollama[600447]: time=2025-01-15T01:51:05.973+08:00 level=INFO source=server.go:589 msg="waiting for server to become available" status="llm server error" Jan 15 01:51:06 root ollama[600447]: 2025/01/15 01:51:06 config.go:215: WARN invalid environment variable, using default key=OLLAMA_GPU_OVERHEAD value=0.1 default=0 Jan 15 01:51:06 root ollama[600447]: time=2025-01-15T01:51:06.004+08:00 level=INFO source=runner.go:945 msg="starting go runner" Jan 15 01:51:06 root ollama[600447]: time=2025-01-15T01:51:06.004+08:00 level=INFO source=runner.go:946 msg=system info="CPU : LLAMAFILE = 1 | AARCH64_REPACK = 1 | CPU : LLAMAFILE = 1 | AARCH64_REPACK = 1 | cgo(gcc)" threads=96 Jan 15 01:51:06 root ollama[600447]: time=2025-01-15T01:51:06.004+08:00 level=INFO source=runner.go:1004 msg="Server listening on 127.0.0.1:39513" Jan 15 01:51:06 root ollama[600447]: llama_model_loader: loaded meta data with 24 key-value pairs and 112 tensors from /home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6 (version GGUF V3 (latest)) Jan 15 01:51:06 root ollama[600447]: llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. 
Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 0: general.architecture str = nomic-bert Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 1: general.name str = nomic-embed-text-v1.5 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 2: nomic-bert.block_count u32 = 12 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 3: nomic-bert.context_length u32 = 2048 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 4: nomic-bert.embedding_length u32 = 768 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 5: nomic-bert.feed_forward_length u32 = 3072 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 6: nomic-bert.attention.head_count u32 = 12 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 7: nomic-bert.attention.layer_norm_epsilon f32 = 0.000000 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 8: general.file_type u32 = 1 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 9: nomic-bert.attention.causal bool = false Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 10: nomic-bert.pooling_type u32 = 1 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 11: nomic-bert.rope.freq_base f32 = 1000.000000 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 12: tokenizer.ggml.token_type_count u32 = 2 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 13: tokenizer.ggml.bos_token_id u32 = 101 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 14: tokenizer.ggml.eos_token_id u32 = 102 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 15: tokenizer.ggml.model str = bert Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 16: tokenizer.ggml.tokens arr[str,30522] = ["[PAD]", "[unused0]", "[unused1]", "... Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 17: tokenizer.ggml.scores arr[f32,30522] = [-1000.000000, -1000.000000, -1000.00... Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 18: tokenizer.ggml.token_type arr[i32,30522] = [3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ... 
Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 19: tokenizer.ggml.unknown_token_id u32 = 100 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 20: tokenizer.ggml.seperator_token_id u32 = 102 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 21: tokenizer.ggml.padding_token_id u32 = 0 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 22: tokenizer.ggml.cls_token_id u32 = 101 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - kv 23: tokenizer.ggml.mask_token_id u32 = 103 Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - type f32: 51 tensors Jan 15 01:51:06 root ollama[600447]: llama_model_loader: - type f16: 61 tensors Jan 15 01:51:06 root ollama[600447]: llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect Jan 15 01:51:06 root ollama[600447]: llm_load_vocab: special tokens cache size = 5 Jan 15 01:51:06 root ollama[600447]: llm_load_vocab: token to piece cache size = 0.2032 MB Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: format = GGUF V3 (latest) Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: arch = nomic-bert Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: vocab type = WPM Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_vocab = 30522 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_merges = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: vocab_only = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_ctx_train = 2048 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_embd = 768 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_layer = 12 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_head = 12 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_head_kv = 12 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_rot = 64 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_swa = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_embd_head_k = 64 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_embd_head_v = 64 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_gqa = 1 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_embd_k_gqa = 768 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_embd_v_gqa = 768 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: f_norm_eps = 1.0e-12 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: f_norm_rms_eps = 0.0e+00 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: f_clamp_kqv = 0.0e+00 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: f_max_alibi_bias = 0.0e+00 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: f_logit_scale = 0.0e+00 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_ff = 3072 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_expert = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_expert_used = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: causal attn = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: pooling type = 1 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: rope type = 2 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: rope scaling = linear Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: freq_base_train = 1000.0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: freq_scale_train = 1 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: n_ctx_orig_yarn = 2048 Jan 15 01:51:06 root ollama[600447]: 
llm_load_print_meta: rope_finetuned = unknown Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: ssm_d_conv = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: ssm_d_inner = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: ssm_d_state = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: ssm_dt_rank = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: ssm_dt_b_c_rms = 0 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: model type = 137M Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: model ftype = F16 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: model params = 136.73 M Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: model size = 260.86 MiB (16.00 BPW) Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: general.name = nomic-embed-text-v1.5 Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: BOS token = 101 '[CLS]' Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: EOS token = 102 '[SEP]' Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: UNK token = 100 '[UNK]' Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: SEP token = 102 '[SEP]' Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: PAD token = 0 '[PAD]' Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: CLS token = 101 '[CLS]' Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: MASK token = 103 '[MASK]' Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: LF token = 0 '[PAD]' Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: EOG token = 102 '[SEP]' Jan 15 01:51:06 root ollama[600447]: llm_load_print_meta: max token length = 21 Jan 15 01:51:06 root ollama[600447]: llm_load_tensors: CPU_Mapped model buffer size = 260.86 MiB Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: n_seq_max = 1 Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: n_ctx = 8192 Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: n_ctx_per_seq = 8192 Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: n_batch = 512 Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: n_ubatch = 512 Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: flash_attn = 0 Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: freq_base = 1000.0 Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: freq_scale = 1 Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: n_ctx_pre_seq (8192) > n_ctx_train (2048) -- possible training context overflow Jan 15 01:51:06 root ollama[600447]: time=2025-01-15T01:51:06.225+08:00 level=INFO source=server.go:589 msg="waiting for server to become available" status="llm server loading model" Jan 15 01:51:06 root ollama[600447]: llama_kv_cache_init: CPU KV buffer size = 288.00 MiB Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: KV self size = 288.00 MiB, K (f16): 144.00 MiB, V (f16): 144.00 MiB Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: CPU output buffer size = 0.00 MiB Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: CPU compute buffer size = 23.00 MiB Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: graph nodes = 453 Jan 15 01:51:06 root ollama[600447]: llama_new_context_with_model: graph splits = 1 Jan 15 01:51:06 root ollama[600447]: time=2025-01-15T01:51:06.476+08:00 level=INFO source=server.go:594 msg="llama runner started in 0.50 seconds" Jan 15 01:51:06 root ollama[600447]: [GIN] 2025/01/15 - 01:51:06 
| 200 | 1.407517797s | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:06 root ollama[600447]: [GIN] 2025/01/15 - 01:51:06 | 200 | 102.697254ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:06 root ollama[600447]: [GIN] 2025/01/15 - 01:51:06 | 200 | 108.743335ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:35 root ollama[600447]: time=2025-01-15T01:51:35.610+08:00 level=INFO source=routes.go:541 msg="embedding generation failed: do embedding request: Post \"http://127.0.0.1:39513/embedding\": context canceled" Jan 15 01:51:35 root ollama[600447]: [GIN] 2025/01/15 - 01:51:35 | 500 | 58.026813ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:35 root ollama[600447]: time=2025-01-15T01:51:35.656+08:00 level=INFO source=server.go:3489 msg="http: superfluous response.WriteHeader call from github.com/ollama/ollama/llama/runner.(*Server).embeddings (runner.go:796)" Jan 15 01:51:36 root ollama[600447]: [GIN] 2025/01/15 - 01:51:36 | 200 | 112.098908ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:36 root ollama[600447]: [GIN] 2025/01/15 - 01:51:36 | 200 | 111.793891ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:36 root ollama[600447]: [GIN] 2025/01/15 - 01:51:36 | 200 | 94.901386ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:37 root ollama[600447]: [GIN] 2025/01/15 - 01:51:37 | 200 | 88.451238ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:37 root ollama[600447]: [GIN] 2025/01/15 - 01:51:37 | 200 | 95.882779ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:37 root ollama[600447]: [GIN] 2025/01/15 - 01:51:37 | 200 | 108.765545ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:37 root ollama[600447]: [GIN] 2025/01/15 - 01:51:37 | 200 | 95.350763ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:37 root ollama[600447]: [GIN] 2025/01/15 - 01:51:37 | 200 | 105.795098ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:37 root ollama[600447]: [GIN] 2025/01/15 - 01:51:37 | 200 | 100.904648ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:37 root ollama[600447]: [GIN] 2025/01/15 - 01:51:37 | 200 | 90.890678ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:37 root ollama[600447]: [GIN] 2025/01/15 - 01:51:37 | 200 | 99.928346ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:37 root ollama[600447]: [GIN] 2025/01/15 - 01:51:37 | 200 | 97.063419ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:38 root ollama[600447]: [GIN] 2025/01/15 - 01:51:38 | 200 | 111.292825ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:38 root ollama[600447]: [GIN] 2025/01/15 - 01:51:38 | 200 | 108.619156ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:38 root ollama[600447]: [GIN] 2025/01/15 - 01:51:38 | 200 | 99.37953ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:38 root ollama[600447]: [GIN] 2025/01/15 - 01:51:38 | 200 | 98.20445ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:38 root ollama[600447]: [GIN] 2025/01/15 - 01:51:38 | 200 | 98.707926ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:38 root ollama[600447]: [GIN] 2025/01/15 - 01:51:38 | 200 | 88.752476ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:38 root ollama[600447]: [GIN] 2025/01/15 - 01:51:38 | 200 | 112.797772ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:38 root ollama[600447]: [GIN] 2025/01/15 - 01:51:38 | 200 | 102.311297ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:38 root ollama[600447]: [GIN] 2025/01/15 - 01:51:38 | 200 | 139.486818ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:39 root ollama[600447]: [GIN] 2025/01/15 - 01:51:39 | 200 | 127.102628ms | 127.0.0.1 | 
POST "/api/embeddings" Jan 15 01:51:39 root ollama[600447]: [GIN] 2025/01/15 - 01:51:39 | 200 | 139.483418ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:39 root ollama[600447]: [GIN] 2025/01/15 - 01:51:39 | 200 | 123.809214ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:39 root ollama[600447]: [GIN] 2025/01/15 - 01:51:39 | 200 | 191.102313ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:39 root ollama[600447]: [GIN] 2025/01/15 - 01:51:39 | 200 | 101.694722ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:39 root ollama[600447]: [GIN] 2025/01/15 - 01:51:39 | 200 | 101.93814ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:39 root ollama[600447]: [GIN] 2025/01/15 - 01:51:39 | 200 | 110.100874ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:39 root ollama[600447]: [GIN] 2025/01/15 - 01:51:39 | 200 | 88.756626ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:40 root ollama[600447]: [GIN] 2025/01/15 - 01:51:40 | 200 | 158.063858ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:40 root ollama[600447]: [GIN] 2025/01/15 - 01:51:40 | 200 | 110.996387ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:40 root ollama[600447]: [GIN] 2025/01/15 - 01:51:40 | 200 | 94.43816ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:40 root ollama[600447]: [GIN] 2025/01/15 - 01:51:40 | 200 | 94.639439ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:40 root ollama[600447]: [GIN] 2025/01/15 - 01:51:40 | 200 | 103.956934ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:40 root ollama[600447]: [GIN] 2025/01/15 - 01:51:40 | 200 | 192.558751ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:40 root ollama[600447]: [GIN] 2025/01/15 - 01:51:40 | 200 | 185.712976ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:40 root ollama[600447]: [GIN] 2025/01/15 - 01:51:40 | 200 | 114.622968ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:41 root ollama[600447]: [GIN] 2025/01/15 - 01:51:41 | 200 | 104.139422ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:41 root ollama[600447]: [GIN] 2025/01/15 - 01:51:41 | 200 | 92.839093ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:41 root ollama[600447]: [GIN] 2025/01/15 - 01:51:41 | 200 | 89.171293ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:41 root ollama[600447]: [GIN] 2025/01/15 - 01:51:41 | 200 | 116.477113ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:41 root ollama[600447]: [GIN] 2025/01/15 - 01:51:41 | 200 | 124.529638ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:41 root ollama[600447]: [GIN] 2025/01/15 - 01:51:41 | 200 | 102.592875ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:41 root ollama[600447]: [GIN] 2025/01/15 - 01:51:41 | 200 | 131.942868ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:41 root ollama[600447]: [GIN] 2025/01/15 - 01:51:41 | 200 | 100.027476ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:42 root ollama[600447]: [GIN] 2025/01/15 - 01:51:42 | 200 | 127.99767ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:42 root ollama[600447]: [GIN] 2025/01/15 - 01:51:42 | 200 | 97.735934ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:42 root ollama[600447]: [GIN] 2025/01/15 - 01:51:42 | 200 | 123.194159ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:42 root ollama[600447]: [GIN] 2025/01/15 - 01:51:42 | 200 | 112.736863ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:42 root ollama[600447]: [GIN] 2025/01/15 - 01:51:42 | 200 | 105.301362ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:42 root ollama[600447]: [GIN] 2025/01/15 - 01:51:42 | 200 | 107.965692ms | 
127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:42 root ollama[600447]: [GIN] 2025/01/15 - 01:51:42 | 200 | 117.156487ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:42 root ollama[600447]: [GIN] 2025/01/15 - 01:51:42 | 200 | 150.285371ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:42 root ollama[600447]: [GIN] 2025/01/15 - 01:51:42 | 200 | 90.237444ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:43 root ollama[600447]: [GIN] 2025/01/15 - 01:51:43 | 200 | 99.070613ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:43 root ollama[600447]: [GIN] 2025/01/15 - 01:51:43 | 200 | 116.380504ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:43 root ollama[600447]: [GIN] 2025/01/15 - 01:51:43 | 200 | 181.222462ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:43 root ollama[600447]: [GIN] 2025/01/15 - 01:51:43 | 200 | 76.402445ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:43 root ollama[600447]: [GIN] 2025/01/15 - 01:51:43 | 200 | 150.172402ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:43 root ollama[600447]: [GIN] 2025/01/15 - 01:51:43 | 200 | 90.905449ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:43 root ollama[600447]: [GIN] 2025/01/15 - 01:51:43 | 200 | 84.185213ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:43 root ollama[600447]: [GIN] 2025/01/15 - 01:51:43 | 200 | 91.831412ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:43 root ollama[600447]: [GIN] 2025/01/15 - 01:51:43 | 200 | 111.500483ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:44 root ollama[600447]: [GIN] 2025/01/15 - 01:51:44 | 200 | 112.181687ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:44 root ollama[600447]: [GIN] 2025/01/15 - 01:51:44 | 200 | 88.815465ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:44 root ollama[600447]: [GIN] 2025/01/15 - 01:51:44 | 200 | 94.596759ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:44 root ollama[600447]: [GIN] 2025/01/15 - 01:51:44 | 200 | 111.128636ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:44 root ollama[600447]: [GIN] 2025/01/15 - 01:51:44 | 200 | 99.275731ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:44 root ollama[600447]: [GIN] 2025/01/15 - 01:51:44 | 200 | 118.895803ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:44 root ollama[600447]: [GIN] 2025/01/15 - 01:51:44 | 200 | 141.69756ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:44 root ollama[600447]: [GIN] 2025/01/15 - 01:51:44 | 200 | 76.226277ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:44 root ollama[600447]: [GIN] 2025/01/15 - 01:51:44 | 200 | 127.757392ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:45 root ollama[600447]: [GIN] 2025/01/15 - 01:51:45 | 200 | 95.433722ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:45 root ollama[600447]: [GIN] 2025/01/15 - 01:51:45 | 200 | 90.72205ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:45 root ollama[600447]: [GIN] 2025/01/15 - 01:51:45 | 200 | 100.862928ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:45 root ollama[600447]: [GIN] 2025/01/15 - 01:51:45 | 200 | 90.499541ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:45 root ollama[600447]: [GIN] 2025/01/15 - 01:51:45 | 200 | 105.60932ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:45 root ollama[600447]: [GIN] 2025/01/15 - 01:51:45 | 200 | 95.321623ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:45 root ollama[600447]: [GIN] 2025/01/15 - 01:51:45 | 200 | 109.058022ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:45 root ollama[600447]: [GIN] 2025/01/15 - 01:51:45 | 200 | 101.764912ms | 
127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:45 root ollama[600447]: [GIN] 2025/01/15 - 01:51:45 | 200 | 109.41009ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:46 root ollama[600447]: [GIN] 2025/01/15 - 01:51:46 | 200 | 152.284754ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:46 root ollama[600447]: [GIN] 2025/01/15 - 01:51:46 | 200 | 113.810784ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:46 root ollama[600447]: [GIN] 2025/01/15 - 01:51:46 | 200 | 117.360746ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:46 root ollama[600447]: [GIN] 2025/01/15 - 01:51:46 | 200 | 86.713882ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:46 root ollama[600447]: [GIN] 2025/01/15 - 01:51:46 | 200 | 92.426256ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:46 root ollama[600447]: [GIN] 2025/01/15 - 01:51:46 | 200 | 123.703945ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:46 root ollama[600447]: [GIN] 2025/01/15 - 01:51:46 | 200 | 110.411992ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:46 root ollama[600447]: [GIN] 2025/01/15 - 01:51:46 | 200 | 126.374993ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:46 root ollama[600447]: [GIN] 2025/01/15 - 01:51:46 | 200 | 125.019524ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:47 root ollama[600447]: [GIN] 2025/01/15 - 01:51:47 | 200 | 112.387926ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:47 root ollama[600447]: [GIN] 2025/01/15 - 01:51:47 | 200 | 99.222952ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:51:47 root ollama[600447]: [GIN] 2025/01/15 - 01:51:47 | 200 | 115.865238ms | 127.0.0.1 | POST "/api/embeddings" Jan 15 01:55:23 root ollama[600447]: time=2025-01-15T01:55:23.351+08:00 level=WARN source=sched.go:646 msg="gpu VRAM usage didn't recover within timeout" seconds=5.126759295 model=/home/zyk/.ollama/models/blobs/sha256-dde5aa3fc5ffc17176b5e8bdc82f587b24b2678c6c66101bf7da77af9f7ccdff Jan 15 01:55:23 root ollama[600447]: time=2025-01-15T01:55:23.608+08:00 level=WARN source=sched.go:646 msg="gpu VRAM usage didn't recover within timeout" seconds=5.383669568 model=/home/zyk/.ollama/models/blobs/sha256-dde5aa3fc5ffc17176b5e8bdc82f587b24b2678c6c66101bf7da77af9f7ccdff Jan 15 01:55:23 root ollama[600447]: time=2025-01-15T01:55:23.862+08:00 level=WARN source=sched.go:646 msg="gpu VRAM usage didn't recover within timeout" seconds=5.637426687 model=/home/zyk/.ollama/models/blobs/sha256-dde5aa3fc5ffc17176b5e8bdc82f587b24b2678c6c66101bf7da77af9f7ccdff Jan 15 01:56:52 root ollama[600447]: time=2025-01-15T01:56:52.384+08:00 level=WARN source=sched.go:646 msg="gpu VRAM usage didn't recover within timeout" seconds=5.055821746 model=/home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6 Jan 15 01:56:52 root ollama[600447]: time=2025-01-15T01:56:52.635+08:00 level=WARN source=sched.go:646 msg="gpu VRAM usage didn't recover within timeout" seconds=5.307517971 model=/home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6 Jan 15 01:56:52 root ollama[600447]: time=2025-01-15T01:56:52.886+08:00 level=WARN source=sched.go:646 msg="gpu VRAM usage didn't recover within timeout" seconds=5.557893277 model=/home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6 Jan 15 02:11:18 root ollama[600447]: time=2025-01-15T02:11:18.650+08:00 level=WARN source=config.go:215 msg="invalid environment variable, using default" key=OLLAMA_GPU_OVERHEAD value=0.1 default=0 Jan 15 02:11:18 root 
ollama[600447]: time=2025-01-15T02:11:18.650+08:00 level=INFO source=sched.go:714 msg="new model will fit in available VRAM in single GPU, loading" model=/home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6 gpu=GPU-c7a0f0bc-23ca-6391-1b9b-1465cd77cd98 parallel=1 available=41967550464 required="809.9 MiB" Jan 15 02:11:18 root ollama[600447]: time=2025-01-15T02:11:18.989+08:00 level=INFO source=server.go:104 msg="system memory" total="503.7 GiB" free="448.6 GiB" free_swap="228.0 KiB" Jan 15 02:11:18 root ollama[600447]: time=2025-01-15T02:11:18.989+08:00 level=WARN source=config.go:215 msg="invalid environment variable, using default" key=OLLAMA_GPU_OVERHEAD value=0.1 default=0 Jan 15 02:11:18 root ollama[600447]: time=2025-01-15T02:11:18.989+08:00 level=WARN source=config.go:215 msg="invalid environment variable, using default" key=OLLAMA_GPU_OVERHEAD value=0.1 default=0 Jan 15 02:11:18 root ollama[600447]: time=2025-01-15T02:11:18.989+08:00 level=INFO source=memory.go:356 msg="offload to cuda" layers.requested=-1 layers.model=13 layers.offload=13 layers.split="" memory.available="[39.1 GiB]" memory.gpu_overhead="0 B" memory.required.full="809.9 MiB" memory.required.partial="809.9 MiB" memory.required.kv="24.0 MiB" memory.required.allocations="[809.9 MiB]" memory.weights.total="240.1 MiB" memory.weights.repeating="195.4 MiB" memory.weights.nonrepeating="44.7 MiB" memory.graph.full="48.0 MiB" memory.graph.partial="48.0 MiB" Jan 15 02:11:18 root ollama[600447]: time=2025-01-15T02:11:18.989+08:00 level=INFO source=server.go:158 msg="Invalid OLLAMA_LLM_LIBRARY cuda - not found" Jan 15 02:11:18 root ollama[600447]: time=2025-01-15T02:11:18.989+08:00 level=INFO source=server.go:376 msg="starting llama server" cmd="/usr/local/bin/ollama runner --model /home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6 --ctx-size 8192 --batch-size 512 --n-gpu-layers 13 --threads 96 --parallel 1 --port 45499" Jan 15 02:11:18 root ollama[600447]: time=2025-01-15T02:11:18.990+08:00 level=INFO source=sched.go:449 msg="loaded runners" count=1 Jan 15 02:11:18 root ollama[600447]: time=2025-01-15T02:11:18.990+08:00 level=INFO source=server.go:555 msg="waiting for llama runner to start responding" Jan 15 02:11:18 root ollama[600447]: time=2025-01-15T02:11:18.990+08:00 level=INFO source=server.go:589 msg="waiting for server to become available" status="llm server error" Jan 15 02:11:19 root ollama[600447]: 2025/01/15 02:11:19 config.go:215: WARN invalid environment variable, using default key=OLLAMA_GPU_OVERHEAD value=0.1 default=0 Jan 15 02:11:19 root ollama[600447]: time=2025-01-15T02:11:19.013+08:00 level=INFO source=runner.go:945 msg="starting go runner" Jan 15 02:11:19 root ollama[600447]: time=2025-01-15T02:11:19.013+08:00 level=INFO source=runner.go:946 msg=system info="CPU : LLAMAFILE = 1 | AARCH64_REPACK = 1 | CPU : LLAMAFILE = 1 | AARCH64_REPACK = 1 | cgo(gcc)" threads=96 Jan 15 02:11:19 root ollama[600447]: time=2025-01-15T02:11:19.013+08:00 level=INFO source=runner.go:1004 msg="Server listening on 127.0.0.1:45499" Jan 15 02:11:19 root ollama[600447]: llama_model_loader: loaded meta data with 24 key-value pairs and 112 tensors from /home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6 (version GGUF V3 (latest)) Jan 15 02:11:19 root ollama[600447]: llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. 
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 0: general.architecture str = nomic-bert
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 1: general.name str = nomic-embed-text-v1.5
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 2: nomic-bert.block_count u32 = 12
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 3: nomic-bert.context_length u32 = 2048
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 4: nomic-bert.embedding_length u32 = 768
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 5: nomic-bert.feed_forward_length u32 = 3072
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 6: nomic-bert.attention.head_count u32 = 12
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 7: nomic-bert.attention.layer_norm_epsilon f32 = 0.000000
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 8: general.file_type u32 = 1
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 9: nomic-bert.attention.causal bool = false
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 10: nomic-bert.pooling_type u32 = 1
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 11: nomic-bert.rope.freq_base f32 = 1000.000000
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 12: tokenizer.ggml.token_type_count u32 = 2
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 13: tokenizer.ggml.bos_token_id u32 = 101
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 14: tokenizer.ggml.eos_token_id u32 = 102
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 15: tokenizer.ggml.model str = bert
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 16: tokenizer.ggml.tokens arr[str,30522] = ["[PAD]", "[unused0]", "[unused1]", "...
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 17: tokenizer.ggml.scores arr[f32,30522] = [-1000.000000, -1000.000000, -1000.00...
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 18: tokenizer.ggml.token_type arr[i32,30522] = [3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 19: tokenizer.ggml.unknown_token_id u32 = 100
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 20: tokenizer.ggml.seperator_token_id u32 = 102
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 21: tokenizer.ggml.padding_token_id u32 = 0
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 22: tokenizer.ggml.cls_token_id u32 = 101
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - kv 23: tokenizer.ggml.mask_token_id u32 = 103
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - type f32: 51 tensors
Jan 15 02:11:19 root ollama[600447]: llama_model_loader: - type f16: 61 tensors
Jan 15 02:11:19 root ollama[600447]: llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
Jan 15 02:11:19 root ollama[600447]: llm_load_vocab: special tokens cache size = 5
Jan 15 02:11:19 root ollama[600447]: llm_load_vocab: token to piece cache size = 0.2032 MB
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: format = GGUF V3 (latest)
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: arch = nomic-bert
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: vocab type = WPM
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_vocab = 30522
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_merges = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: vocab_only = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_ctx_train = 2048
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_embd = 768
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_layer = 12
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_head = 12
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_head_kv = 12
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_rot = 64
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_swa = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_embd_head_k = 64
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_embd_head_v = 64
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_gqa = 1
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_embd_k_gqa = 768
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_embd_v_gqa = 768
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: f_norm_eps = 1.0e-12
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: f_norm_rms_eps = 0.0e+00
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: f_clamp_kqv = 0.0e+00
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: f_max_alibi_bias = 0.0e+00
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: f_logit_scale = 0.0e+00
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_ff = 3072
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_expert = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_expert_used = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: causal attn = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: pooling type = 1
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: rope type = 2
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: rope scaling = linear
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: freq_base_train = 1000.0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: freq_scale_train = 1
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: n_ctx_orig_yarn = 2048
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: rope_finetuned = unknown
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: ssm_d_conv = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: ssm_d_inner = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: ssm_d_state = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: ssm_dt_rank = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: ssm_dt_b_c_rms = 0
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: model type = 137M
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: model ftype = F16
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: model params = 136.73 M
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: model size = 260.86 MiB (16.00 BPW)
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: general.name = nomic-embed-text-v1.5
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: BOS token = 101 '[CLS]'
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: EOS token = 102 '[SEP]'
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: UNK token = 100 '[UNK]'
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: SEP token = 102 '[SEP]'
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: PAD token = 0 '[PAD]'
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: CLS token = 101 '[CLS]'
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: MASK token = 103 '[MASK]'
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: LF token = 0 '[PAD]'
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: EOG token = 102 '[SEP]'
Jan 15 02:11:19 root ollama[600447]: llm_load_print_meta: max token length = 21
Jan 15 02:11:19 root ollama[600447]: llm_load_tensors: CPU_Mapped model buffer size = 260.86 MiB
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: n_seq_max = 1
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: n_ctx = 8192
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: n_ctx_per_seq = 8192
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: n_batch = 512
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: n_ubatch = 512
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: flash_attn = 0
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: freq_base = 1000.0
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: freq_scale = 1
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: n_ctx_pre_seq (8192) > n_ctx_train (2048) -- possible training context overflow
Jan 15 02:11:19 root ollama[600447]: time=2025-01-15T02:11:19.244+08:00 level=INFO source=server.go:589 msg="waiting for server to become available" status="llm server loading model"
Jan 15 02:11:19 root ollama[600447]: llama_kv_cache_init: CPU KV buffer size = 288.00 MiB
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: KV self size = 288.00 MiB, K (f16): 144.00 MiB, V (f16): 144.00 MiB
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: CPU output buffer size = 0.00 MiB
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: CPU compute buffer size = 23.00 MiB
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: graph nodes = 453
Jan 15 02:11:19 root ollama[600447]: llama_new_context_with_model: graph splits = 1
Jan 15 02:11:19 root ollama[600447]: time=2025-01-15T02:11:19.495+08:00 level=INFO source=server.go:594 msg="llama runner started in 0.50 seconds"
Jan 15 02:11:19 root ollama[600447]: [GIN] 2025/01/15 - 02:11:19 | 200 | 1.338283813s | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:21 root ollama[600447]: [GIN] 2025/01/15 - 02:11:21 | 200 | 98.380039ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:22 root ollama[600447]: [GIN] 2025/01/15 - 02:11:22 | 200 | 60.265805ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:23 root ollama[600447]: [GIN] 2025/01/15 - 02:11:23 | 200 | 65.985629ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:24 root ollama[600447]: [GIN] 2025/01/15 - 02:11:24 | 200 | 68.056552ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:25 root ollama[600447]: [GIN] 2025/01/15 - 02:11:25 | 200 | 78.546718ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:27 root ollama[600447]: [GIN] 2025/01/15 - 02:11:27 | 200 | 66.586444ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:28 root ollama[600447]: [GIN] 2025/01/15 - 02:11:28 | 200 | 71.366315ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:29 root ollama[600447]: [GIN] 2025/01/15 - 02:11:29 | 200 | 65.141826ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:30 root ollama[600447]: [GIN] 2025/01/15 - 02:11:30 | 200 | 83.953735ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:31 root ollama[600447]: [GIN] 2025/01/15 - 02:11:31 | 200 | 75.937529ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:32 root ollama[600447]: [GIN] 2025/01/15 - 02:11:32 | 200 | 60.692432ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:34 root ollama[600447]: [GIN] 2025/01/15 - 02:11:34 | 200 | 79.5133ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:35 root ollama[600447]: [GIN] 2025/01/15 - 02:11:35 | 200 | 89.903977ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:36 root ollama[600447]: [GIN] 2025/01/15 - 02:11:36 | 200 | 84.803997ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:37 root ollama[600447]: [GIN] 2025/01/15 - 02:11:37 | 200 | 60.404694ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:38 root ollama[600447]: [GIN] 2025/01/15 - 02:11:38 | 200 | 60.700402ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:40 root ollama[600447]: [GIN] 2025/01/15 - 02:11:40 | 200 | 70.254765ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:41 root ollama[600447]: [GIN] 2025/01/15 - 02:11:41 | 200 | 95.487742ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:42 root ollama[600447]: [GIN] 2025/01/15 - 02:11:42 | 200 | 62.429248ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:43 root ollama[600447]: [GIN] 2025/01/15 - 02:11:43 | 200 | 104.233301ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:44 root ollama[600447]: [GIN] 2025/01/15 - 02:11:44 | 200 | 69.758129ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:46 root ollama[600447]: [GIN] 2025/01/15 - 02:11:46 | 200 | 86.355095ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:47 root ollama[600447]: [GIN] 2025/01/15 - 02:11:47 | 200 | 75.126125ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:48 root ollama[600447]: [GIN] 2025/01/15 - 02:11:48 | 200 | 85.859989ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:49 root ollama[600447]: [GIN] 2025/01/15 - 02:11:49 | 200 | 69.938297ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:51 root ollama[600447]: [GIN] 2025/01/15 - 02:11:51 | 200 | 63.951375ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:52 root ollama[600447]: [GIN] 2025/01/15 - 02:11:52 | 200 | 66.710664ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:53 root ollama[600447]: [GIN] 2025/01/15 - 02:11:53 | 200 | 78.109082ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:54 root ollama[600447]: [GIN] 2025/01/15 - 02:11:54 | 200 | 67.17403ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:55 root ollama[600447]: [GIN] 2025/01/15 - 02:11:55 | 200 | 57.723965ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:57 root ollama[600447]: [GIN] 2025/01/15 - 02:11:57 | 200 | 71.506035ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:58 root ollama[600447]: [GIN] 2025/01/15 - 02:11:58 | 200 | 70.989889ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:11:59 root ollama[600447]: [GIN] 2025/01/15 - 02:11:59 | 200 | 61.695213ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:00 root ollama[600447]: [GIN] 2025/01/15 - 02:12:00 | 200 | 64.223903ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:01 root ollama[600447]: [GIN] 2025/01/15 - 02:12:01 | 200 | 58.385951ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:03 root ollama[600447]: [GIN] 2025/01/15 - 02:12:03 | 200 | 66.473285ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:04 root ollama[600447]: [GIN] 2025/01/15 - 02:12:04 | 200 | 65.094797ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:05 root ollama[600447]: [GIN] 2025/01/15 - 02:12:05 | 200 | 58.699298ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:06 root ollama[600447]: [GIN] 2025/01/15 - 02:12:06 | 200 | 92.308887ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:08 root ollama[600447]: [GIN] 2025/01/15 - 02:12:08 | 200 | 69.901188ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:09 root ollama[600447]: [GIN] 2025/01/15 - 02:12:09 | 200 | 85.517992ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:10 root ollama[600447]: [GIN] 2025/01/15 - 02:12:10 | 200 | 71.162887ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:11 root ollama[600447]: [GIN] 2025/01/15 - 02:12:11 | 200 | 60.978389ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:13 root ollama[600447]: [GIN] 2025/01/15 - 02:12:13 | 200 | 72.930583ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:14 root ollama[600447]: [GIN] 2025/01/15 - 02:12:14 | 200 | 73.36181ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:15 root ollama[600447]: [GIN] 2025/01/15 - 02:12:15 | 200 | 72.03733ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:12:16 root ollama[600447]: [GIN] 2025/01/15 - 02:12:16 | 200 | 66.177347ms | 127.0.0.1 | POST "/api/embeddings"
Jan 15 02:17:21 root ollama[600447]: time=2025-01-15T02:17:21.795+08:00 level=WARN source=sched.go:646 msg="gpu VRAM usage didn't recover within timeout" seconds=5.001379244 model=/home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6
Jan 15 02:17:22 root ollama[600447]: time=2025-01-15T02:17:22.044+08:00 level=WARN source=sched.go:646 msg="gpu VRAM usage didn't recover within timeout" seconds=5.250981796 model=/home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6
Jan 15 02:17:22 root ollama[600447]: time=2025-01-15T02:17:22.293+08:00 level=WARN source=sched.go:646 msg="gpu VRAM usage didn't recover within timeout" seconds=5.499944393 model=/home/zyk/.ollama/models/blobs/sha256-970aa74c0a90ef7482477cf803618e776e173c007bf957f635f1015bfcfef0e6

### OS
Linux
### GPU
Nvidia
### CPU
AMD
### Ollama version
0.5.4
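The request log above shows a client calling the legacy `/api/embeddings` endpoint roughly once per second before the VRAM warnings appear. For reference, a single request of that shape can be reproduced with curl; this is a sketch, with the prompt text as a placeholder, using the model name from the log:

```sh
curl http://localhost:11434/api/embeddings -d '{
  "model": "nomic-embed-text",
  "prompt": "The sky is blue because of Rayleigh scattering."
}'
```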
{ "login": "watashiwastar-yun", "id": 188650638, "node_id": "U_kgDOCz6Ujg", "avatar_url": "https://avatars.githubusercontent.com/u/188650638?v=4", "gravatar_id": "", "url": "https://api.github.com/users/watashiwastar-yun", "html_url": "https://github.com/watashiwastar-yun", "followers_url": "https://api.github.com/users/watashiwastar-yun/followers", "following_url": "https://api.github.com/users/watashiwastar-yun/following{/other_user}", "gists_url": "https://api.github.com/users/watashiwastar-yun/gists{/gist_id}", "starred_url": "https://api.github.com/users/watashiwastar-yun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/watashiwastar-yun/subscriptions", "organizations_url": "https://api.github.com/users/watashiwastar-yun/orgs", "repos_url": "https://api.github.com/users/watashiwastar-yun/repos", "events_url": "https://api.github.com/users/watashiwastar-yun/events{/privacy}", "received_events_url": "https://api.github.com/users/watashiwastar-yun/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8425/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8425/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3331
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3331/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3331/comments
https://api.github.com/repos/ollama/ollama/issues/3331/events
https://github.com/ollama/ollama/pull/3331
2,204,621,276
PR_kwDOJ0Z1Ps5qmPbn
3,331
Integration tests conditionally pull
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-03-24T23:44:05
2024-03-25T19:48:55
2024-03-25T19:48:52
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3331", "html_url": "https://github.com/ollama/ollama/pull/3331", "diff_url": "https://github.com/ollama/ollama/pull/3331.diff", "patch_url": "https://github.com/ollama/ollama/pull/3331.patch", "merged_at": "2024-03-25T19:48:52" }
If images aren't present, pull them. This also fixes the expected responses.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3331/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3331/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/713
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/713/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/713/comments
https://api.github.com/repos/ollama/ollama/issues/713/events
https://github.com/ollama/ollama/issues/713
1,929,063,501
I_kwDOJ0Z1Ps5y-yxN
713
Using ollama with llm-ls
{ "login": "noahbald", "id": 36181524, "node_id": "MDQ6VXNlcjM2MTgxNTI0", "avatar_url": "https://avatars.githubusercontent.com/u/36181524?v=4", "gravatar_id": "", "url": "https://api.github.com/users/noahbald", "html_url": "https://github.com/noahbald", "followers_url": "https://api.github.com/users/noahbald/followers", "following_url": "https://api.github.com/users/noahbald/following{/other_user}", "gists_url": "https://api.github.com/users/noahbald/gists{/gist_id}", "starred_url": "https://api.github.com/users/noahbald/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/noahbald/subscriptions", "organizations_url": "https://api.github.com/users/noahbald/orgs", "repos_url": "https://api.github.com/users/noahbald/repos", "events_url": "https://api.github.com/users/noahbald/events{/privacy}", "received_events_url": "https://api.github.com/users/noahbald/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2023-10-05T21:09:26
2023-10-25T21:34:55
2023-10-25T21:34:55
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I've been trying to set up ollama to use codellama with FIM in my editor with nvim.llm and llm-ls. As suggested in the ollama docs, this is what the locally running API may expect as a FIM request:

```sh
curl -X POST http://localhost:11434/api/generate -d '{
  "model": "codellama:7b-code",
  "prompt": "<PRE> def compute_gcd(x, y): <SUF>return result <MID>"
}'
```

However, when using llm-ls we can't (afaik) tell it how to structure the data it sends. It ends up sending a request that looks more like this:

```sh
curl -X POST http://localhost:11434/api/generate -d '{
  "inputs": "<PRE> def compute_gcd(x, y): <SUF>return result <MID>",
  "parameters": { ... }
}'
```

Has anyone found a better way to set this up? Is there an alternative api endpoint we can use? Something like `/api/generate/codellama:7b-code?src=llm-ls`?
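One possible workaround until llm-ls is configurable is to rewrite the payload in transit. A minimal shell sketch, assuming `jq` is installed, that maps the llm-ls-style `inputs` field onto the `prompt` field `/api/generate` expects:

```sh
# hypothetical llm-ls-style payload; in practice this would come off the wire
payload='{"inputs": "<PRE> def compute_gcd(x, y): <SUF>return result <MID>", "parameters": {}}'

# rename "inputs" to "prompt", add the model, and forward to ollama
echo "$payload" \
  | jq '{model: "codellama:7b-code", prompt: .inputs, stream: false}' \
  | curl -s -X POST http://localhost:11434/api/generate -d @-
```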
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/713/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/713/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/6327
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6327/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6327/comments
https://api.github.com/repos/ollama/ollama/issues/6327/events
https://github.com/ollama/ollama/pull/6327
2,461,920,588
PR_kwDOJ0Z1Ps54KDhn
6,327
convert safetensor adapters into GGUF
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-08-12T21:24:33
2024-08-23T18:29:58
2024-08-23T18:29:56
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6327", "html_url": "https://github.com/ollama/ollama/pull/6327", "diff_url": "https://github.com/ollama/ollama/pull/6327.diff", "patch_url": "https://github.com/ollama/ollama/pull/6327.patch", "merged_at": "2024-08-23T18:29:56" }
This change converts a Safetensors-based LoRA into GGUF and ties it with a base model. Only llama2/llama3/mistral/gemma2 will work initially. You can create the Modelfile to look like:

```
FROM llama3
ADAPTER /path/to/my/safetensor/adapter/directory
```

I'll add in some tests, but I wanted to get this out so people could try it. Replaces #5524
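For context, end-to-end usage of the Modelfile above would look roughly like this; the model name below is a placeholder:

```sh
ollama create my-tuned-llama -f ./Modelfile   # the safetensors-to-GGUF conversion happens during create
ollama run my-tuned-llama "Write a haiku about rebasing."
```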
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6327/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6327/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2547
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2547/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2547/comments
https://api.github.com/repos/ollama/ollama/issues/2547/events
https://github.com/ollama/ollama/issues/2547
2,139,186,178
I_kwDOJ0Z1Ps5_gWQC
2,547
Dynamically determine context window at runtime
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
1
2024-02-16T18:33:48
2024-11-17T22:25:01
null
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
null
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2547/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2547/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/542
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/542/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/542/comments
https://api.github.com/repos/ollama/ollama/issues/542/events
https://github.com/ollama/ollama/issues/542
1,899,551,212
I_kwDOJ0Z1Ps5xONns
542
Creating new models
{ "login": "erlebach", "id": 324708, "node_id": "MDQ6VXNlcjMyNDcwOA==", "avatar_url": "https://avatars.githubusercontent.com/u/324708?v=4", "gravatar_id": "", "url": "https://api.github.com/users/erlebach", "html_url": "https://github.com/erlebach", "followers_url": "https://api.github.com/users/erlebach/followers", "following_url": "https://api.github.com/users/erlebach/following{/other_user}", "gists_url": "https://api.github.com/users/erlebach/gists{/gist_id}", "starred_url": "https://api.github.com/users/erlebach/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/erlebach/subscriptions", "organizations_url": "https://api.github.com/users/erlebach/orgs", "repos_url": "https://api.github.com/users/erlebach/repos", "events_url": "https://api.github.com/users/erlebach/events{/privacy}", "received_events_url": "https://api.github.com/users/erlebach/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2023-09-16T19:57:33
2023-09-27T22:09:39
2023-09-26T22:30:34
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
In the docs, we find:

### Customize a model

Pull a base model:

```
ollama pull llama2
```

Create a `Modelfile`:

```
FROM llama2
# set the temperature to 1 [higher is more creative, lower is more coherent]
PARAMETER temperature 1
# set the system prompt
SYSTEM """
You are Mario from Super Mario Bros. Answer as Mario, the assistant, only.
"""
```

Next, create and run the model:

```
ollama create mario -f ./Modelfile
ollama run mario
```

When changing the context length and/or temperature of this model (i.e., llama2), what actually happens? Is the model downloaded again (it seems that way)? Why is that? I want to create a series of models with different context lengths and temperatures and would like to modify the models already downloaded. Perhaps that is not possible? When using `llama.cpp`, it is possible to modify the temperature and use shorter context lengths without recreating the model. What exactly is `ollama` doing? Thanks.
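As an aside on the underlying question: ollama stores model layers as content-addressed blobs, so deriving variants from an already-pulled base should reuse the existing weights rather than re-download them. A sketch that builds a family of temperature variants, with placeholder names:

```sh
# create llama2-temp-0.2, llama2-temp-0.7, llama2-temp-1.2 from the same base
for temp in 0.2 0.7 1.2; do
  printf 'FROM llama2\nPARAMETER temperature %s\n' "$temp" > Modelfile.tmp
  ollama create "llama2-temp-$temp" -f Modelfile.tmp   # reuses the llama2 weight blobs
done
```

Only the small parameter layer differs between the variants; the multi-gigabyte weight blob is shared.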
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/542/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/542/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/5282
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5282/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5282/comments
https://api.github.com/repos/ollama/ollama/issues/5282/events
https://github.com/ollama/ollama/pull/5282
2,373,645,578
PR_kwDOJ0Z1Ps5zjGy9
5,282
Docs for `api/embed`
{ "login": "royjhan", "id": 65097070, "node_id": "MDQ6VXNlcjY1MDk3MDcw", "avatar_url": "https://avatars.githubusercontent.com/u/65097070?v=4", "gravatar_id": "", "url": "https://api.github.com/users/royjhan", "html_url": "https://github.com/royjhan", "followers_url": "https://api.github.com/users/royjhan/followers", "following_url": "https://api.github.com/users/royjhan/following{/other_user}", "gists_url": "https://api.github.com/users/royjhan/gists{/gist_id}", "starred_url": "https://api.github.com/users/royjhan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/royjhan/subscriptions", "organizations_url": "https://api.github.com/users/royjhan/orgs", "repos_url": "https://api.github.com/users/royjhan/repos", "events_url": "https://api.github.com/users/royjhan/events{/privacy}", "received_events_url": "https://api.github.com/users/royjhan/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-06-25T20:56:28
2024-07-22T20:37:10
2024-07-22T20:37:08
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5282", "html_url": "https://github.com/ollama/ollama/pull/5282", "diff_url": "https://github.com/ollama/ollama/pull/5282.diff", "patch_url": "https://github.com/ollama/ollama/pull/5282.patch", "merged_at": "2024-07-22T20:37:08" }
Waiting on #5127
{ "login": "royjhan", "id": 65097070, "node_id": "MDQ6VXNlcjY1MDk3MDcw", "avatar_url": "https://avatars.githubusercontent.com/u/65097070?v=4", "gravatar_id": "", "url": "https://api.github.com/users/royjhan", "html_url": "https://github.com/royjhan", "followers_url": "https://api.github.com/users/royjhan/followers", "following_url": "https://api.github.com/users/royjhan/following{/other_user}", "gists_url": "https://api.github.com/users/royjhan/gists{/gist_id}", "starred_url": "https://api.github.com/users/royjhan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/royjhan/subscriptions", "organizations_url": "https://api.github.com/users/royjhan/orgs", "repos_url": "https://api.github.com/users/royjhan/repos", "events_url": "https://api.github.com/users/royjhan/events{/privacy}", "received_events_url": "https://api.github.com/users/royjhan/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5282/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5282/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3336
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3336/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3336/comments
https://api.github.com/repos/ollama/ollama/issues/3336/events
https://github.com/ollama/ollama/issues/3336
2,205,087,465
I_kwDOJ0Z1Ps6Dbvbp
3,336
ollama.ai certificate has expired, not possible to download models
{ "login": "psy-q", "id": 87557, "node_id": "MDQ6VXNlcjg3NTU3", "avatar_url": "https://avatars.githubusercontent.com/u/87557?v=4", "gravatar_id": "", "url": "https://api.github.com/users/psy-q", "html_url": "https://github.com/psy-q", "followers_url": "https://api.github.com/users/psy-q/followers", "following_url": "https://api.github.com/users/psy-q/following{/other_user}", "gists_url": "https://api.github.com/users/psy-q/gists{/gist_id}", "starred_url": "https://api.github.com/users/psy-q/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/psy-q/subscriptions", "organizations_url": "https://api.github.com/users/psy-q/orgs", "repos_url": "https://api.github.com/users/psy-q/repos", "events_url": "https://api.github.com/users/psy-q/events{/privacy}", "received_events_url": "https://api.github.com/users/psy-q/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
81
2024-03-25T07:32:52
2024-06-28T01:42:40
2024-03-25T20:54:32
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?
The ollama.ai certificate has expired today, and ollama now can't download models:

```
ollama run mistral
pulling manifest
Error: pull model manifest: Get "https://registry.ollama.ai/v2/library/mistral/manifests/latest": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2024-03-25T08:28:33+01:00 is after 2024-03-25T07:17:47Z
```

### What did you expect to see?
You should be able to pull models using the `ollama` command.
### Steps to reproduce
Try to pull a model.
### Are there any recent changes that introduced the issue?
_No response_
### OS
Linux
### Architecture
amd64
### Platform
_No response_
### Ollama version
0.1.29
### GPU
Nvidia
### GPU info
_No response_
### CPU
_No response_
### Other software
_No response_
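For anyone diagnosing a similar failure, the registry certificate's validity window can be checked directly with openssl before filing a report:

```sh
# print the notBefore/notAfter dates of the certificate served by the registry
echo | openssl s_client -connect registry.ollama.ai:443 -servername registry.ollama.ai 2>/dev/null \
  | openssl x509 -noout -dates
```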
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3336/reactions", "total_count": 100, "+1": 90, "-1": 0, "laugh": 0, "hooray": 0, "confused": 4, "heart": 0, "rocket": 0, "eyes": 6 }
https://api.github.com/repos/ollama/ollama/issues/3336/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6550
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6550/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6550/comments
https://api.github.com/repos/ollama/ollama/issues/6550/events
https://github.com/ollama/ollama/issues/6550
2,493,590,123
I_kwDOJ0Z1Ps6UoSpr
6,550
Cannot download models behind a proxy in the ollama Docker container
{ "login": "lakshmikanthgr", "id": 12883743, "node_id": "MDQ6VXNlcjEyODgzNzQz", "avatar_url": "https://avatars.githubusercontent.com/u/12883743?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lakshmikanthgr", "html_url": "https://github.com/lakshmikanthgr", "followers_url": "https://api.github.com/users/lakshmikanthgr/followers", "following_url": "https://api.github.com/users/lakshmikanthgr/following{/other_user}", "gists_url": "https://api.github.com/users/lakshmikanthgr/gists{/gist_id}", "starred_url": "https://api.github.com/users/lakshmikanthgr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lakshmikanthgr/subscriptions", "organizations_url": "https://api.github.com/users/lakshmikanthgr/orgs", "repos_url": "https://api.github.com/users/lakshmikanthgr/repos", "events_url": "https://api.github.com/users/lakshmikanthgr/events{/privacy}", "received_events_url": "https://api.github.com/users/lakshmikanthgr/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
10
2024-08-29T06:47:11
2024-09-22T22:59:00
2024-08-29T13:28:26
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?
I am able to run Ollama in a Docker container on my high-end machine running Ubuntu, but I am not able to pull any model. I am getting:

![image](https://github.com/user-attachments/assets/dd271879-a5d2-43d7-b522-6b99718ac54d)

### OS
Docker
### GPU
AMD
### CPU
Intel
### Ollama version
_No response_
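If the host can only reach the registry through a proxy, the proxy usually has to be passed into the container itself, since `ollama pull` runs inside it. A sketch with a placeholder proxy URL:

```sh
docker run -d \
  -e HTTPS_PROXY=http://proxy.example.com:3128 \
  -v ollama:/root/.ollama \
  -p 11434:11434 \
  --name ollama ollama/ollama
```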
{ "login": "lakshmikanthgr", "id": 12883743, "node_id": "MDQ6VXNlcjEyODgzNzQz", "avatar_url": "https://avatars.githubusercontent.com/u/12883743?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lakshmikanthgr", "html_url": "https://github.com/lakshmikanthgr", "followers_url": "https://api.github.com/users/lakshmikanthgr/followers", "following_url": "https://api.github.com/users/lakshmikanthgr/following{/other_user}", "gists_url": "https://api.github.com/users/lakshmikanthgr/gists{/gist_id}", "starred_url": "https://api.github.com/users/lakshmikanthgr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lakshmikanthgr/subscriptions", "organizations_url": "https://api.github.com/users/lakshmikanthgr/orgs", "repos_url": "https://api.github.com/users/lakshmikanthgr/repos", "events_url": "https://api.github.com/users/lakshmikanthgr/events{/privacy}", "received_events_url": "https://api.github.com/users/lakshmikanthgr/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6550/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6550/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3278
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3278/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3278/comments
https://api.github.com/repos/ollama/ollama/issues/3278/events
https://github.com/ollama/ollama/pull/3278
2,199,171,144
PR_kwDOJ0Z1Ps5qTzwj
3,278
Enabling ollama to run on Intel GPUs with SYCL backend
{ "login": "zhewang1-intc", "id": 72838274, "node_id": "MDQ6VXNlcjcyODM4Mjc0", "avatar_url": "https://avatars.githubusercontent.com/u/72838274?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zhewang1-intc", "html_url": "https://github.com/zhewang1-intc", "followers_url": "https://api.github.com/users/zhewang1-intc/followers", "following_url": "https://api.github.com/users/zhewang1-intc/following{/other_user}", "gists_url": "https://api.github.com/users/zhewang1-intc/gists{/gist_id}", "starred_url": "https://api.github.com/users/zhewang1-intc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zhewang1-intc/subscriptions", "organizations_url": "https://api.github.com/users/zhewang1-intc/orgs", "repos_url": "https://api.github.com/users/zhewang1-intc/repos", "events_url": "https://api.github.com/users/zhewang1-intc/events{/privacy}", "received_events_url": "https://api.github.com/users/zhewang1-intc/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
12
2024-03-21T05:44:14
2024-12-21T00:48:47
2024-05-28T23:30:50
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3278", "html_url": "https://github.com/ollama/ollama/pull/3278", "diff_url": "https://github.com/ollama/ollama/pull/3278.diff", "patch_url": "https://github.com/ollama/ollama/pull/3278.patch", "merged_at": "2024-05-28T23:30:50" }
Hi, I am submitting this PR to enable ollama to run on Intel GPUs with SYCL as the backend. This PR was [originally](https://github.com/ollama/ollama/pull/2458) started by @felipeagc, who is currently unable to participate actively due to relocation. The original PR had fallen behind the main branch, making it inconvenient for maintainers @mxyng @jmorganca @dhiltgen to review, so I rebased onto the latest main branch and opened this new pull request. I have verified that it works correctly on Ubuntu 22.04 with an ARC 770 GPU. I am not very familiar with this project yet, so I welcome any guidance and assistance from the community. Let's work together to make ollama support Intel GPU platforms. cc: @hshen14 @kevinintel @airmeng

UPDATE: works well on Windows 10 + ARC 770
UPDATE: works well on oneapi-docker-image (oneapi-basekit-Ubuntu22.04) + ARC 770
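As a usage note (an assumption on my part, not something stated in this PR): SYCL binaries generally need the oneAPI runtime on the library path, so running a SYCL-enabled build would look roughly like:

```sh
source /opt/intel/oneapi/setvars.sh   # assumed default oneAPI install path
./ollama serve
```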
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3278/reactions", "total_count": 15, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 15, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3278/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/356
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/356/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/356/comments
https://api.github.com/repos/ollama/ollama/issues/356/events
https://github.com/ollama/ollama/issues/356
1,852,260,461
I_kwDOJ0Z1Ps5uZ0Bt
356
Undefined symbols during go build
{ "login": "drusepth", "id": 538235, "node_id": "MDQ6VXNlcjUzODIzNQ==", "avatar_url": "https://avatars.githubusercontent.com/u/538235?v=4", "gravatar_id": "", "url": "https://api.github.com/users/drusepth", "html_url": "https://github.com/drusepth", "followers_url": "https://api.github.com/users/drusepth/followers", "following_url": "https://api.github.com/users/drusepth/following{/other_user}", "gists_url": "https://api.github.com/users/drusepth/gists{/gist_id}", "starred_url": "https://api.github.com/users/drusepth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/drusepth/subscriptions", "organizations_url": "https://api.github.com/users/drusepth/orgs", "repos_url": "https://api.github.com/users/drusepth/repos", "events_url": "https://api.github.com/users/drusepth/events{/privacy}", "received_events_url": "https://api.github.com/users/drusepth/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
5
2023-08-15T23:02:19
2023-08-16T01:56:08
2023-08-16T01:56:07
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Trying to build on a fresh Ubuntu 22 instance:

```console
ubuntu@machine:~/ollama$ go version
go version go1.21.0 linux/amd64
ubuntu@machine:~/ollama$ go build .
go: downloading github.com/chzyer/readline v1.5.1
go: downloading github.com/dustin/go-humanize v1.0.1
go: downloading github.com/olekukonko/tablewriter v0.0.5
go: downloading github.com/spf13/cobra v1.7.0
go: downloading golang.org/x/crypto v0.10.0
go: downloading github.com/mattn/go-runewidth v0.0.14
go: downloading github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
go: downloading golang.org/x/term v0.10.0
go: downloading github.com/gin-contrib/cors v1.4.0
go: downloading github.com/gin-gonic/gin v1.9.1
go: downloading gonum.org/v1/gonum v0.13.0
go: downloading github.com/spf13/pflag v1.0.5
go: downloading github.com/rivo/uniseg v0.2.0
go: downloading golang.org/x/sys v0.10.0
go: downloading github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
go: downloading github.com/gin-contrib/sse v0.1.0
go: downloading github.com/mattn/go-isatty v0.0.19
go: downloading golang.org/x/net v0.10.0
go: downloading github.com/go-playground/validator/v10 v10.14.0
go: downloading github.com/pelletier/go-toml/v2 v2.0.8
go: downloading github.com/ugorji/go/codec v1.2.11
go: downloading google.golang.org/protobuf v1.30.0
go: downloading gopkg.in/yaml.v3 v3.0.1
go: downloading github.com/gabriel-vasile/mimetype v1.4.2
go: downloading github.com/go-playground/universal-translator v0.18.1
go: downloading github.com/leodido/go-urn v1.2.4
go: downloading golang.org/x/text v0.10.0
go: downloading github.com/go-playground/locales v0.14.1
# github.com/jmorganca/ollama/llm
llm/ggml.go:49:2: undefined: llamaHyperparameters
llm/ggml.go:177:34: ggml.NumLayer undefined (type GGML has no field or method NumLayer)
llm/llm.go:38:14: ggml.FileType undefined (type *GGML has no field or method FileType)
llm/llm.go:70:10: undefined: newLlama
ubuntu@machine:~/ollama$ go build .
# github.com/jmorganca/ollama/llm
llm/ggml.go:49:2: undefined: llamaHyperparameters
llm/ggml.go:177:34: ggml.NumLayer undefined (type GGML has no field or method NumLayer)
llm/llm.go:38:14: ggml.FileType undefined (type *GGML has no field or method FileType)
llm/llm.go:70:10: undefined: newLlama
ubuntu@machine:~/ollama$ ls
Dockerfile README.md app docs format go.sum main.go progressbar server
LICENSE api cmd examples go.mod llm parser scripts vector
```

Happy to provide any other info that would be helpful. I didn't see any install instructions other than just building from source, so I feel like I'm probably just missing a dependency or something. Any assistance would be much appreciated!
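For what it's worth, builds of this era required a code-generation step to compile the vendored llama.cpp bindings before `go build` would succeed; a sketch of the likely fix, per the development docs of the time:

```sh
go generate ./...   # builds the vendored llama.cpp code that the llm package links against
go build .
```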
{ "login": "drusepth", "id": 538235, "node_id": "MDQ6VXNlcjUzODIzNQ==", "avatar_url": "https://avatars.githubusercontent.com/u/538235?v=4", "gravatar_id": "", "url": "https://api.github.com/users/drusepth", "html_url": "https://github.com/drusepth", "followers_url": "https://api.github.com/users/drusepth/followers", "following_url": "https://api.github.com/users/drusepth/following{/other_user}", "gists_url": "https://api.github.com/users/drusepth/gists{/gist_id}", "starred_url": "https://api.github.com/users/drusepth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/drusepth/subscriptions", "organizations_url": "https://api.github.com/users/drusepth/orgs", "repos_url": "https://api.github.com/users/drusepth/repos", "events_url": "https://api.github.com/users/drusepth/events{/privacy}", "received_events_url": "https://api.github.com/users/drusepth/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/356/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/356/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8384
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8384/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8384/comments
https://api.github.com/repos/ollama/ollama/issues/8384/events
https://github.com/ollama/ollama/issues/8384
2,781,737,542
I_kwDOJ0Z1Ps6lzfJG
8,384
Unable to access ollama model hosted on a Raspberry Pi 5 from another device
{ "login": "Simonko-912", "id": 179495001, "node_id": "U_kgDOCrLgWQ", "avatar_url": "https://avatars.githubusercontent.com/u/179495001?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Simonko-912", "html_url": "https://github.com/Simonko-912", "followers_url": "https://api.github.com/users/Simonko-912/followers", "following_url": "https://api.github.com/users/Simonko-912/following{/other_user}", "gists_url": "https://api.github.com/users/Simonko-912/gists{/gist_id}", "starred_url": "https://api.github.com/users/Simonko-912/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Simonko-912/subscriptions", "organizations_url": "https://api.github.com/users/Simonko-912/orgs", "repos_url": "https://api.github.com/users/Simonko-912/repos", "events_url": "https://api.github.com/users/Simonko-912/events{/privacy}", "received_events_url": "https://api.github.com/users/Simonko-912/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6677367769, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCL2Q", "url": "https://api.github.com/repos/ollama/ollama/labels/needs%20more%20info", "name": "needs more info", "color": "BA8041", "default": false, "description": "More information is needed to assist" } ]
closed
false
null
[]
null
12
2025-01-11T09:52:30
2025-01-28T21:11:13
2025-01-28T21:11:13
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?
When I access the AI model from the Raspberry Pi itself it works, but when I try from another device using the correct IP and port I can't connect. Page Assist shows the error: Unable to connect to Ollama 🦙. I tried changing the firewall settings, but it still didn't work.

Raspberry Pi model:

```
cat: /sys/firmware/devicetree/model: No such file or directory
```

Linux version:

```
Linux version 6.6.62+rpt-rpi-v8 (serge@raspberrypi.com) (gcc-12 (Debian 12.2.0-14) 12.2.0, GNU ld (GNU Binutils for Debian) 2.40) #1 SMP PREEMPT Debian 1:6.6.62-1+rpt1 (2024-11-25)
```

Sorry for my English; I'm not a native speaker.

### OS
Linux
### GPU
Other
### CPU
Other
### Ollama version
0.5.4
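On a systemd-based Linux install, the usual way to expose the server to other devices is to bind it to all interfaces via OLLAMA_HOST and restart the service; a sketch (the Pi's address below is a placeholder):

```sh
sudo systemctl edit ollama.service
# In the editor, add:
#   [Service]
#   Environment="OLLAMA_HOST=0.0.0.0"
sudo systemctl restart ollama
curl http://192.168.1.50:11434/api/tags   # run from another device on the LAN to verify
```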
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8384/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8384/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/153
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/153/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/153/comments
https://api.github.com/repos/ollama/ollama/issues/153/events
https://github.com/ollama/ollama/issues/153
1,814,970,242
I_kwDOJ0Z1Ps5sLj-C
153
Control model cache location (set ollama directory to something other than ~/.ollama)
{ "login": "weaversam8", "id": 2546219, "node_id": "MDQ6VXNlcjI1NDYyMTk=", "avatar_url": "https://avatars.githubusercontent.com/u/2546219?v=4", "gravatar_id": "", "url": "https://api.github.com/users/weaversam8", "html_url": "https://github.com/weaversam8", "followers_url": "https://api.github.com/users/weaversam8/followers", "following_url": "https://api.github.com/users/weaversam8/following{/other_user}", "gists_url": "https://api.github.com/users/weaversam8/gists{/gist_id}", "starred_url": "https://api.github.com/users/weaversam8/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/weaversam8/subscriptions", "organizations_url": "https://api.github.com/users/weaversam8/orgs", "repos_url": "https://api.github.com/users/weaversam8/repos", "events_url": "https://api.github.com/users/weaversam8/events{/privacy}", "received_events_url": "https://api.github.com/users/weaversam8/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
5
2023-07-21T00:13:40
2023-10-27T16:50:42
2023-10-27T16:50:42
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
It would be useful to configure the location where models are cached, so models could be downloaded and stored on external storage.
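For the record, this request was eventually addressed by the `OLLAMA_MODELS` environment variable; a minimal sketch, with the mount point as a placeholder:

```sh
export OLLAMA_MODELS=/mnt/external/ollama-models  # must be readable and writable by the ollama user
ollama serve
```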
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/153/reactions", "total_count": 21, "+1": 21, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/153/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7256
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7256/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7256/comments
https://api.github.com/repos/ollama/ollama/issues/7256/events
https://github.com/ollama/ollama/issues/7256
2,598,039,347
I_kwDOJ0Z1Ps6a2u8z
7,256
Last character being truncated by stop sequence
{ "login": "someone13574", "id": 81528246, "node_id": "MDQ6VXNlcjgxNTI4MjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/81528246?v=4", "gravatar_id": "", "url": "https://api.github.com/users/someone13574", "html_url": "https://github.com/someone13574", "followers_url": "https://api.github.com/users/someone13574/followers", "following_url": "https://api.github.com/users/someone13574/following{/other_user}", "gists_url": "https://api.github.com/users/someone13574/gists{/gist_id}", "starred_url": "https://api.github.com/users/someone13574/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/someone13574/subscriptions", "organizations_url": "https://api.github.com/users/someone13574/orgs", "repos_url": "https://api.github.com/users/someone13574/repos", "events_url": "https://api.github.com/users/someone13574/events{/privacy}", "received_events_url": "https://api.github.com/users/someone13574/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6677367769, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCL2Q", "url": "https://api.github.com/repos/ollama/ollama/labels/needs%20more%20info", "name": "needs more info", "color": "BA8041", "default": false, "description": "More information is needed to assist" } ]
open
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2024-10-18T17:27:40
2024-11-05T21:06:42
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? When running inference in raw mode with '\n\n' as a stop sequence, the character immediately before the stop sequence (often punctuation) is also being removed along with it. I assume this is caused by a bug in how partial stop-sequence matches are handled. ### OS Linux ### GPU Other ### CPU AMD ### Ollama version 0.3.11
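A minimal repro sketch for the report above, assuming a local server and the documented `/api/generate` parameters; the model name and prompt are illustrative, not taken from the original report:

```bash
# Expectation: output ends exactly at the text preceding "\n\n"; the reported
# bug is that the character just before the stop sequence is also dropped.
curl http://localhost:11434/api/generate -d '{
  "model": "llama3",
  "raw": true,
  "prompt": "Q: Name one color.\nA:",
  "options": { "stop": ["\n\n"] },
  "stream": false
}'
```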
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7256/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7256/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/2473
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2473/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2473/comments
https://api.github.com/repos/ollama/ollama/issues/2473/events
https://github.com/ollama/ollama/issues/2473
2,132,184,506
I_kwDOJ0Z1Ps5_Fo26
2,473
Packaging Ollama with ROCm support for Arch Linux
{ "login": "xyproto", "id": 52813, "node_id": "MDQ6VXNlcjUyODEz", "avatar_url": "https://avatars.githubusercontent.com/u/52813?v=4", "gravatar_id": "", "url": "https://api.github.com/users/xyproto", "html_url": "https://github.com/xyproto", "followers_url": "https://api.github.com/users/xyproto/followers", "following_url": "https://api.github.com/users/xyproto/following{/other_user}", "gists_url": "https://api.github.com/users/xyproto/gists{/gist_id}", "starred_url": "https://api.github.com/users/xyproto/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xyproto/subscriptions", "organizations_url": "https://api.github.com/users/xyproto/orgs", "repos_url": "https://api.github.com/users/xyproto/repos", "events_url": "https://api.github.com/users/xyproto/events{/privacy}", "received_events_url": "https://api.github.com/users/xyproto/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
18
2024-02-13T12:08:49
2024-06-03T16:19:21
2024-06-01T20:28:07
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi, Arch Linux maintainer of the `ollama` and `ollama-cuda` packages here. I want to package `ollama-rocm`, with support for AMD/ROCm, but I get error messages when building the package, and wonder if I am enabling support in the right way when building, or not. So far, I am building with `-tags rocm` and have added `clblast`, `rocm-hip-sdk` and `rocm-opencl-sdk` as dependencies. Here is the current error message: ``` [ 12%] Building CXX object common/CMakeFiles/build_info.dir/build-info.cpp.o /opt/rocm/llvm/bin/clang++ -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_MMV_Y=1 -DGGML_USE_CUBLAS -DGGML_USE_HIPBLAS -DK_QUANTS_PER_ITERATION=2 -DUSE_PROF_API=1 -D_GNU_SOURCE -D_XOPEN_SOURCE=600 -D__HIu cd /build/ollama-rocm/src/ollama/llm/llama.cpp/build/linux/x86_64/rocm_v1/common && /opt/rocm/llvm/bin/clang++ -DGGML_USE_CUBLAS -DGGML_USE_HIPBLAS -D_GNU_SOURCE -D_XOPEN_SOURCE=600 -march=p make[3]: Leaving directory '/build/ollama-rocm/src/ollama/llm/llama.cpp/build/linux/x86_64/rocm_v1' [ 12%] Built target build_info /build/ollama-rocm/src/ollama/llm/llama.cpp/ggml-cuda.cu:620:1: warning: function declared 'noreturn' should not return [-Winvalid-noreturn] } ^ /build/ollama-rocm/src/ollama/llm/llama.cpp/ggml-cuda.cu:6240:17: warning: enumeration value 'GGML_OP_POOL_COUNT' not handled in switch [-Wswitch] switch (op) { ^~ /build/ollama-rocm/src/ollama/llm/llama.cpp/ggml-cuda.cu:6252:25: warning: enumeration value 'GGML_OP_POOL_COUNT' not handled in switch [-Wswitch] switch (op) { ^~ /build/ollama-rocm/src/ollama/llm/llama.cpp/ggml-cuda.cu:6240:17: warning: enumeration value 'GGML_OP_POOL_COUNT' not handled in switch [-Wswitch] switch (op) { ^~ /build/ollama-rocm/src/ollama/llm/llama.cpp/ggml-cuda.cu:8908:5: note: in instantiation of function template specialization 'pool2d_nchw_kernel<float, float>' requested here pool2d_nchw_kernel<<<block_nums, CUDA_IM2COL_BLOCK_SIZE, 0, main_stream>>>(IH, IW, OH, OW, k1, k0, s1, s0, p1, p0, parallel_elements, src0_dd, dst_dd, op); ^ /build/ollama-rocm/src/ollama/llm/llama.cpp/ggml-cuda.cu:6252:25: warning: enumeration value 'GGML_OP_POOL_COUNT' not handled in switch [-Wswitch] switch (op) { ^~ error: option 'cf-protection=return' cannot be specified on this target error: option 'cf-protection=branch' cannot be specified on this target 5 warnings and 2 errors generated when compiling for gfx1010. make[3]: *** [CMakeFiles/ggml-rocm.dir/build.make:79: CMakeFiles/ggml-rocm.dir/ggml-cuda.cu.o] Error 1 make[3]: Leaving directory '/build/ollama-rocm/src/ollama/llm/llama.cpp/build/linux/x86_64/rocm_v1' make[2]: *** [CMakeFiles/Makefile2:727: CMakeFiles/ggml-rocm.dir/all] Error 2 make[2]: Leaving directory '/build/ollama-rocm/src/ollama/llm/llama.cpp/build/linux/x86_64/rocm_v1' make[1]: *** [CMakeFiles/Makefile2:2908: examples/server/CMakeFiles/ext_server.dir/rule] Error 2 make[1]: Leaving directory '/build/ollama-rocm/src/ollama/llm/llama.cpp/build/linux/x86_64/rocm_v1' make: *** [Makefile:1183: ext_server] Error 2 ``` And here is the `PKGBUILD` that I am working on: ```bash pkgname=ollama-rocm pkgdesc='Create, run and share large language models (LLMs) with ROCm' pkgver=0.1.24 pkgrel=1 arch=(x86_64) url='https://github.com/jmorganca/ollama' license=(MIT) _ollamacommit=69f392c9b7ea7c5cc3d46c29774e37fdef51abd8 # tag: v0.1.24 _llama_cpp_commit=f57fadc009cbff741a1961cb7896c47d73978d2c makedepends=(clblast cmake git go rocm-hip-sdk rocm-opencl-sdk) provides=(ollama) conflicts=(ollama) source=(git+$url#tag=v$pkgver llama.cpp::git+https://github.com/ggerganov/llama.cpp#commit=$_llama_cpp_commit ollama.service sysusers.conf tmpfiles.d) b2sums=('SKIP' 'SKIP' 'a773bbf16cf5ccc2ee505ad77c3f9275346ddf412be283cfeaee7c2e4c41b8637a31aaff8766ed769524ebddc0c03cf924724452639b62208e578d98b9176124' '3aabf135c4f18e1ad745ae8800db782b25b15305dfeaaa031b4501408ab7e7d01f66e8ebb5be59fc813cfbff6788d08d2e48dcf24ecc480a40ec9db8dbce9fec' 'e8f2b19e2474f30a4f984b45787950012668bf0acb5ad1ebb25cd9776925ab4a6aa927f8131ed53e35b1c71b32c504c700fe5b5145ecd25c7a8284373bb951ed') prepare() { cd ${pkgname/-rocm} rm -frv llm/llama.cpp # Copy git submodule files instead of symlinking because the build process is sensitive to symlinks. cp -r "$srcdir/llama.cpp" llm/llama.cpp # Turn LTO on and set the build type to Release sed -i 's,T_CODE=on,T_CODE=on -D LLAMA_LTO=on -D CMAKE_BUILD_TYPE=Release,g' llm/generate/gen_linux.sh } build() { cd ${pkgname/-rocm} export CGO_CFLAGS="$CFLAGS" CGO_CPPFLAGS="$CPPFLAGS" CGO_CXXFLAGS="$CXXFLAGS" CGO_LDFLAGS="$LDFLAGS" go generate ./... go build -buildmode=pie -trimpath -mod=readonly -modcacherw -ldflags=-linkmode=external \ -ldflags=-buildid='' -ldflags="-X=github.com/jmorganca/ollama/version.Version=$pkgver" -tags rocm } check() { cd ${pkgname/-rocm} go test -tags rocm ./api ./format ./ollama --version > /dev/null } package() { install -Dm755 ${pkgname/-rocm}/${pkgname/-rocm} "$pkgdir/usr/bin/${pkgname/-rocm}" install -dm755 "$pkgdir/var/lib/ollama" install -Dm644 ollama.service "$pkgdir/usr/lib/systemd/system/ollama.service" install -Dm644 sysusers.conf "$pkgdir/usr/lib/sysusers.d/ollama.conf" install -Dm644 tmpfiles.d "$pkgdir/usr/lib/tmpfiles.d/ollama.conf" install -Dm644 ${pkgname/-rocm}/LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE" } ``` In addition to this, solutions for how to set `CMAKE` flags without modifying `gen_linux.sh`, for building with "CPU only", "CUDA only" or "ROCm only" support, are warmly welcome. Thanks in advance.
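For what it's worth, `cf-protection` errors of this kind usually come from x86 hardening flags in the distro's default C/CXX flags, which clang rejects when compiling device code for AMD GPU ISAs such as gfx1010. A minimal workaround sketch, assuming the flags are inherited from `makepkg.conf` (untested here, and whether this is the only offending flag is an assumption):

```bash
# Strip the x86-only hardening flag before `go generate` runs so the HIP
# compiler never sees it; the flag name comes from the error output above.
export CFLAGS="${CFLAGS/-fcf-protection/}"
export CXXFLAGS="${CXXFLAGS/-fcf-protection/}"
```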
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2473/reactions", "total_count": 7, "+1": 0, "-1": 0, "laugh": 0, "hooray": 7, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2473/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8324
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8324/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8324/comments
https://api.github.com/repos/ollama/ollama/issues/8324/events
https://github.com/ollama/ollama/issues/8324
2,771,469,091
I_kwDOJ0Z1Ps6lMUMj
8,324
Add a CUDA+AVX2 (VNNI) runner to the Docker image.
{ "login": "x0wllaar", "id": 10964379, "node_id": "MDQ6VXNlcjEwOTY0Mzc5", "avatar_url": "https://avatars.githubusercontent.com/u/10964379?v=4", "gravatar_id": "", "url": "https://api.github.com/users/x0wllaar", "html_url": "https://github.com/x0wllaar", "followers_url": "https://api.github.com/users/x0wllaar/followers", "following_url": "https://api.github.com/users/x0wllaar/following{/other_user}", "gists_url": "https://api.github.com/users/x0wllaar/gists{/gist_id}", "starred_url": "https://api.github.com/users/x0wllaar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/x0wllaar/subscriptions", "organizations_url": "https://api.github.com/users/x0wllaar/orgs", "repos_url": "https://api.github.com/users/x0wllaar/repos", "events_url": "https://api.github.com/users/x0wllaar/events{/privacy}", "received_events_url": "https://api.github.com/users/x0wllaar/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
0
2025-01-06T21:22:27
2025-01-06T21:46:49
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
**Description**: I would like to ask for a CUDA+AVX2 (maybe VNNI) model runner in the default Docker image for Ollama. I think this can help with performance in partial offload scenarios. This should be supported at build time (#2281), but for some reason I can't find such a runner in the Docker image. It should be possible for me to do this locally by adding `--build-arg CUSTOM_CPU_FLAGS=avx2,avxvnni` to the Docker build, but I still think it would be beneficial to add a runner target named something like `cuda_v12_moderncpu` that enables AVX2 and AVX-VNNI and is built by default. I might work on this and submit a PR. **Benefits**: * Squeeze some more performance out of partial offload **Example Use Case**: Running a 32B model on a 16GB VRAM GPU (in my case 13900HX + Laptop 4090). **Environment Variables/Configuration**: No additional configuration needed. **Related Files and Code**: * make/cuda.make (I guess)
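For reference, the local build mentioned above would look roughly like the sketch below; the build-arg name is taken from the issue itself, while the image tag is illustrative:

```bash
# Builds a local image whose CPU runners are compiled with AVX2/AVX-VNNI,
# assuming the CUSTOM_CPU_FLAGS build arg behaves as described in #2281.
docker build --build-arg CUSTOM_CPU_FLAGS=avx2,avxvnni -t ollama:cuda-avx2 .
```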
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8324/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8324/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/7935
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7935/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7935/comments
https://api.github.com/repos/ollama/ollama/issues/7935/events
https://github.com/ollama/ollama/pull/7935
2,719,025,694
PR_kwDOJ0Z1Ps6EG_Yf
7,935
Update the /api/create endpoint to use JSON
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
4
2024-12-05T00:00:23
2025-01-01T02:02:33
2025-01-01T02:02:31
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7935", "html_url": "https://github.com/ollama/ollama/pull/7935", "diff_url": "https://github.com/ollama/ollama/pull/7935.diff", "patch_url": "https://github.com/ollama/ollama/pull/7935.patch", "merged_at": "2025-01-01T02:02:31" }
This PR changes how the POST `/api/create` endpoint works by changing the way the various options/parameters get serialized and passed to the server. Currently the create endpoint requires a `Modelfile`, which is a reasonable on-disk abstraction but falls down when serializing things such as files and passing them to the server. Also changed in this PR: the client no longer has to `zip` each of the files before passing them to the server. Previously, that made repeated calls to the `/api/create` endpoint very inefficient when updating a template or other parameters, because the weights would be zipped and pushed every time `/api/create` was called. I've kept the POST `/api/blobs/:digest` endpoint the same here; rather than being used to pass the zip file, it is now used to pass each of the files individually. The hope is that we can parallelize the process in a future PR in order to optimize it for speed. For reviewing the code, I recommend starting in `server/create.go`, which breaks out the endpoint. NOTE: This is a pretty large change and not all of the unit tests have been fixed.
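For reviewers who want to poke at the endpoint, a request under the new scheme might look like the sketch below; the field names (`model`, `from`, `system`) are assumptions based on this description rather than a spec taken from the PR:

```bash
# Sketch only: creates a model from an existing base plus a new system prompt,
# with no Modelfile and no re-zipped weights on repeated calls.
curl http://localhost:11434/api/create -d '{
  "model": "mario",
  "from": "llama3.2",
  "system": "You are Mario from Super Mario Bros."
}'
```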
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7935/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7935/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8622
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8622/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8622/comments
https://api.github.com/repos/ollama/ollama/issues/8622/events
https://github.com/ollama/ollama/issues/8622
2,814,520,394
I_kwDOJ0Z1Ps6nwixK
8,622
Support for Zero-shot Text Classification Models
{ "login": "BrainSlugs83", "id": 5217366, "node_id": "MDQ6VXNlcjUyMTczNjY=", "avatar_url": "https://avatars.githubusercontent.com/u/5217366?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BrainSlugs83", "html_url": "https://github.com/BrainSlugs83", "followers_url": "https://api.github.com/users/BrainSlugs83/followers", "following_url": "https://api.github.com/users/BrainSlugs83/following{/other_user}", "gists_url": "https://api.github.com/users/BrainSlugs83/gists{/gist_id}", "starred_url": "https://api.github.com/users/BrainSlugs83/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BrainSlugs83/subscriptions", "organizations_url": "https://api.github.com/users/BrainSlugs83/orgs", "repos_url": "https://api.github.com/users/BrainSlugs83/repos", "events_url": "https://api.github.com/users/BrainSlugs83/events{/privacy}", "received_events_url": "https://api.github.com/users/BrainSlugs83/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
0
2025-01-28T03:09:05
2025-01-28T03:09:05
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
It would be helpful to developers if Ollama supported zero-shot text classification models, such as [`deberta-v3-large-tasksource-nli`](https://huggingface.co/sileod/deberta-v3-large-tasksource-nli) or other offshoots of BERT. These are fairly small models that allow you to do things like pass in a list of categories and have the model classify text (or other inputs) into one of those categories. (Please consider supporting this as a feature: it would allow us to develop applications with the only LLM dependency being Ollama, so end users would not be required to install a bunch of weird Python dependencies or other developer tools.)
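Until something like this is supported natively, a rough prompt-based stand-in using the existing generate endpoint might look like the sketch below; this is not NLI-style zero-shot classification, and the model name and categories are illustrative:

```bash
# Emulates classification by constraining the prompt; accuracy depends entirely
# on the chat model, unlike a dedicated NLI classifier.
curl http://localhost:11434/api/generate -d '{
  "model": "llama3",
  "prompt": "Classify the text into exactly one of: billing, shipping, other.\nText: my package never arrived.\nAnswer with the category name only.",
  "stream": false
}'
```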
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8622/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8622/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/1394
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1394/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1394/comments
https://api.github.com/repos/ollama/ollama/issues/1394/events
https://github.com/ollama/ollama/issues/1394
2,027,152,209
I_kwDOJ0Z1Ps540-NR
1,394
magicoder doesn't work
{ "login": "iplayfast", "id": 751306, "node_id": "MDQ6VXNlcjc1MTMwNg==", "avatar_url": "https://avatars.githubusercontent.com/u/751306?v=4", "gravatar_id": "", "url": "https://api.github.com/users/iplayfast", "html_url": "https://github.com/iplayfast", "followers_url": "https://api.github.com/users/iplayfast/followers", "following_url": "https://api.github.com/users/iplayfast/following{/other_user}", "gists_url": "https://api.github.com/users/iplayfast/gists{/gist_id}", "starred_url": "https://api.github.com/users/iplayfast/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/iplayfast/subscriptions", "organizations_url": "https://api.github.com/users/iplayfast/orgs", "repos_url": "https://api.github.com/users/iplayfast/repos", "events_url": "https://api.github.com/users/iplayfast/events{/privacy}", "received_events_url": "https://api.github.com/users/iplayfast/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2023-12-05T21:28:17
2023-12-06T08:34:48
2023-12-06T08:34:48
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
A new model on the library page (magicoder) doesn't work: `ollama run magicoder:6.7b-s-ds-q3_K_L`
{ "login": "iplayfast", "id": 751306, "node_id": "MDQ6VXNlcjc1MTMwNg==", "avatar_url": "https://avatars.githubusercontent.com/u/751306?v=4", "gravatar_id": "", "url": "https://api.github.com/users/iplayfast", "html_url": "https://github.com/iplayfast", "followers_url": "https://api.github.com/users/iplayfast/followers", "following_url": "https://api.github.com/users/iplayfast/following{/other_user}", "gists_url": "https://api.github.com/users/iplayfast/gists{/gist_id}", "starred_url": "https://api.github.com/users/iplayfast/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/iplayfast/subscriptions", "organizations_url": "https://api.github.com/users/iplayfast/orgs", "repos_url": "https://api.github.com/users/iplayfast/repos", "events_url": "https://api.github.com/users/iplayfast/events{/privacy}", "received_events_url": "https://api.github.com/users/iplayfast/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1394/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1394/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3937
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3937/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3937/comments
https://api.github.com/repos/ollama/ollama/issues/3937/events
https://github.com/ollama/ollama/issues/3937
2,265,378,662
I_kwDOJ0Z1Ps6HBu9m
3,937
``/load`` with no parameters to clear chat context
{ "login": "renauddetry", "id": 720662, "node_id": "MDQ6VXNlcjcyMDY2Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/720662?v=4", "gravatar_id": "", "url": "https://api.github.com/users/renauddetry", "html_url": "https://github.com/renauddetry", "followers_url": "https://api.github.com/users/renauddetry/followers", "following_url": "https://api.github.com/users/renauddetry/following{/other_user}", "gists_url": "https://api.github.com/users/renauddetry/gists{/gist_id}", "starred_url": "https://api.github.com/users/renauddetry/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/renauddetry/subscriptions", "organizations_url": "https://api.github.com/users/renauddetry/orgs", "repos_url": "https://api.github.com/users/renauddetry/repos", "events_url": "https://api.github.com/users/renauddetry/events{/privacy}", "received_events_url": "https://api.github.com/users/renauddetry/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
2
2024-04-26T09:38:42
2024-05-01T21:44:37
2024-05-01T21:44:37
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
It would be fantastic to have a command that clears a chat's context. At the moment, getting a fresh context can be done with - ``/bye``, then starting the client again. Few keystrokes, but a long wait for the model to be reloaded. - ``/load <model>``, where ``<model>`` is the name of the most recently loaded model. Fast (the same model stays in memory), but many keystrokes, especially for models with tags other than ``latest``. As discussed [here](https://github.com/ollama/ollama/issues/1751), it would be great to add a ``/load`` command (no parameters) which has the same effect as ``/load <most recent model>``. Thank you for ollama, it's awesome!
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3937/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3937/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8320
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8320/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8320/comments
https://api.github.com/repos/ollama/ollama/issues/8320/events
https://github.com/ollama/ollama/issues/8320
2,770,967,611
I_kwDOJ0Z1Ps6lKZw7
8,320
yi-coder: Suffix not supported
{ "login": "pyscripter", "id": 1311616, "node_id": "MDQ6VXNlcjEzMTE2MTY=", "avatar_url": "https://avatars.githubusercontent.com/u/1311616?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pyscripter", "html_url": "https://github.com/pyscripter", "followers_url": "https://api.github.com/users/pyscripter/followers", "following_url": "https://api.github.com/users/pyscripter/following{/other_user}", "gists_url": "https://api.github.com/users/pyscripter/gists{/gist_id}", "starred_url": "https://api.github.com/users/pyscripter/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pyscripter/subscriptions", "organizations_url": "https://api.github.com/users/pyscripter/orgs", "repos_url": "https://api.github.com/users/pyscripter/repos", "events_url": "https://api.github.com/users/pyscripter/events{/privacy}", "received_events_url": "https://api.github.com/users/pyscripter/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2025-01-06T16:05:52
2025-01-07T04:55:56
2025-01-06T18:56:54
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? The yi-coder [documentation](https://ollama.com/library/yi-coder) provides the following code completion example: ```shell curl http://localhost:11434/api/generate -d '{ "model": "yi-coder", "prompt": "def compute_gcd(a, b):", "suffix": " return result", "options": { "temperature": 0 }, "stream": false }' ``` When running the example I get the following response: `{"error":"registry.ollama.ai/library/yi-coder:latest does not support insert"} ` I get the same response using yi-coder:9b-base. ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.5.4
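For context, whether a model "supports insert" is driven by its template: the `suffix` field is only accepted when the template references it. A sketch of what wiring that up might look like, where the `<FIM_*>` strings are placeholders for the model's real fill-in-the-middle special tokens (not Yi-Coder's actual vocabulary):

```bash
# Hypothetical fix sketch: add a template that handles .Suffix, then create a
# derived model. Replace the <FIM_*> placeholders with real FIM tokens first.
cat > Modelfile <<'EOF'
FROM yi-coder:9b-base
TEMPLATE """{{ if .Suffix }}<FIM_PREFIX>{{ .Prompt }}<FIM_SUFFIX>{{ .Suffix }}<FIM_MIDDLE>{{ else }}{{ .Prompt }}{{ end }}"""
EOF
ollama create yi-coder-fim -f Modelfile
```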
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8320/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8320/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7896
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7896/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7896/comments
https://api.github.com/repos/ollama/ollama/issues/7896/events
https://github.com/ollama/ollama/issues/7896
2,707,731,203
I_kwDOJ0Z1Ps6hZLMD
7,896
Installing bolt.new and qwen2.5-coder:7b locally (error cudaMalloc failed: out of memory)
{ "login": "LieLust", "id": 34171795, "node_id": "MDQ6VXNlcjM0MTcxNzk1", "avatar_url": "https://avatars.githubusercontent.com/u/34171795?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LieLust", "html_url": "https://github.com/LieLust", "followers_url": "https://api.github.com/users/LieLust/followers", "following_url": "https://api.github.com/users/LieLust/following{/other_user}", "gists_url": "https://api.github.com/users/LieLust/gists{/gist_id}", "starred_url": "https://api.github.com/users/LieLust/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LieLust/subscriptions", "organizations_url": "https://api.github.com/users/LieLust/orgs", "repos_url": "https://api.github.com/users/LieLust/repos", "events_url": "https://api.github.com/users/LieLust/events{/privacy}", "received_events_url": "https://api.github.com/users/LieLust/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
5
2024-11-30T17:41:35
2025-01-13T01:31:04
2025-01-13T01:31:04
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? ### Title: Issue with installing **bolt.new** and **qwen2.5-coder:7b** locally (error `cudaMalloc failed: out of memory`) #### Description: I am trying to install **bolt.new** and **qwen2.5-coder:7b** locally, but I get the following error: `{"error":"llama runner process has terminated: cudaMalloc failed: out of memory"}`. Running **qwen2.5-coder:7b** fails with this memory error. #### Environment: - **Operating System**: Windows - **Git version**: 2.47.1.windows.1 - **Node version**: v22.11.0 - **pnpm version**: 9.14.4 - **Ollama version**: 0.4.6 - **CPU**: AMD Ryzen 7 7800X3D - **GPU**: AMD Radeon RX 7900 XTX (24GB VRAM) - **RAM**: 32 GB #### Context and steps followed: 1. I followed the provided installation guide: [Google Document Guide](https://docs.google.com/document/d/19UNRP1c6ulDS_X7Ig7mRTI_EaT0xcvgimnfOJDKm7ig/edit?tab=t.0). 2. I attempted to install **qwen2.5-coder:7b** by following the steps, but the following error occurred: `{"error":"llama runner process has terminated: cudaMalloc failed: out of memory"}` 3. I suspect this error is related to GPU memory management, but since my **AMD Radeon RX 7900 XTX** has 24GB of VRAM, it doesn't seem to be a capacity issue. #### Error details: - The **cudaMalloc failed: out of memory** error seems to indicate an issue with memory allocation on the GPU. - Despite having sufficient VRAM resources, the error persists during execution. #### Request: - Are there any specific steps or configurations to resolve this memory issue with **qwen2.5-coder:7b**? - Is this a known issue for AMD graphics cards, and are there any recommended solutions or workarounds? Thank you for your help and suggestions! ### OS Windows ### GPU AMD ### CPU AMD ### Ollama version 0.4.6
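One common mitigation sketch while allocation issues like this are investigated: offload fewer layers so each GPU allocation is smaller, via the `num_gpu` option (the value 20 is illustrative and should be tuned per model):

```bash
# Fewer offloaded layers means smaller GPU allocations, at the cost of speed.
curl http://localhost:11434/api/generate -d '{
  "model": "qwen2.5-coder:7b",
  "prompt": "hello",
  "options": { "num_gpu": 20 },
  "stream": false
}'
```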
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7896/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7896/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8666
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8666/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8666/comments
https://api.github.com/repos/ollama/ollama/issues/8666/events
https://github.com/ollama/ollama/issues/8666
2,818,549,002
I_kwDOJ0Z1Ps6n_6UK
8,666
TERMUX ERROR
{ "login": "NeKosmico", "id": 165345955, "node_id": "U_kgDOCdr6ow", "avatar_url": "https://avatars.githubusercontent.com/u/165345955?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NeKosmico", "html_url": "https://github.com/NeKosmico", "followers_url": "https://api.github.com/users/NeKosmico/followers", "following_url": "https://api.github.com/users/NeKosmico/following{/other_user}", "gists_url": "https://api.github.com/users/NeKosmico/gists{/gist_id}", "starred_url": "https://api.github.com/users/NeKosmico/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NeKosmico/subscriptions", "organizations_url": "https://api.github.com/users/NeKosmico/orgs", "repos_url": "https://api.github.com/users/NeKosmico/repos", "events_url": "https://api.github.com/users/NeKosmico/events{/privacy}", "received_events_url": "https://api.github.com/users/NeKosmico/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
1
2025-01-29T15:30:45
2025-01-29T16:10:43
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I wanted to run Ollama in the Termux (Android) application. Everything was going well... until the following happened at this step: ```bash ~/ollama $ go build . # github.com/ollama/ollama/discover gpu_info_cudart.c:61:13: warning: comparison of different enumeration types ('cudartReturn_t' (aka 'enum cudartReturn_enum') and 'enum cudaError_enum') [-Wenum-compare] gpu_info_cudart.c:171:60: warning: format specifies type 'unsigned long' but the argument has type 'uint64_t' (aka 'unsigned long long') [-Wformat] ./gpu_info.h:33:23: note: expanded from macro 'LOG' gpu_info_cudart.c:172:59: warning: format specifies type 'unsigned long' but the argument has type 'uint64_t' (aka 'unsigned long long') [-Wformat] ./gpu_info.h:33:23: note: expanded from macro 'LOG' gpu_info_cudart.c:173:59: warning: format specifies type 'unsigned long' but the argument has type 'uint64_t' (aka 'unsigned long long') [-Wformat] ./gpu_info.h:33:23: note: expanded from macro 'LOG' # github.com/ollama/ollama/discover gpu_info_nvcuda.c:196:63: warning: format specifies type 'unsigned long' but the argument has type 'uint64_t' (aka 'unsigned long long') [-Wformat] ./gpu_info.h:33:23: note: expanded from macro 'LOG' gpu_info_nvcuda.c:197:62: warning: format specifies type 'unsigned long' but the argument has type 'uint64_t' (aka 'unsigned long long') [-Wformat] ./gpu_info.h:33:23: note: expanded from macro 'LOG' ``` I don't know if there is any solution or not. Your answers are appreciated :'3 ### OS Linux ### GPU _No response_ ### CPU _No response_ ### Ollama version 0.5.12
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8666/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8666/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/8651
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8651/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8651/comments
https://api.github.com/repos/ollama/ollama/issues/8651/events
https://github.com/ollama/ollama/issues/8651
2,817,464,249
I_kwDOJ0Z1Ps6n7xe5
8,651
Intel ARC 770 memory is not supported
{ "login": "yiteei", "id": 77902908, "node_id": "MDQ6VXNlcjc3OTAyOTA4", "avatar_url": "https://avatars.githubusercontent.com/u/77902908?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yiteei", "html_url": "https://github.com/yiteei", "followers_url": "https://api.github.com/users/yiteei/followers", "following_url": "https://api.github.com/users/yiteei/following{/other_user}", "gists_url": "https://api.github.com/users/yiteei/gists{/gist_id}", "starred_url": "https://api.github.com/users/yiteei/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yiteei/subscriptions", "organizations_url": "https://api.github.com/users/yiteei/orgs", "repos_url": "https://api.github.com/users/yiteei/repos", "events_url": "https://api.github.com/users/yiteei/events{/privacy}", "received_events_url": "https://api.github.com/users/yiteei/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2025-01-29T07:42:05
2025-01-29T23:28:53
2025-01-29T23:28:52
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? ![Image](https://github.com/user-attachments/assets/9c4323e9-966c-435e-bd5c-f9f749322d94) Windows 11 24H2 Intel ARC 770 Intel I5-12600K ### OS Windows ### GPU Intel ### CPU Intel ### Ollama version 0.5.7
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8651/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8651/timeline
null
completed
false