Dataset schema (one row per ollama/ollama GitHub issue or pull request):

| column | dtype | lengths / classes |
| --- | --- | --- |
| url | string | lengths 51-54 |
| repository_url | string | 1 class |
| labels_url | string | lengths 65-68 |
| comments_url | string | lengths 60-63 |
| events_url | string | lengths 58-61 |
| html_url | string | lengths 39-44 |
| id | int64 | 1.78B-2.82B |
| node_id | string | lengths 18-19 |
| number | int64 | 1-8.69k |
| title | string | lengths 1-382 |
| user | dict | |
| labels | list | lengths 0-5 |
| state | string | 2 classes |
| locked | bool | 1 class |
| assignee | dict | |
| assignees | list | lengths 0-2 |
| milestone | null | |
| comments | int64 | 0-323 |
| created_at | timestamp[s] | |
| updated_at | timestamp[s] | |
| closed_at | timestamp[s] | |
| author_association | string | 4 classes |
| sub_issues_summary | dict | |
| active_lock_reason | null | |
| draft | bool | 2 classes |
| pull_request | dict | |
| body | string | lengths 2-118k |
| closed_by | dict | |
| reactions | dict | |
| timeline_url | string | lengths 60-63 |
| performed_via_github_app | null | |
| state_reason | string | 4 classes |
| is_pull_request | bool | 2 classes |
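The records below follow this schema. As a minimal sketch of working with such an export, assuming the rows have been saved locally as JSON Lines in a file named `ollama_issues.jsonl` (the filename, and having such a local export at all, are assumptions here, not part of the dataset):

```python
# Load a JSON Lines export with the Hugging Face `datasets` library and
# pull out a few of the schema's columns. Purely illustrative.
from datasets import load_dataset

ds = load_dataset("json", data_files="ollama_issues.jsonl", split="train")

# Keep only pull requests, using the is_pull_request flag from the schema.
prs = ds.filter(lambda row: row["is_pull_request"])
for row in prs.select(range(min(3, len(prs)))):
    print(row["number"], row["state"], row["title"])
```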
url: https://api.github.com/repos/ollama/ollama/issues/8012
repository_url: https://api.github.com/repos/ollama/ollama
labels_url: https://api.github.com/repos/ollama/ollama/issues/8012/labels{/name}
comments_url: https://api.github.com/repos/ollama/ollama/issues/8012/comments
events_url: https://api.github.com/repos/ollama/ollama/issues/8012/events
html_url: https://github.com/ollama/ollama/pull/8012
id: 2,726,933,403
node_id: PR_kwDOJ0Z1Ps6EiDxA
number: 8,012
title: Return err when NewHipLib() detect error.
user:
{ "login": "zhanluxianshen", "id": 161462588, "node_id": "U_kgDOCZ-5PA", "avatar_url": "https://avatars.githubusercontent.com/u/161462588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zhanluxianshen", "html_url": "https://github.com/zhanluxianshen", "followers_url": "https://api.github.com/users/zhanluxianshen/followers", "following_url": "https://api.github.com/users/zhanluxianshen/following{/other_user}", "gists_url": "https://api.github.com/users/zhanluxianshen/gists{/gist_id}", "starred_url": "https://api.github.com/users/zhanluxianshen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zhanluxianshen/subscriptions", "organizations_url": "https://api.github.com/users/zhanluxianshen/orgs", "repos_url": "https://api.github.com/users/zhanluxianshen/repos", "events_url": "https://api.github.com/users/zhanluxianshen/events{/privacy}", "received_events_url": "https://api.github.com/users/zhanluxianshen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: 1
created_at: 2024-12-09T12:39:53
updated_at: 2024-12-11T00:44:40
closed_at: 2024-12-11T00:32:29
author_association: CONTRIBUTOR
sub_issues_summary: { "total": 0, "completed": 0, "percent_completed": 0 }
active_lock_reason: null
draft: false
pull_request: { "url": "https://api.github.com/repos/ollama/ollama/pulls/8012", "html_url": "https://github.com/ollama/ollama/pull/8012", "diff_url": "https://github.com/ollama/ollama/pull/8012.diff", "patch_url": "https://github.com/ollama/ollama/pull/8012.patch", "merged_at": "2024-12-11T00:32:29" }
body: Return err when NewHipLib() detect error.
closed_by:
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
reactions: { "url": "https://api.github.com/repos/ollama/ollama/issues/8012/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
timeline_url: https://api.github.com/repos/ollama/ollama/issues/8012/timeline
performed_via_github_app: null
state_reason: null
is_pull_request: true

url: https://api.github.com/repos/ollama/ollama/issues/3338
repository_url: https://api.github.com/repos/ollama/ollama
labels_url: https://api.github.com/repos/ollama/ollama/issues/3338/labels{/name}
comments_url: https://api.github.com/repos/ollama/ollama/issues/3338/comments
events_url: https://api.github.com/repos/ollama/ollama/issues/3338/events
html_url: https://github.com/ollama/ollama/pull/3338
id: 2,205,158,223
node_id: PR_kwDOJ0Z1Ps5qoEpz
number: 3,338
title: Update README.md
user:
{ "login": "drazdra", "id": 133811709, "node_id": "U_kgDOB_nN_Q", "avatar_url": "https://avatars.githubusercontent.com/u/133811709?v=4", "gravatar_id": "", "url": "https://api.github.com/users/drazdra", "html_url": "https://github.com/drazdra", "followers_url": "https://api.github.com/users/drazdra/followers", "following_url": "https://api.github.com/users/drazdra/following{/other_user}", "gists_url": "https://api.github.com/users/drazdra/gists{/gist_id}", "starred_url": "https://api.github.com/users/drazdra/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/drazdra/subscriptions", "organizations_url": "https://api.github.com/users/drazdra/orgs", "repos_url": "https://api.github.com/users/drazdra/repos", "events_url": "https://api.github.com/users/drazdra/events{/privacy}", "received_events_url": "https://api.github.com/users/drazdra/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: 0
created_at: 2024-03-25T08:19:05
updated_at: 2024-03-26T06:49:38
closed_at: 2024-03-25T18:50:51
author_association: CONTRIBUTOR
sub_issues_summary: { "total": 0, "completed": 0, "percent_completed": 0 }
active_lock_reason: null
draft: false
pull_request: { "url": "https://api.github.com/repos/ollama/ollama/pulls/3338", "html_url": "https://github.com/ollama/ollama/pull/3338", "diff_url": "https://github.com/ollama/ollama/pull/3338.diff", "patch_url": "https://github.com/ollama/ollama/pull/3338.patch", "merged_at": "2024-03-25T18:50:51" }
body: adding drazdra/ollama-chats to the list of UI :)
closed_by:
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
reactions: { "url": "https://api.github.com/repos/ollama/ollama/issues/3338/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
timeline_url: https://api.github.com/repos/ollama/ollama/issues/3338/timeline
performed_via_github_app: null
state_reason: null
is_pull_request: true

url: https://api.github.com/repos/ollama/ollama/issues/7192
repository_url: https://api.github.com/repos/ollama/ollama
labels_url: https://api.github.com/repos/ollama/ollama/issues/7192/labels{/name}
comments_url: https://api.github.com/repos/ollama/ollama/issues/7192/comments
events_url: https://api.github.com/repos/ollama/ollama/issues/7192/events
html_url: https://github.com/ollama/ollama/pull/7192
id: 2,584,119,538
node_id: PR_kwDOJ0Z1Ps5-dBWG
number: 7,192
title: Fix regression on older macos versions
user:
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: 5
created_at: 2024-10-13T17:08:14
updated_at: 2024-10-13T17:47:45
closed_at: 2024-10-13T17:47:42
author_association: COLLABORATOR
sub_issues_summary: { "total": 0, "completed": 0, "percent_completed": 0 }
active_lock_reason: null
draft: false
pull_request: { "url": "https://api.github.com/repos/ollama/ollama/pulls/7192", "html_url": "https://github.com/ollama/ollama/pull/7192", "diff_url": "https://github.com/ollama/ollama/pull/7192.diff", "patch_url": "https://github.com/ollama/ollama/pull/7192.patch", "merged_at": "2024-10-13T17:47:42" }
body: The new cgo compilation requires a flag to target older macOS versions. Fixes #7190
closed_by:
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
reactions: { "url": "https://api.github.com/repos/ollama/ollama/issues/7192/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
timeline_url: https://api.github.com/repos/ollama/ollama/issues/7192/timeline
performed_via_github_app: null
state_reason: null
is_pull_request: true

url: https://api.github.com/repos/ollama/ollama/issues/1081
repository_url: https://api.github.com/repos/ollama/ollama
labels_url: https://api.github.com/repos/ollama/ollama/issues/1081/labels{/name}
comments_url: https://api.github.com/repos/ollama/ollama/issues/1081/comments
events_url: https://api.github.com/repos/ollama/ollama/issues/1081/events
html_url: https://github.com/ollama/ollama/issues/1081
id: 1,988,673,245
node_id: I_kwDOJ0Z1Ps52iL7d
number: 1,081
title: Support for additional formats such as `yaml`
user:
{ "login": "breitburg", "id": 25728414, "node_id": "MDQ6VXNlcjI1NzI4NDE0", "avatar_url": "https://avatars.githubusercontent.com/u/25728414?v=4", "gravatar_id": "", "url": "https://api.github.com/users/breitburg", "html_url": "https://github.com/breitburg", "followers_url": "https://api.github.com/users/breitburg/followers", "following_url": "https://api.github.com/users/breitburg/following{/other_user}", "gists_url": "https://api.github.com/users/breitburg/gists{/gist_id}", "starred_url": "https://api.github.com/users/breitburg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/breitburg/subscriptions", "organizations_url": "https://api.github.com/users/breitburg/orgs", "repos_url": "https://api.github.com/users/breitburg/repos", "events_url": "https://api.github.com/users/breitburg/events{/privacy}", "received_events_url": "https://api.github.com/users/breitburg/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
labels: [ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
state: open
locked: false
assignee: null
assignees: []
milestone: null
comments: 1
created_at: 2023-11-11T02:49:48
updated_at: 2023-11-14T14:34:31
closed_at: null
author_association: CONTRIBUTOR
sub_issues_summary: { "total": 0, "completed": 0, "percent_completed": 0 }
active_lock_reason: null
draft: null
pull_request: null
body: Good afternoon! Recently, Ollama received an update that introduces [JSON mode](https://github.com/jmorganca/ollama/blob/main/docs/api.md#json-mode) to the generate method. Although it works well, there is an issue: it is not possible to decode JSON during generation. Meanwhile, YAML is completely decodable while being generated. Therefore, I propose the addition of YAML mode as a response format. Thank you for your work!
closed_by: null
reactions: { "url": "https://api.github.com/repos/ollama/ollama/issues/1081/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
timeline_url: https://api.github.com/repos/ollama/ollama/issues/1081/timeline
performed_via_github_app: null
state_reason: null
is_pull_request: false
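The decodability claim in this issue's body is easy to demonstrate: a JSON document only parses once the closing brace has arrived, while a YAML document cut off at a line boundary is usually already valid. A minimal sketch (requires PyYAML; the sample payload is illustrative, not from the issue):

```python
import json
import yaml  # PyYAML

full_json = '{"format": "yaml", "reason": "decodable while being generated"}'
full_yaml = "format: yaml\nreason: decodable while being generated\n"

# JSON: any prefix short of the complete document fails to parse.
try:
    json.loads(full_json[: len(full_json) // 2])
except json.JSONDecodeError as err:
    print("partial JSON fails:", err)

# YAML: dropping the last line still leaves a valid document.
partial_yaml = full_yaml.splitlines()[0]
print("partial YAML parses:", yaml.safe_load(partial_yaml))
```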

url: https://api.github.com/repos/ollama/ollama/issues/5120
repository_url: https://api.github.com/repos/ollama/ollama
labels_url: https://api.github.com/repos/ollama/ollama/issues/5120/labels{/name}
comments_url: https://api.github.com/repos/ollama/ollama/issues/5120/comments
events_url: https://api.github.com/repos/ollama/ollama/issues/5120/events
html_url: https://github.com/ollama/ollama/issues/5120
id: 2,360,492,581
node_id: I_kwDOJ0Z1Ps6MskIl
number: 5,120
title: deepseek-code-v2
user:
{ "login": "MeDott29", "id": 13264408, "node_id": "MDQ6VXNlcjEzMjY0NDA4", "avatar_url": "https://avatars.githubusercontent.com/u/13264408?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MeDott29", "html_url": "https://github.com/MeDott29", "followers_url": "https://api.github.com/users/MeDott29/followers", "following_url": "https://api.github.com/users/MeDott29/following{/other_user}", "gists_url": "https://api.github.com/users/MeDott29/gists{/gist_id}", "starred_url": "https://api.github.com/users/MeDott29/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MeDott29/subscriptions", "organizations_url": "https://api.github.com/users/MeDott29/orgs", "repos_url": "https://api.github.com/users/MeDott29/repos", "events_url": "https://api.github.com/users/MeDott29/events{/privacy}", "received_events_url": "https://api.github.com/users/MeDott29/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
labels: [ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
state: closed
locked: false
assignee:
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
assignees:
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
milestone: null
comments: 5
created_at: 2024-06-18T18:52:48
updated_at: 2024-06-24T21:50:02
closed_at: 2024-06-19T02:35:26
author_association: NONE
sub_issues_summary: { "total": 0, "completed": 0, "percent_completed": 0 }
active_lock_reason: null
draft: null
pull_request: null
body:
### What is the issue?

I don't have a problem running codestral so the problem isn't with the model size, right?

```bash
ollama run deepseek-coder-v2
pulling manifest
pulling 5ff0abeeac1d... 100% ▕██████████████████████████████████████████████████████████████████████████████████▏ 8.9 GB
pulling 732caedf08d1... 100% ▕██████████████████████████████████████████████████████████████████████████████████▏ 112 B
pulling 4bb71764481f... 100% ▕██████████████████████████████████████████████████████████████████████████████████▏ 13 KB
pulling 1c8f573e830c... 100% ▕██████████████████████████████████████████████████████████████████████████████████▏ 1.1 KB
pulling 19f2fb9e8bc6... 100% ▕██████████████████████████████████████████████████████████████████████████████████▏ 32 B
pulling c17ee51fe152... 100% ▕██████████████████████████████████████████████████████████████████████████████████▏ 568 B
verifying sha256 digest
writing manifest
removing any unused layers
success
Error: llama runner process has terminated: signal: aborted (core dumped) error:failed to create context with model '/usr/share/ollama/.ollama/models/blobs/sha256-5ff0abeeac1d2dbdd5455c0b49ba3b29a9ce3c1fb181b2eef2e948689d55d046'
```

Same issue with deepseek-v2

```bash
```

### OS
Linux

### GPU
Nvidia

### CPU
Intel

### Ollama version
0.1.44
closed_by:
{ "login": "MeDott29", "id": 13264408, "node_id": "MDQ6VXNlcjEzMjY0NDA4", "avatar_url": "https://avatars.githubusercontent.com/u/13264408?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MeDott29", "html_url": "https://github.com/MeDott29", "followers_url": "https://api.github.com/users/MeDott29/followers", "following_url": "https://api.github.com/users/MeDott29/following{/other_user}", "gists_url": "https://api.github.com/users/MeDott29/gists{/gist_id}", "starred_url": "https://api.github.com/users/MeDott29/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MeDott29/subscriptions", "organizations_url": "https://api.github.com/users/MeDott29/orgs", "repos_url": "https://api.github.com/users/MeDott29/repos", "events_url": "https://api.github.com/users/MeDott29/events{/privacy}", "received_events_url": "https://api.github.com/users/MeDott29/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
reactions: { "url": "https://api.github.com/repos/ollama/ollama/issues/5120/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
timeline_url: https://api.github.com/repos/ollama/ollama/issues/5120/timeline
performed_via_github_app: null
state_reason: completed
is_pull_request: false

url: https://api.github.com/repos/ollama/ollama/issues/4224
repository_url: https://api.github.com/repos/ollama/ollama
labels_url: https://api.github.com/repos/ollama/ollama/issues/4224/labels{/name}
comments_url: https://api.github.com/repos/ollama/ollama/issues/4224/comments
events_url: https://api.github.com/repos/ollama/ollama/issues/4224/events
html_url: https://github.com/ollama/ollama/issues/4224
id: 2,282,725,932
node_id: I_kwDOJ0Z1Ps6ID6Is
number: 4,224
title: embeddings support batch?
user:
{ "login": "yuanjie-ai", "id": 20265321, "node_id": "MDQ6VXNlcjIwMjY1MzIx", "avatar_url": "https://avatars.githubusercontent.com/u/20265321?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yuanjie-ai", "html_url": "https://github.com/yuanjie-ai", "followers_url": "https://api.github.com/users/yuanjie-ai/followers", "following_url": "https://api.github.com/users/yuanjie-ai/following{/other_user}", "gists_url": "https://api.github.com/users/yuanjie-ai/gists{/gist_id}", "starred_url": "https://api.github.com/users/yuanjie-ai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yuanjie-ai/subscriptions", "organizations_url": "https://api.github.com/users/yuanjie-ai/orgs", "repos_url": "https://api.github.com/users/yuanjie-ai/repos", "events_url": "https://api.github.com/users/yuanjie-ai/events{/privacy}", "received_events_url": "https://api.github.com/users/yuanjie-ai/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
labels: [ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: 1
created_at: 2024-05-07T09:20:17
updated_at: 2024-07-15T19:14:25
closed_at: 2024-07-15T19:14:25
author_association: NONE
sub_issues_summary: { "total": 0, "completed": 0, "percent_completed": 0 }
active_lock_reason: null
draft: null
pull_request: null
body: embeddings support batch?
closed_by:
{ "login": "royjhan", "id": 65097070, "node_id": "MDQ6VXNlcjY1MDk3MDcw", "avatar_url": "https://avatars.githubusercontent.com/u/65097070?v=4", "gravatar_id": "", "url": "https://api.github.com/users/royjhan", "html_url": "https://github.com/royjhan", "followers_url": "https://api.github.com/users/royjhan/followers", "following_url": "https://api.github.com/users/royjhan/following{/other_user}", "gists_url": "https://api.github.com/users/royjhan/gists{/gist_id}", "starred_url": "https://api.github.com/users/royjhan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/royjhan/subscriptions", "organizations_url": "https://api.github.com/users/royjhan/orgs", "repos_url": "https://api.github.com/users/royjhan/repos", "events_url": "https://api.github.com/users/royjhan/events{/privacy}", "received_events_url": "https://api.github.com/users/royjhan/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
reactions: { "url": "https://api.github.com/repos/ollama/ollama/issues/4224/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
timeline_url: https://api.github.com/repos/ollama/ollama/issues/4224/timeline
performed_via_github_app: null
state_reason: completed
is_pull_request: false
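This record was closed as completed around the time Ollama gained a batch-capable embeddings endpoint. A minimal sketch against a local server, assuming the `/api/embed` endpoint that accepts a list of inputs and an already-pulled embedding model such as `all-minilm` (treat the endpoint shape and model name as assumptions, not as part of this issue):

```python
import requests

# Hypothetical local setup: an Ollama server on the default port with an
# embedding model already pulled. "input" may be a single string or a list.
resp = requests.post(
    "http://localhost:11434/api/embed",
    json={"model": "all-minilm", "input": ["first document", "second document"]},
    timeout=60,
)
resp.raise_for_status()
vectors = resp.json()["embeddings"]  # one embedding vector per input string
print(len(vectors), len(vectors[0]))
```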

url: https://api.github.com/repos/ollama/ollama/issues/940
repository_url: https://api.github.com/repos/ollama/ollama
labels_url: https://api.github.com/repos/ollama/ollama/issues/940/labels{/name}
comments_url: https://api.github.com/repos/ollama/ollama/issues/940/comments
events_url: https://api.github.com/repos/ollama/ollama/issues/940/events
html_url: https://github.com/ollama/ollama/pull/940
id: 1,966,365,508
node_id: PR_kwDOJ0Z1Ps5eBI7r
number: 940
title: Don't quit ioloop on `NUL` character
user:
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: 0
created_at: 2023-10-28T02:08:56
updated_at: 2023-10-29T05:48:43
closed_at: 2023-10-28T03:01:49
author_association: MEMBER
sub_issues_summary: { "total": 0, "completed": 0, "percent_completed": 0 }
active_lock_reason: null
draft: false
pull_request: { "url": "https://api.github.com/repos/ollama/ollama/pulls/940", "html_url": "https://github.com/ollama/ollama/pull/940", "diff_url": "https://github.com/ollama/ollama/pull/940.diff", "patch_url": "https://github.com/ollama/ollama/pull/940.patch", "merged_at": "2023-10-28T03:01:49" }
body: The `stdin` read loop would stop on receiving a `NUL` character, triggered by ctrl+space or other key combos.
closed_by:
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
reactions: { "url": "https://api.github.com/repos/ollama/ollama/issues/940/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
timeline_url: https://api.github.com/repos/ollama/ollama/issues/940/timeline
performed_via_github_app: null
state_reason: null
is_pull_request: true
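The actual fix lives in Ollama's Go readline code; purely as an illustration of the behavior this PR describes, here is a sketch of a read loop that skips `NUL` characters instead of treating them as end-of-input (a Python analogy, not the real implementation):

```python
import io

def read_line_ignoring_nul(stream) -> str:
    """Read one line, skipping NUL characters (e.g. produced by
    ctrl+space) rather than quitting the loop when one arrives."""
    buf = []
    while True:
        ch = stream.read(1)
        if ch == "" or ch == "\n":  # genuine end of input / end of line
            break
        if ch == "\x00":            # NUL: ignore it and keep reading
            continue
        buf.append(ch)
    return "".join(buf)

# ctrl+space injects "\x00"; the loop should still return the full line.
print(read_line_ignoring_nul(io.StringIO("Hi\x00 there\n")))  # -> "Hi there"
```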

url: https://api.github.com/repos/ollama/ollama/issues/5461
repository_url: https://api.github.com/repos/ollama/ollama
labels_url: https://api.github.com/repos/ollama/ollama/issues/5461/labels{/name}
comments_url: https://api.github.com/repos/ollama/ollama/issues/5461/comments
events_url: https://api.github.com/repos/ollama/ollama/issues/5461/events
html_url: https://github.com/ollama/ollama/issues/5461
id: 2,388,625,850
node_id: I_kwDOJ0Z1Ps6OX4m6
number: 5,461
title: Webhook support
user:
{ "login": "drale2k", "id": 271416, "node_id": "MDQ6VXNlcjI3MTQxNg==", "avatar_url": "https://avatars.githubusercontent.com/u/271416?v=4", "gravatar_id": "", "url": "https://api.github.com/users/drale2k", "html_url": "https://github.com/drale2k", "followers_url": "https://api.github.com/users/drale2k/followers", "following_url": "https://api.github.com/users/drale2k/following{/other_user}", "gists_url": "https://api.github.com/users/drale2k/gists{/gist_id}", "starred_url": "https://api.github.com/users/drale2k/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/drale2k/subscriptions", "organizations_url": "https://api.github.com/users/drale2k/orgs", "repos_url": "https://api.github.com/users/drale2k/repos", "events_url": "https://api.github.com/users/drale2k/events{/privacy}", "received_events_url": "https://api.github.com/users/drale2k/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
labels: [ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
state: open
locked: false
assignee: null
assignees: []
milestone: null
comments: 1
created_at: 2024-07-03T13:25:18
updated_at: 2024-12-02T03:08:51
closed_at: null
author_association: NONE
sub_issues_summary: { "total": 0, "completed": 0, "percent_completed": 0 }
active_lock_reason: null
draft: null
pull_request: null
body: Since Ollama now supports parallel requests (https://github.com/ollama/ollama/issues/358), I would like to suggest support for webhooks on completed generations. Longer-running tasks like summarization can take some time, and Ollama queues up generations if you send more than your server can handle in parallel. This forces my web app to hold an HTTP request open for each pending generation, which exhausts my server's resources since I run hundreds of summarizations per day. A simple webhook reporting a generation's status change (completed, error) would make parallelization much more useful. I think Replicate does a good job here; a first implementation could be inspired by [their API](https://replicate.com/docs/webhooks) and be smaller in scope.
closed_by: null
reactions: { "url": "https://api.github.com/repos/ollama/ollama/issues/5461/reactions", "total_count": 6, "+1": 6, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
timeline_url: https://api.github.com/repos/ollama/ollama/issues/5461/timeline
performed_via_github_app: null
state_reason: null
is_pull_request: false
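As a rough sketch of the flow proposed in this record (this is not an existing Ollama API; every name and payload field below is hypothetical, loosely modeled on the Replicate-style webhooks the author links to), the server side would POST a small status payload to a caller-supplied URL once a queued generation finishes, instead of the caller holding an HTTP request open:

```python
import requests

def notify_webhook(webhook_url: str, generation_id: str,
                   status: str, output: str | None = None) -> None:
    """Hypothetical completion callback: report a generation's terminal
    status ("completed" or "error") to the caller's endpoint."""
    payload = {"id": generation_id, "status": status}
    if output is not None:
        payload["output"] = output
    requests.post(webhook_url, json=payload, timeout=10)

# Example: a finished summarization job reports back to the web app,
# which can close its side immediately after submitting the request.
notify_webhook("https://example.com/hooks/ollama-done", "gen-123",
               "completed", "the summary text")
```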

url: https://api.github.com/repos/ollama/ollama/issues/11
repository_url: https://api.github.com/repos/ollama/ollama
labels_url: https://api.github.com/repos/ollama/ollama/issues/11/labels{/name}
comments_url: https://api.github.com/repos/ollama/ollama/issues/11/comments
events_url: https://api.github.com/repos/ollama/ollama/issues/11/events
html_url: https://github.com/ollama/ollama/pull/11
id: 1,779,476,648
node_id: PR_kwDOJ0Z1Ps5UK4W-
number: 11
title: interactive generate
user:
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: 0
created_at: 2023-06-28T18:24:35
updated_at: 2023-06-28T18:32:09
closed_at: 2023-06-28T18:32:06
author_association: CONTRIBUTOR
sub_issues_summary: { "total": 0, "completed": 0, "percent_completed": 0 }
active_lock_reason: null
draft: false
pull_request: { "url": "https://api.github.com/repos/ollama/ollama/pulls/11", "html_url": "https://github.com/ollama/ollama/pull/11", "diff_url": "https://github.com/ollama/ollama/pull/11.diff", "patch_url": "https://github.com/ollama/ollama/pull/11.patch", "merged_at": "2023-06-28T18:32:06" }
body:
generate prompt is now optional. if a prompt is omitted, it will start an interactive session

previous behaviour with prompt

```
$ ollama generate ~/Downloads/vicuna-7b-v1.3.ggmlv3.q4_0.bin 'Hi!'
>>> Hi!
Hello! How can I help you today?
```

new behaviour without a prompt. ctrl-c to exit interactive mode

```
$ ollama generate ~/Downloads/vicuna-7b-v1.3.ggmlv3.q4_0.bin
>>> Hi!
Hello! How can I help you today?
>>> How are you?
I am just a computer program, so I don't have feelings or emotions like humans do. I exist solely to provide information and assist with tasks to the best of my abilities. Is there anything specific you would like help with today?
>>>
```

new behaviour with a input file

```
$ ollama generate orca-mini-3b.ggmlv3.q4_0 <questions.txt
>>> Hi!
Hello! How can I assist you today?
>>> How are you?
I'm doing well, thank you for asking. How about you?
>>>
```
closed_by:
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
reactions: { "url": "https://api.github.com/repos/ollama/ollama/issues/11/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
timeline_url: https://api.github.com/repos/ollama/ollama/issues/11/timeline
performed_via_github_app: null
state_reason: null
is_pull_request: true

url: https://api.github.com/repos/ollama/ollama/issues/1973
repository_url: https://api.github.com/repos/ollama/ollama
labels_url: https://api.github.com/repos/ollama/ollama/issues/1973/labels{/name}
comments_url: https://api.github.com/repos/ollama/ollama/issues/1973/comments
events_url: https://api.github.com/repos/ollama/ollama/issues/1973/events
html_url: https://github.com/ollama/ollama/issues/1973
id: 2,080,178,880
node_id: I_kwDOJ0Z1Ps57_QLA
number: 1,973
title: [v0.1.20] Ollama crashes quite often for Fedora 39 with NVIDIA T1200 Laptop GPU
user:
{ "login": "ilovepumpkin", "id": 11310645, "node_id": "MDQ6VXNlcjExMzEwNjQ1", "avatar_url": "https://avatars.githubusercontent.com/u/11310645?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ilovepumpkin", "html_url": "https://github.com/ilovepumpkin", "followers_url": "https://api.github.com/users/ilovepumpkin/followers", "following_url": "https://api.github.com/users/ilovepumpkin/following{/other_user}", "gists_url": "https://api.github.com/users/ilovepumpkin/gists{/gist_id}", "starred_url": "https://api.github.com/users/ilovepumpkin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ilovepumpkin/subscriptions", "organizations_url": "https://api.github.com/users/ilovepumpkin/orgs", "repos_url": "https://api.github.com/users/ilovepumpkin/repos", "events_url": "https://api.github.com/users/ilovepumpkin/events{/privacy}", "received_events_url": "https://api.github.com/users/ilovepumpkin/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
labels: [ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
state: closed
locked: false
assignee:
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
assignees:
[ { "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
milestone: null
comments: 2
created_at: 2024-01-13T08:19:26
updated_at: 2024-01-13T09:01:57
closed_at: 2024-01-13T09:01:57
author_association: NONE
sub_issues_summary: { "total": 0, "completed": 0, "percent_completed": 0 }
active_lock_reason: null
draft: null
pull_request: null
body:
This the reopen issue for https://github.com/jmorganca/ollama/issues/1887 . I am still getting the "out of memory" error. Here is my logs =============================================== ilovepumpkin:Downloads$ ollama serve 2024/01/13 16:01:14 images.go:808: total blobs: 17 2024/01/13 16:01:14 images.go:815: total unused blobs removed: 0 2024/01/13 16:01:14 routes.go:930: Listening on 127.0.0.1:11434 (version 0.1.20) 2024/01/13 16:01:14 shim_ext_server.go:142: Dynamic LLM variants [cuda rocm] 2024/01/13 16:01:14 gpu.go:88: Detecting GPU type 2024/01/13 16:01:14 gpu.go:203: Searching for GPU management library libnvidia-ml.so 2024/01/13 16:01:14 gpu.go:248: Discovered GPU libraries: [/usr/lib64/libnvidia-ml.so.545.29.06] 2024/01/13 16:01:14 gpu.go:94: Nvidia GPU detected 2024/01/13 16:01:14 gpu.go:135: CUDA Compute Capability detected: 7.5 2024/01/13 16:02:29 gpu.go:135: CUDA Compute Capability detected: 7.5 2024/01/13 16:02:29 gpu.go:135: CUDA Compute Capability detected: 7.5 2024/01/13 16:02:29 shim_ext_server_linux.go:24: Updating PATH to /home/ilovepumpkin/.nvm/versions/node/v18.16.0/bin:/home/ilovepumpkin/.local/bin:/home/ilovepumpkin/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/var/lib/snapd/snap/bin:/home/ilovepumpkin/work/apache-maven-3.9.1/bin:/home/ilovepumpkin/git/infohub-team-tools/ui-dev:/home/ilovepumpkin/git/infohub-tools/service-scripts:/home/ilovepumpkin/git/infohub-tools/rexvpn:/home/ilovepumpkin/git/infohub-tools/maven:/home/ilovepumpkin/work/apache-maven-3.9.1/bin:/home/ilovepumpkin/git/infohub-team-tools/ui-dev:/home/ilovepumpkin/git/infohub-tools/service-scripts:/home/ilovepumpkin/git/infohub-tools/rexvpn:/home/ilovepumpkin/git/infohub-tools/maven:/tmp/ollama1410717628/cuda Lazy loading /tmp/ollama1410717628/cuda/libext_server.so library 2024/01/13 16:02:29 shim_ext_server.go:92: Loading Dynamic Shim llm server: /tmp/ollama1410717628/cuda/libext_server.so 2024/01/13 16:02:29 ext_server_common.go:136: Initializing internal llama server ggml_init_cublas: GGML_CUDA_FORCE_MMQ: no ggml_init_cublas: CUDA_USE_TENSOR_CORES: yes ggml_init_cublas: found 1 CUDA devices: Device 0: NVIDIA T1200 Laptop GPU, compute capability 7.5 llama_model_loader: loaded meta data with 20 key-value pairs and 291 tensors from /home/ilovepumpkin/.ollama/models/blobs/sha256:3a43f93b78ec50f7c4e4dc8bd1cb3fff5a900e7d574c51a6f7495e48486e0dac (version GGUF V2) llama_model_loader: - tensor 0: token_embd.weight q4_0 [ 4096, 32016, 1, 1 ] llama_model_loader: - tensor 1: blk.0.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 2: blk.0.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 3: blk.0.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 4: blk.0.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 5: blk.0.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 6: blk.0.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 7: blk.0.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 8: blk.0.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 9: blk.0.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 10: blk.1.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 11: blk.1.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 12: blk.1.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 13: blk.1.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 14: 
blk.1.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 15: blk.1.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 16: blk.1.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 17: blk.1.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 18: blk.1.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 19: blk.10.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 20: blk.10.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 21: blk.10.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 22: blk.10.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 23: blk.10.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 24: blk.10.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 25: blk.10.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 26: blk.10.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 27: blk.10.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 28: blk.11.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 29: blk.11.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 30: blk.11.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 31: blk.11.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 32: blk.11.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 33: blk.11.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 34: blk.11.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 35: blk.11.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 36: blk.11.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 37: blk.12.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 38: blk.12.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 39: blk.12.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 40: blk.12.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 41: blk.12.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 42: blk.12.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 43: blk.12.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 44: blk.12.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 45: blk.12.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 46: blk.13.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 47: blk.13.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 48: blk.13.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 49: blk.13.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 50: blk.13.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 51: blk.13.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 52: blk.13.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 53: blk.13.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 54: blk.13.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 55: blk.14.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 56: blk.14.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 57: blk.14.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 
58: blk.14.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 59: blk.14.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 60: blk.14.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 61: blk.14.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 62: blk.14.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 63: blk.14.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 64: blk.15.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 65: blk.15.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 66: blk.15.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 67: blk.15.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 68: blk.15.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 69: blk.15.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 70: blk.15.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 71: blk.15.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 72: blk.15.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 73: blk.16.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 74: blk.16.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 75: blk.16.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 76: blk.16.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 77: blk.16.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 78: blk.16.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 79: blk.16.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 80: blk.16.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 81: blk.16.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 82: blk.17.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 83: blk.17.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 84: blk.17.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 85: blk.17.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 86: blk.17.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 87: blk.17.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 88: blk.17.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 89: blk.17.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 90: blk.17.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 91: blk.18.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 92: blk.18.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 93: blk.18.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 94: blk.18.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 95: blk.18.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 96: blk.18.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 97: blk.18.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 98: blk.18.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 99: blk.18.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 100: blk.19.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 101: blk.19.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: 
- tensor 102: blk.19.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 103: blk.19.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 104: blk.19.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 105: blk.19.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 106: blk.19.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 107: blk.19.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 108: blk.19.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 109: blk.2.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 110: blk.2.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 111: blk.2.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 112: blk.2.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 113: blk.2.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 114: blk.2.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 115: blk.2.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 116: blk.2.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 117: blk.2.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 118: blk.20.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 119: blk.20.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 120: blk.20.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 121: blk.20.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 122: blk.20.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 123: blk.20.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 124: blk.20.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 125: blk.20.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 126: blk.20.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 127: blk.21.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 128: blk.21.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 129: blk.21.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 130: blk.21.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 131: blk.21.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 132: blk.21.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 133: blk.21.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 134: blk.21.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 135: blk.21.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 136: blk.22.attn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 137: blk.22.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ] llama_model_loader: - tensor 138: blk.22.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 139: blk.22.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ] llama_model_loader: - tensor 140: blk.22.ffn_norm.weight f32 [ 4096, 1, 1, 1 ] llama_model_loader: - tensor 141: blk.22.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 142: blk.22.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 143: blk.22.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 144: blk.22.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ] llama_model_loader: - tensor 145: blk.23.attn_norm.weight 
f32 [ 4096, 1, 1, 1 ] [tensor listing elided for readability: llama_model_loader tensors 146-289 continue the same pattern for blk.3 through blk.31 — attn_q/attn_k/attn_v/attn_output and ffn_gate/ffn_up/ffn_down in q4_0, attn_norm/ffn_norm in f32 — plus tensor 217: output.weight q6_K [ 4096, 32016, 1, 1 ] and tensor 290: output_norm.weight f32 [ 4096, 1, 1, 1 ]] llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. llama_model_loader: - kv 0: general.architecture str = llama llama_model_loader: - kv 1: general.name str = codellama llama_model_loader: - kv 2: llama.context_length u32 = 16384 llama_model_loader: - kv 3: llama.embedding_length u32 = 4096 llama_model_loader: - kv 4: llama.block_count u32 = 32 llama_model_loader: - kv 5: llama.feed_forward_length u32 = 11008 llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128 llama_model_loader: - kv 7: llama.attention.head_count u32 = 32 llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 32 llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010 llama_model_loader: - kv 10: llama.rope.freq_base f32 = 1000000.000000 llama_model_loader: - kv 11: general.file_type u32 = 2 llama_model_loader: - kv 12: tokenizer.ggml.model str = llama llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,32016] = ["<unk>", "<s>", "</s>", "<0x00>", "<... llama_model_loader: - kv 14: tokenizer.ggml.scores arr[f32,32016] = [0.000000, 0.000000, 0.000000, 0.0000... llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,32016] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ... llama_model_loader: - kv 16: tokenizer.ggml.bos_token_id u32 = 1 llama_model_loader: - kv 17: tokenizer.ggml.eos_token_id u32 = 2 llama_model_loader: - kv 18: tokenizer.ggml.unknown_token_id u32 = 0 llama_model_loader: - kv 19: general.quantization_version u32 = 2 llama_model_loader: - type f32: 65 tensors llama_model_loader: - type q4_0: 225 tensors llama_model_loader: - type q6_K: 1 tensors llm_load_vocab: mismatch in special tokens definition ( 264/32016 vs 259/32016 ).
llm_load_print_meta: format = GGUF V2 llm_load_print_meta: arch = llama llm_load_print_meta: vocab type = SPM llm_load_print_meta: n_vocab = 32016 llm_load_print_meta: n_merges = 0 llm_load_print_meta: n_ctx_train = 16384 llm_load_print_meta: n_embd = 4096 llm_load_print_meta: n_head = 32 llm_load_print_meta: n_head_kv = 32 llm_load_print_meta: n_layer = 32 llm_load_print_meta: n_rot = 128 llm_load_print_meta: n_gqa = 1 llm_load_print_meta: f_norm_eps = 0.0e+00 llm_load_print_meta: f_norm_rms_eps = 1.0e-05 llm_load_print_meta: f_clamp_kqv = 0.0e+00 llm_load_print_meta: f_max_alibi_bias = 0.0e+00 llm_load_print_meta: n_ff = 11008 llm_load_print_meta: n_expert = 0 llm_load_print_meta: n_expert_used = 0 llm_load_print_meta: rope scaling = linear llm_load_print_meta: freq_base_train = 1000000.0 llm_load_print_meta: freq_scale_train = 1 llm_load_print_meta: n_yarn_orig_ctx = 16384 llm_load_print_meta: rope_finetuned = unknown llm_load_print_meta: model type = 7B llm_load_print_meta: model ftype = Q4_0 llm_load_print_meta: model params = 6.74 B llm_load_print_meta: model size = 3.56 GiB (4.54 BPW) llm_load_print_meta: general.name = codellama llm_load_print_meta: BOS token = 1 '<s>' llm_load_print_meta: EOS token = 2 '</s>' llm_load_print_meta: UNK token = 0 '<unk>' llm_load_print_meta: LF token = 13 '<0x0A>' llm_load_tensors: ggml ctx size = 0.11 MiB llm_load_tensors: using CUDA for GPU acceleration llm_load_tensors: mem required = 1476.19 MiB llm_load_tensors: offloading 20 repeating layers to GPU llm_load_tensors: offloaded 20/33 layers to GPU llm_load_tensors: VRAM used: 2171.88 MiB .................................................................................................. llama_new_context_with_model: n_ctx = 2048 llama_new_context_with_model: freq_base = 1000000.0 llama_new_context_with_model: freq_scale = 1 llama_kv_cache_init: VRAM kv self = 640.00 MB llama_new_context_with_model: KV self size = 1024.00 MiB, K (f16): 512.00 MiB, V (f16): 512.00 MiB llama_build_graph: non-view tensors processed: 676/676 llama_new_context_with_model: compute buffer total size = 159.19 MiB llama_new_context_with_model: VRAM scratch buffer: 156.00 MiB llama_new_context_with_model: total VRAM used: 2967.88 MiB (model: 2171.88 MiB, context: 796.00 MiB) 2024/01/13 16:02:33 ext_server_common.go:144: Starting internal llama main loop 2024/01/13 16:02:33 ext_server_common.go:158: loaded 0 images CUDA error 2 at /go/src/github.com/jmorganca/ollama/llm/llama.cpp/ggml-cuda.cu:6600: out of memory current device: 0 GGML_ASSERT: /go/src/github.com/jmorganca/ollama/llm/llama.cpp/ggml-cuda.cu:6600: !"CUDA error" [New LWP 69610] [New LWP 69611] [New LWP 69612] [New LWP 69613] [New LWP 69614] [New LWP 69615] [New LWP 69616] [New LWP 69617] [New LWP 69618] [New LWP 69619] [New LWP 70591] [New LWP 70592] [New LWP 70593] [New LWP 70594] [New LWP 70595] [New LWP 70596] [New LWP 70597] [New LWP 70598] [New LWP 70599] [New LWP 70600] [New LWP 70601] [New LWP 70605] [New LWP 70606] [New LWP 70631] [New LWP 70632] [New LWP 70633] [New LWP 70634] [New LWP 70635] [New LWP 70636] [New LWP 70637] [New LWP 70638] This GDB supports auto-downloading debuginfo from the following URLs: <https://debuginfod.fedoraproject.org/> Enable debuginfod for this session? (y or [n]) [answered N; input not from terminal] Debuginfod has been disabled. To make this setting permanent, add 'set debuginfod enabled off' to .gdbinit. [Thread debugging using libthread_db enabled] Using host libthread_db library "/lib64/libthread_db.so.1". 
0x000000000048f763 in ?? ()
#0  0x000000000048f763 in ?? ()
#1  0x0000000000457570 in ?? ()
#2  0x0000000017cac208 in ?? ()
#3  0x0000000000000080 in ?? ()
#4  0x0000000000000000 in ?? ()
[Inferior 1 (process 69609) detached]
Aborted (core dumped)
ilovepumpkin:Downloads$
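The log above shows 20 of 33 layers being offloaded before the allocation fails, so one common workaround is to offload fewer layers. A hedged example using the documented `num_gpu` option on the generate API (the model name and layer count here are illustrative, not a verified fix for this exact card):

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "codellama",
  "prompt": "hello",
  "options": { "num_gpu": 16 }
}'
```

Lowering `num_gpu` trades speed for VRAM headroom; the right value depends on what else is using the GPU.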
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1973/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1973/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6610
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6610/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6610/comments
https://api.github.com/repos/ollama/ollama/issues/6610/events
https://github.com/ollama/ollama/pull/6610
2,503,439,968
PR_kwDOJ0Z1Ps56S0TQ
6,610
llama: fix sync script ggml-metal_darwin_arm64.m filename
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-09-03T17:50:25
2024-09-03T18:01:54
2024-09-03T18:01:52
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6610", "html_url": "https://github.com/ollama/ollama/pull/6610", "diff_url": "https://github.com/ollama/ollama/pull/6610.diff", "patch_url": "https://github.com/ollama/ollama/pull/6610.patch", "merged_at": "2024-09-03T18:01:52" }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6610/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6610/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2381
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2381/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2381/comments
https://api.github.com/repos/ollama/ollama/issues/2381/events
https://github.com/ollama/ollama/pull/2381
2,122,254,714
PR_kwDOJ0Z1Ps5mOJUv
2,381
[fix] /bye and /exit are now treated as prefixes
{ "login": "lulzshadowwalker", "id": 92385434, "node_id": "U_kgDOBYGwmg", "avatar_url": "https://avatars.githubusercontent.com/u/92385434?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lulzshadowwalker", "html_url": "https://github.com/lulzshadowwalker", "followers_url": "https://api.github.com/users/lulzshadowwalker/followers", "following_url": "https://api.github.com/users/lulzshadowwalker/following{/other_user}", "gists_url": "https://api.github.com/users/lulzshadowwalker/gists{/gist_id}", "starred_url": "https://api.github.com/users/lulzshadowwalker/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lulzshadowwalker/subscriptions", "organizations_url": "https://api.github.com/users/lulzshadowwalker/orgs", "repos_url": "https://api.github.com/users/lulzshadowwalker/repos", "events_url": "https://api.github.com/users/lulzshadowwalker/events{/privacy}", "received_events_url": "https://api.github.com/users/lulzshadowwalker/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-02-07T06:03:15
2024-02-20T02:56:49
2024-02-20T02:56:49
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2381", "html_url": "https://github.com/ollama/ollama/pull/2381", "diff_url": "https://github.com/ollama/ollama/pull/2381.diff", "patch_url": "https://github.com/ollama/ollama/pull/2381.patch", "merged_at": "2024-02-20T02:56:49" }
instead of being matched as entire lines, which doesn't align with how the rest of the commands are handled. It was a little annoying to type "/bye " and not have it work as expected.
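A minimal Go sketch of the described behavior (the helper name is hypothetical, not the actual CLI code): the exit commands are matched by prefix rather than whole-line equality, so a trailing space no longer defeats them.

```go
package main

import (
	"fmt"
	"strings"
)

// isExitCommand treats "/bye" and "/exit" as prefixes, so input like
// "/bye " (with a trailing space) still exits the REPL, matching how
// the other slash commands are dispatched.
func isExitCommand(line string) bool {
	line = strings.TrimSpace(line)
	return strings.HasPrefix(line, "/bye") || strings.HasPrefix(line, "/exit")
}

func main() {
	for _, in := range []string{"/bye", "/bye ", "/exit now", "goodbye"} {
		fmt.Printf("%q -> %v\n", in, isExitCommand(in))
	}
}
```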
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2381/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2381/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3417
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3417/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3417/comments
https://api.github.com/repos/ollama/ollama/issues/3417/events
https://github.com/ollama/ollama/issues/3417
2,216,523,216
I_kwDOJ0Z1Ps6EHXXQ
3,417
Docker with NVIDIA GPU: "Unable to load cudart CUDA management library"
{ "login": "K-J-VV", "id": 113187314, "node_id": "U_kgDOBr8Z8g", "avatar_url": "https://avatars.githubusercontent.com/u/113187314?v=4", "gravatar_id": "", "url": "https://api.github.com/users/K-J-VV", "html_url": "https://github.com/K-J-VV", "followers_url": "https://api.github.com/users/K-J-VV/followers", "following_url": "https://api.github.com/users/K-J-VV/following{/other_user}", "gists_url": "https://api.github.com/users/K-J-VV/gists{/gist_id}", "starred_url": "https://api.github.com/users/K-J-VV/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/K-J-VV/subscriptions", "organizations_url": "https://api.github.com/users/K-J-VV/orgs", "repos_url": "https://api.github.com/users/K-J-VV/repos", "events_url": "https://api.github.com/users/K-J-VV/events{/privacy}", "received_events_url": "https://api.github.com/users/K-J-VV/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6430601766, "node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg", "url": "https://api.github.com/repos/ollama/ollama/labels/nvidia", "name": "nvidia", "color": "8CDB00", "default": false, "description": "Issues relating to Nvidia GPUs and CUDA" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
6
2024-03-30T15:15:17
2024-04-13T13:46:25
2024-04-13T13:46:25
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I am trying to run ollama in docker and to have it utilize my NVIDIA GPU, but keep getting in the error message it cannot load cudart library. This is a fresh installation on Debian. When running the NVIDIA workload sample example of "sudo docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi" I do get correct output displaying my GPU info as well as CUDA version. Here are my logs: ``` with OLLAMA_DEBUG not set time=2024-03-30T15:05:39.537Z level=INFO source=images.go:804 msg="total blobs: 0" time=2024-03-30T15:05:39.540Z level=INFO source=images.go:811 msg="total unused blobs removed: 0" time=2024-03-30T15:05:39.543Z level=INFO source=routes.go:1118 msg="Listening on [::]:11434 (version 0.1.30)" time=2024-03-30T15:05:39.543Z level=INFO source=payload_common.go:113 msg="Extracting dynamic libraries to /tmp/ollama4102197452/runners ..." time=2024-03-30T15:05:41.303Z level=INFO source=payload_common.go:140 msg="Dynamic LLM libraries [cpu_avx cpu_avx2 cuda_v11 cpu rocm_v60000]" time=2024-03-30T15:05:41.303Z level=INFO source=gpu.go:115 msg="Detecting GPU type" time=2024-03-30T15:05:41.303Z level=INFO source=gpu.go:265 msg="Searching for GPU management library libcudart.so*" time=2024-03-30T15:05:41.304Z level=INFO source=gpu.go:311 msg="Discovered GPU libraries: [/tmp/ollama4102197452/runners/cuda_v11/libcudart.so.11.0]" time=2024-03-30T15:05:41.304Z level=INFO source=gpu.go:340 msg="Unable to load cudart CUDA management library /tmp/ollama4102197452/runners/cuda_v11/libcudart.so.11.0: cudart init failure: 35" time=2024-03-30T15:05:41.304Z level=INFO source=gpu.go:265 msg="Searching for GPU management library libnvidia-ml.so" time=2024-03-30T15:05:41.305Z level=INFO source=gpu.go:311 msg="Discovered GPU libraries: []" time=2024-03-30T15:05:41.305Z level=INFO source=cpu_common.go:11 msg="CPU has AVX2" time=2024-03-30T15:05:41.305Z level=WARN source=amd_linux.go:53 msg="ollama recommends running the https://www.amd.com/en/support/linux-drivers: amdgpu version file missing: /sys/module/amdgpu/version stat /sys/module/amdgpu/version: no such file or directory" time=2024-03-30T15:05:41.305Z level=INFO source=amd_linux.go:88 msg="detected amdgpu versions [gfx1036]" time=2024-03-30T15:05:41.305Z level=WARN source=amd_linux.go:350 msg="amdgpu detected, but no compatible rocm library found. Either install rocm v6, or follow manual install instructions at https://github.com/ollama/ollama/blob/main/docs/linux.md#manual-install" time=2024-03-30T15:05:41.305Z level=WARN source=amd_linux.go:99 msg="unable to verify rocm library, will use cpu: no suitable rocm found, falling back to CPU" time=2024-03-30T15:05:41.305Z level=INFO source=routes.go:1141 msg="no GPU detected" with OLLAMA_DEBUG=1 time=2024-03-30T15:10:04.139Z level=INFO source=images.go:804 msg="total blobs: 0" time=2024-03-30T15:10:04.142Z level=INFO source=images.go:811 msg="total unused blobs removed: 0" time=2024-03-30T15:10:04.145Z level=INFO source=routes.go:1118 msg="Listening on [::]:11434 (version 0.1.30)" time=2024-03-30T15:10:04.145Z level=INFO source=payload_common.go:113 msg="Extracting dynamic libraries to /tmp/ollama1959530181/runners ..." 
time=2024-03-30T15:10:05.968Z level=INFO source=payload_common.go:140 msg="Dynamic LLM libraries [rocm_v60000 cpu_avx cpu cpu_avx2 cuda_v11]" time=2024-03-30T15:10:05.968Z level=DEBUG source=payload_common.go:141 msg="Override detection logic by setting OLLAMA_LLM_LIBRARY" time=2024-03-30T15:10:05.968Z level=INFO source=gpu.go:115 msg="Detecting GPU type" time=2024-03-30T15:10:05.968Z level=INFO source=gpu.go:265 msg="Searching for GPU management library libcudart.so*" time=2024-03-30T15:10:05.968Z level=DEBUG source=gpu.go:283 msg="gpu management search paths: [/tmp/ollama1959530181/runners/cuda*/libcudart.so* /usr/local/cuda/lib64/libcudart.so* /usr/lib/x86_64-linux-gnu/nvidia/current/libcudart.so* /usr/lib/x86_64-linux-gnu/libcudart.so* /usr/lib/wsl/lib/libcudart.so* /usr/lib/wsl/drivers/*/libcudart.so* /opt/cuda/lib64/libcudart.so* /usr/local/cuda*/targets/aarch64-linux/lib/libcudart.so* /usr/lib/aarch64-linux-gnu/nvidia/current/libcudart.so* /usr/lib/aarch64-linux-gnu/libcudart.so* /usr/local/cuda/lib*/libcudart.so* /usr/lib*/libcudart.so* /usr/local/lib*/libcudart.so* /usr/local/nvidia/lib/libcudart.so** /usr/local/nvidia/lib64/libcudart.so**]" time=2024-03-30T15:10:05.968Z level=INFO source=gpu.go:311 msg="Discovered GPU libraries: [/tmp/ollama1959530181/runners/cuda_v11/libcudart.so.11.0]" wiring cudart library functions in /tmp/ollama1959530181/runners/cuda_v11/libcudart.so.11.0 dlsym: cudaSetDevice dlsym: cudaDeviceSynchronize dlsym: cudaDeviceReset dlsym: cudaMemGetInfo dlsym: cudaGetDeviceCount dlsym: cudaDeviceGetAttribute dlsym: cudaDriverGetVersion cudaSetDevice err: 35 time=2024-03-30T15:10:05.968Z level=INFO source=gpu.go:340 msg="Unable to load cudart CUDA management library /tmp/ollama1959530181/runners/cuda_v11/libcudart.so.11.0: cudart init failure: 35" time=2024-03-30T15:10:05.968Z level=INFO source=gpu.go:265 msg="Searching for GPU management library libnvidia-ml.so" time=2024-03-30T15:10:05.968Z level=DEBUG source=gpu.go:283 msg="gpu management search paths: [/usr/local/cuda/lib64/libnvidia-ml.so* /usr/lib/x86_64-linux-gnu/nvidia/current/libnvidia-ml.so* /usr/lib/x86_64-linux-gnu/libnvidia-ml.so* /usr/lib/wsl/lib/libnvidia-ml.so* /usr/lib/wsl/drivers/*/libnvidia-ml.so* /opt/cuda/lib64/libnvidia-ml.so* /usr/lib*/libnvidia-ml.so* /usr/lib/aarch64-linux-gnu/nvidia/current/libnvidia-ml.so* /usr/lib/aarch64-linux-gnu/libnvidia-ml.so* /usr/local/lib*/libnvidia-ml.so* /opt/cuda/targets/x86_64-linux/lib/stubs/libnvidia-ml.so* /usr/local/nvidia/lib/libnvidia-ml.so* /usr/local/nvidia/lib64/libnvidia-ml.so*]" time=2024-03-30T15:10:05.968Z level=INFO source=gpu.go:311 msg="Discovered GPU libraries: []" time=2024-03-30T15:10:05.968Z level=INFO source=cpu_common.go:11 msg="CPU has AVX2" time=2024-03-30T15:10:05.968Z level=WARN source=amd_linux.go:53 msg="ollama recommends running the https://www.amd.com/en/support/linux-drivers: amdgpu version file missing: /sys/module/amdgpu/version stat /sys/module/amdgpu/version: no such file or directory" time=2024-03-30T15:10:05.968Z level=INFO source=amd_linux.go:88 msg="detected amdgpu versions [gfx1036]" time=2024-03-30T15:10:05.968Z level=DEBUG source=amd_common.go:16 msg="evaluating potential rocm lib dir /tmp/ollama1959530181/rocm" time=2024-03-30T15:10:05.968Z level=DEBUG source=amd_common.go:16 msg="evaluating potential rocm lib dir /usr/bin" time=2024-03-30T15:10:05.968Z level=DEBUG source=amd_common.go:16 msg="evaluating potential rocm lib dir /usr/bin/rocm" time=2024-03-30T15:10:05.968Z level=DEBUG source=amd_common.go:16 
msg="evaluating potential rocm lib dir /usr/share/ollama/lib/rocm" time=2024-03-30T15:10:05.968Z level=DEBUG source=amd_common.go:16 msg="evaluating potential rocm lib dir /usr/local/nvidia/lib" time=2024-03-30T15:10:05.968Z level=DEBUG source=amd_common.go:16 msg="evaluating potential rocm lib dir /usr/local/nvidia/lib64" time=2024-03-30T15:10:05.968Z level=DEBUG source=amd_common.go:16 msg="evaluating potential rocm lib dir /opt/rocm/lib" time=2024-03-30T15:10:05.968Z level=WARN source=amd_linux.go:350 msg="amdgpu detected, but no compatible rocm library found. Either install rocm v6, or follow manual install instructions at https://github.com/ollama/ollama/blob/main/docs/linux.md#manual-install" time=2024-03-30T15:10:05.968Z level=WARN source=amd_linux.go:99 msg="unable to verify rocm library, will use cpu: no suitable rocm found, falling back to CPU" time=2024-03-30T15:10:05.968Z level=INFO source=routes.go:1141 msg="no GPU detected" ``` ### What did you expect to see? _GPU detected & in use_ ### Steps to reproduce Running docker-compose up -d with the following files: docker-compose.yml ``` version: "3.8" services: ## Ollama llm: image: 'ollama/ollama:${LLM_TAG}' container_name: '${LLM_NAME}' hostname: '${LLM_NAME}' ports: - 11434:11434 env_file: - .env volumes: - ./llm:/root/.ollama restart: 'always' ``` .env ``` COMPOSE_PROJECT_NAME=ollama LLM_NAME=${COMPOSE_PROJECT_NAME} LLM_TAG=0.1.30 #OLLAMA_DEBUG=1 gpus=all ``` ### Are there any recent changes that introduced the issue? _No response_ ### OS Linux ### Architecture amd64 ### Platform Docker ### Ollama version 0.1.30 ### GPU Nvidia ### GPU info sudo docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi ``` Sat Mar 30 15:03:10 2024 +-----------------------------------------------------------------------------------------+ | NVIDIA-SMI 550.54.14 Driver Version: 550.54.14 CUDA Version: 12.4 | |-----------------------------------------+------------------------+----------------------+ | GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |=========================================+========================+======================| | 0 NVIDIA GeForce RTX 4060 Ti Off | 00000000:02:00.0 Off | N/A | | 0% 46C P0 37W / 165W | 0MiB / 16380MiB | 3% Default | | | | N/A | +-----------------------------------------+------------------------+----------------------+ +-----------------------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=========================================================================================| | No running processes found | +-----------------------------------------------------------------------------------------+ ``` nvcc --version ``` nvcc: NVIDIA (R) Cuda compiler driver Copyright (c) 2005-2024 NVIDIA Corporation Built on Tue_Feb_27_16:19:38_PST_2024 Cuda compilation tools, release 12.4, V12.4.99 Build cuda_12.4.r12.4/compiler.33961263_0 ``` __ ### CPU AMD ### Other software _No response_
{ "login": "K-J-VV", "id": 113187314, "node_id": "U_kgDOBr8Z8g", "avatar_url": "https://avatars.githubusercontent.com/u/113187314?v=4", "gravatar_id": "", "url": "https://api.github.com/users/K-J-VV", "html_url": "https://github.com/K-J-VV", "followers_url": "https://api.github.com/users/K-J-VV/followers", "following_url": "https://api.github.com/users/K-J-VV/following{/other_user}", "gists_url": "https://api.github.com/users/K-J-VV/gists{/gist_id}", "starred_url": "https://api.github.com/users/K-J-VV/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/K-J-VV/subscriptions", "organizations_url": "https://api.github.com/users/K-J-VV/orgs", "repos_url": "https://api.github.com/users/K-J-VV/repos", "events_url": "https://api.github.com/users/K-J-VV/events{/privacy}", "received_events_url": "https://api.github.com/users/K-J-VV/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3417/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3417/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4004
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4004/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4004/comments
https://api.github.com/repos/ollama/ollama/issues/4004/events
https://github.com/ollama/ollama/issues/4004
2,267,603,319
I_kwDOJ0Z1Ps6HKOF3
4,004
Different folders for (sha 256) models
{ "login": "milenamilka755", "id": 149798060, "node_id": "U_kgDOCO28rA", "avatar_url": "https://avatars.githubusercontent.com/u/149798060?v=4", "gravatar_id": "", "url": "https://api.github.com/users/milenamilka755", "html_url": "https://github.com/milenamilka755", "followers_url": "https://api.github.com/users/milenamilka755/followers", "following_url": "https://api.github.com/users/milenamilka755/following{/other_user}", "gists_url": "https://api.github.com/users/milenamilka755/gists{/gist_id}", "starred_url": "https://api.github.com/users/milenamilka755/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/milenamilka755/subscriptions", "organizations_url": "https://api.github.com/users/milenamilka755/orgs", "repos_url": "https://api.github.com/users/milenamilka755/repos", "events_url": "https://api.github.com/users/milenamilka755/events{/privacy}", "received_events_url": "https://api.github.com/users/milenamilka755/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
2
2024-04-28T14:25:40
2024-04-29T16:43:40
2024-04-29T15:32:44
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
The program stores ALL models, split into parts, in a single directory: ollama\models\blobs. With the various problems of downloading models, moving them to another location, etc., this causes a problem, because you have to work a bit to figure out which parts belong to which model. It would be a good idea to put each model's parts into its OWN subdirectory, e.g. ollama_models\blobs\llama3, ollama_models\blobs\gemma, ollama_models\blobs\mistral7b, ollama\models\blobs\llava:34b
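In the meantime, the mapping the reporter wants already exists in the manifests, which record which blobs belong to which model. A minimal Go sketch (assuming the usual `~/.ollama/models/manifests/...` layout; adjust the root for a custom OLLAMA_MODELS path) that prints each model's blob digests:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// manifest mirrors only the fields this sketch needs from an
// Ollama model manifest file.
type manifest struct {
	Config struct {
		Digest string `json:"digest"`
	} `json:"config"`
	Layers []struct {
		Digest string `json:"digest"`
		Size   int64  `json:"size"`
	} `json:"layers"`
}

func main() {
	home, err := os.UserHomeDir()
	if err != nil {
		panic(err)
	}
	root := filepath.Join(home, ".ollama", "models", "manifests")
	filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return nil // skip unreadable entries and directories
		}
		data, err := os.ReadFile(path)
		if err != nil {
			return nil
		}
		var m manifest
		if json.Unmarshal(data, &m) != nil {
			return nil // not a manifest we understand
		}
		// The path under the manifests root names the model and tag.
		rel, _ := filepath.Rel(root, path)
		fmt.Println(rel)
		fmt.Println("  config:", m.Config.Digest)
		for _, l := range m.Layers {
			fmt.Printf("  layer:  %s (%d bytes)\n", l.Digest, l.Size)
		}
		return nil
	})
}
```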
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4004/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4004/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7831
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7831/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7831/comments
https://api.github.com/repos/ollama/ollama/issues/7831/events
https://github.com/ollama/ollama/pull/7831
2,691,677,333
PR_kwDOJ0Z1Ps6DEzb_
7,831
docs: add HoneyHive under observability
{ "login": "sunnybak", "id": 25540727, "node_id": "MDQ6VXNlcjI1NTQwNzI3", "avatar_url": "https://avatars.githubusercontent.com/u/25540727?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sunnybak", "html_url": "https://github.com/sunnybak", "followers_url": "https://api.github.com/users/sunnybak/followers", "following_url": "https://api.github.com/users/sunnybak/following{/other_user}", "gists_url": "https://api.github.com/users/sunnybak/gists{/gist_id}", "starred_url": "https://api.github.com/users/sunnybak/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sunnybak/subscriptions", "organizations_url": "https://api.github.com/users/sunnybak/orgs", "repos_url": "https://api.github.com/users/sunnybak/repos", "events_url": "https://api.github.com/users/sunnybak/events{/privacy}", "received_events_url": "https://api.github.com/users/sunnybak/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-11-25T17:47:06
2024-11-25T17:55:33
2024-11-25T17:55:33
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7831", "html_url": "https://github.com/ollama/ollama/pull/7831", "diff_url": "https://github.com/ollama/ollama/pull/7831.diff", "patch_url": "https://github.com/ollama/ollama/pull/7831.patch", "merged_at": "2024-11-25T17:55:33" }
Adds HoneyHive and a link to its Ollama integration under Observability in README.md
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7831/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7831/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3280
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3280/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3280/comments
https://api.github.com/repos/ollama/ollama/issues/3280/events
https://github.com/ollama/ollama/issues/3280
2,199,496,591
I_kwDOJ0Z1Ps6DGaeP
3,280
Please support HPT (Air)
{ "login": "RahulBhalley", "id": 9640948, "node_id": "MDQ6VXNlcjk2NDA5NDg=", "avatar_url": "https://avatars.githubusercontent.com/u/9640948?v=4", "gravatar_id": "", "url": "https://api.github.com/users/RahulBhalley", "html_url": "https://github.com/RahulBhalley", "followers_url": "https://api.github.com/users/RahulBhalley/followers", "following_url": "https://api.github.com/users/RahulBhalley/following{/other_user}", "gists_url": "https://api.github.com/users/RahulBhalley/gists{/gist_id}", "starred_url": "https://api.github.com/users/RahulBhalley/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RahulBhalley/subscriptions", "organizations_url": "https://api.github.com/users/RahulBhalley/orgs", "repos_url": "https://api.github.com/users/RahulBhalley/repos", "events_url": "https://api.github.com/users/RahulBhalley/events{/privacy}", "received_events_url": "https://api.github.com/users/RahulBhalley/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
0
2024-03-21T08:19:41
2024-05-14T14:16:47
2024-05-14T14:16:47
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What model would you like? They say HPT Air is open source at [HyperGAI/HPT](https://github.com/HyperGAI/HPT). Would really like to use it locally on an M1 Air.
{ "login": "RahulBhalley", "id": 9640948, "node_id": "MDQ6VXNlcjk2NDA5NDg=", "avatar_url": "https://avatars.githubusercontent.com/u/9640948?v=4", "gravatar_id": "", "url": "https://api.github.com/users/RahulBhalley", "html_url": "https://github.com/RahulBhalley", "followers_url": "https://api.github.com/users/RahulBhalley/followers", "following_url": "https://api.github.com/users/RahulBhalley/following{/other_user}", "gists_url": "https://api.github.com/users/RahulBhalley/gists{/gist_id}", "starred_url": "https://api.github.com/users/RahulBhalley/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RahulBhalley/subscriptions", "organizations_url": "https://api.github.com/users/RahulBhalley/orgs", "repos_url": "https://api.github.com/users/RahulBhalley/repos", "events_url": "https://api.github.com/users/RahulBhalley/events{/privacy}", "received_events_url": "https://api.github.com/users/RahulBhalley/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3280/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3280/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/4325
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4325/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4325/comments
https://api.github.com/repos/ollama/ollama/issues/4325/events
https://github.com/ollama/ollama/issues/4325
2,290,420,714
I_kwDOJ0Z1Ps6IhQvq
4,325
Model request: Functionary v2.4.
{ "login": "rvsh2", "id": 32043169, "node_id": "MDQ6VXNlcjMyMDQzMTY5", "avatar_url": "https://avatars.githubusercontent.com/u/32043169?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rvsh2", "html_url": "https://github.com/rvsh2", "followers_url": "https://api.github.com/users/rvsh2/followers", "following_url": "https://api.github.com/users/rvsh2/following{/other_user}", "gists_url": "https://api.github.com/users/rvsh2/gists{/gist_id}", "starred_url": "https://api.github.com/users/rvsh2/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rvsh2/subscriptions", "organizations_url": "https://api.github.com/users/rvsh2/orgs", "repos_url": "https://api.github.com/users/rvsh2/repos", "events_url": "https://api.github.com/users/rvsh2/events{/privacy}", "received_events_url": "https://api.github.com/users/rvsh2/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
0
2024-05-10T21:12:59
2024-05-10T21:12:59
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello, is there a possibility to run ollama with Functionary Small v2.4? It is a very good model for tool calling. BR, Krzysztof
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4325/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4325/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/6275
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6275/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6275/comments
https://api.github.com/repos/ollama/ollama/issues/6275/events
https://github.com/ollama/ollama/issues/6275
2,457,203,459
I_kwDOJ0Z1Ps6SdfMD
6,275
provide better hashing algorithm
{ "login": "olumolu", "id": 162728301, "node_id": "U_kgDOCbMJbQ", "avatar_url": "https://avatars.githubusercontent.com/u/162728301?v=4", "gravatar_id": "", "url": "https://api.github.com/users/olumolu", "html_url": "https://github.com/olumolu", "followers_url": "https://api.github.com/users/olumolu/followers", "following_url": "https://api.github.com/users/olumolu/following{/other_user}", "gists_url": "https://api.github.com/users/olumolu/gists{/gist_id}", "starred_url": "https://api.github.com/users/olumolu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/olumolu/subscriptions", "organizations_url": "https://api.github.com/users/olumolu/orgs", "repos_url": "https://api.github.com/users/olumolu/repos", "events_url": "https://api.github.com/users/olumolu/events{/privacy}", "received_events_url": "https://api.github.com/users/olumolu/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
1
2024-08-09T06:30:16
2024-08-15T19:25:11
2024-08-15T19:25:11
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
BLAKE3 is faster, more secure, and more advanced. Switching to it from SHA-256 makes sense and would give a better, faster, safer alternative.
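For context on what such a switch would replace: blobs are named by the SHA-256 digest of their contents. A minimal Go sketch of computing that `sha256:<hex>` name with the standard library (the file path is hypothetical; BLAKE3 is not in Go's standard library, so a change would pull in a third-party module such as lukechampine.com/blake3):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// blobDigest streams a file through SHA-256 and returns the
// "sha256:<hex>" form used for content-addressed blob names.
func blobDigest(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return fmt.Sprintf("sha256:%x", h.Sum(nil)), nil
}

func main() {
	d, err := blobDigest("model.gguf") // hypothetical input file
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(d)
}
```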
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6275/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6275/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/4560
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4560/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4560/comments
https://api.github.com/repos/ollama/ollama/issues/4560/events
https://github.com/ollama/ollama/issues/4560
2,308,625,446
I_kwDOJ0Z1Ps6JmtQm
4,560
phi3 medium small vision
{ "login": "olumolu", "id": 162728301, "node_id": "U_kgDOCbMJbQ", "avatar_url": "https://avatars.githubusercontent.com/u/162728301?v=4", "gravatar_id": "", "url": "https://api.github.com/users/olumolu", "html_url": "https://github.com/olumolu", "followers_url": "https://api.github.com/users/olumolu/followers", "following_url": "https://api.github.com/users/olumolu/following{/other_user}", "gists_url": "https://api.github.com/users/olumolu/gists{/gist_id}", "starred_url": "https://api.github.com/users/olumolu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/olumolu/subscriptions", "organizations_url": "https://api.github.com/users/olumolu/orgs", "repos_url": "https://api.github.com/users/olumolu/repos", "events_url": "https://api.github.com/users/olumolu/events{/privacy}", "received_events_url": "https://api.github.com/users/olumolu/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
18
2024-05-21T15:57:52
2024-09-26T15:10:30
null
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
https://huggingface.co/microsoft/Phi-3-medium-128k-instruct https://huggingface.co/microsoft/Phi-3-medium-4k-instruct https://huggingface.co/microsoft/Phi-3-small-8k-instruct https://huggingface.co/microsoft/Phi-3-small-128k-instruct Suggested by @Qualzz https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/
{ "login": "olumolu", "id": 162728301, "node_id": "U_kgDOCbMJbQ", "avatar_url": "https://avatars.githubusercontent.com/u/162728301?v=4", "gravatar_id": "", "url": "https://api.github.com/users/olumolu", "html_url": "https://github.com/olumolu", "followers_url": "https://api.github.com/users/olumolu/followers", "following_url": "https://api.github.com/users/olumolu/following{/other_user}", "gists_url": "https://api.github.com/users/olumolu/gists{/gist_id}", "starred_url": "https://api.github.com/users/olumolu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/olumolu/subscriptions", "organizations_url": "https://api.github.com/users/olumolu/orgs", "repos_url": "https://api.github.com/users/olumolu/repos", "events_url": "https://api.github.com/users/olumolu/events{/privacy}", "received_events_url": "https://api.github.com/users/olumolu/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4560/reactions", "total_count": 58, "+1": 45, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 10, "eyes": 3 }
https://api.github.com/repos/ollama/ollama/issues/4560/timeline
null
reopened
false
https://api.github.com/repos/ollama/ollama/issues/1537
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1537/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1537/comments
https://api.github.com/repos/ollama/ollama/issues/1537/events
https://github.com/ollama/ollama/issues/1537
2,042,803,380
I_kwDOJ0Z1Ps55wrS0
1,537
Error getting mixtral
{ "login": "JimBabcock59", "id": 5861727, "node_id": "MDQ6VXNlcjU4NjE3Mjc=", "avatar_url": "https://avatars.githubusercontent.com/u/5861727?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JimBabcock59", "html_url": "https://github.com/JimBabcock59", "followers_url": "https://api.github.com/users/JimBabcock59/followers", "following_url": "https://api.github.com/users/JimBabcock59/following{/other_user}", "gists_url": "https://api.github.com/users/JimBabcock59/gists{/gist_id}", "starred_url": "https://api.github.com/users/JimBabcock59/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JimBabcock59/subscriptions", "organizations_url": "https://api.github.com/users/JimBabcock59/orgs", "repos_url": "https://api.github.com/users/JimBabcock59/repos", "events_url": "https://api.github.com/users/JimBabcock59/events{/privacy}", "received_events_url": "https://api.github.com/users/JimBabcock59/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
3
2023-12-15T02:54:49
2023-12-19T20:18:02
2023-12-19T20:18:02
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Like everyone, we wanted to try mixtral. I downloaded it. Below is my experience:

```
ollama run mixtral
pulling manifest
pulling 9cd37fe774bf... 100% ▕█████████████████████████████████████████████████████▏ (26/26 GB, 4.7 MB/s)
pulling 79b7eca19f7a... 100% ▕██████████████████████████████████████████████████████████████▏ (43/43 B, 34 B/s)
pulling f427553a0d85... 100% ▕██████████████████████████████████████████████████████████████▏ (63/63 B, 46 B/s)
pulling 302c7acb7ff7... 100% ▕███████████████████████████████████████████████████████████▏ (410/410 B, 195 B/s)
verifying sha256 digest
writing manifest
removing any unused layers
success
⠏ Error: llama runner: failed to load model '/usr/share/ollama/.ollama/models/blobs/sha256:9cd37fe774bf3be341e1ff913a18518cee43d2350ee7107035b5a3a27468c0d4': this model may be incompatible with your version of Ollama. If you previously pulled this model, try updating it by running `ollama pull mixtral:latest`

jim@Jim:~$ ollama run mixtral
⠸ Error: llama runner: failed to load model '/usr/share/ollama/.ollama/models/blobs/sha256:9cd37fe774bf3be341e1ff913a18518cee43d2350ee7107035b5a3a27468c0d4': this model may be incompatible with your version of Ollama. If you previously pulled this model, try updating it by running `ollama pull mixtral:latest`

jim@Jim:~$ ollama pull mixtral:latest
pulling manifest
pulling 9cd37fe774bf... 100% ▕███████████████████████████████████████████████████████████▏ (26/26 GB, 284 TB/s)
pulling 79b7eca19f7a... 100% ▕████████████████████████████████████████████████████████████▏ (43/43 B, 1.9 MB/s)
pulling f427553a0d85... 100% ▕████████████████████████████████████████████████████████████▏ (63/63 B, 2.9 MB/s)
pulling 302c7acb7ff7... 100% ▕██████████████████████████████████████████████████████████▏ (410/410 B, 9.9 MB/s)
verifying sha256 digest
writing manifest
removing any unused layers
success

jim@Jim:~$ ollama run mixtral:latest
⠙ Error: llama runner: failed to load model '/usr/share/ollama/.ollama/models/blobs/sha256:9cd37fe774bf3be341e1ff913a18518cee43d2350ee7107035b5a3a27468c0d4': this model may be incompatible with your version of Ollama. If you previously pulled this model, try updating it by running `ollama pull mixtral:latest`

jim@Jim:~$ ollama pull mixtral:latest
pulling manifest
pulling 9cd37fe774bf... 100% ▕████████████████████████████████████████████████████████████▏ (26/26 GB, 75 TB/s)
pulling 79b7eca19f7a... 100% ▕████████████████████████████████████████████████████████████▏ (43/43 B, 965 kB/s)
pulling f427553a0d85... 100% ▕████████████████████████████████████████████████████████████▏ (63/63 B, 557 kB/s)
pulling 302c7acb7ff7... 100% ▕██████████████████████████████████████████████████████████▏ (410/410 B, 9.4 MB/s)
verifying sha256 digest
writing manifest
removing any unused layers
success

jim@Jim:~$ ollama run mixtral
⠙ Error: llama runner: failed to load model '/usr/share/ollama/.ollama/models/blobs/sha256:9cd37fe774bf3be341e1ff913a18518cee43d2350ee7107035b5a3a27468c0d4': this model may be incompatible with your version of Ollama. If you previously pulled this model, try updating it by running `ollama pull mixtral:latest`
```

I have successfully downloaded literally dozens of models through the ollama site and never had an issue. Any suggestions?
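The error text itself points at a runner/model version mismatch, so the usual remedy is upgrading Ollama before re-pulling; a hedged example for a Linux install (the script URL is the currently published one):

```shell
ollama --version                              # check what is installed
curl -fsSL https://ollama.com/install.sh | sh # re-run the install script to upgrade
ollama pull mixtral:latest
ollama run mixtral
```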
{ "login": "technovangelist", "id": 633681, "node_id": "MDQ6VXNlcjYzMzY4MQ==", "avatar_url": "https://avatars.githubusercontent.com/u/633681?v=4", "gravatar_id": "", "url": "https://api.github.com/users/technovangelist", "html_url": "https://github.com/technovangelist", "followers_url": "https://api.github.com/users/technovangelist/followers", "following_url": "https://api.github.com/users/technovangelist/following{/other_user}", "gists_url": "https://api.github.com/users/technovangelist/gists{/gist_id}", "starred_url": "https://api.github.com/users/technovangelist/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/technovangelist/subscriptions", "organizations_url": "https://api.github.com/users/technovangelist/orgs", "repos_url": "https://api.github.com/users/technovangelist/repos", "events_url": "https://api.github.com/users/technovangelist/events{/privacy}", "received_events_url": "https://api.github.com/users/technovangelist/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1537/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1537/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6466
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6466/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6466/comments
https://api.github.com/repos/ollama/ollama/issues/6466/events
https://github.com/ollama/ollama/issues/6466
2,481,558,523
I_kwDOJ0Z1Ps6T6ZP7
6,466
I cannot push an 8 GB model to Ollama
{ "login": "hololeo", "id": 11970940, "node_id": "MDQ6VXNlcjExOTcwOTQw", "avatar_url": "https://avatars.githubusercontent.com/u/11970940?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hololeo", "html_url": "https://github.com/hololeo", "followers_url": "https://api.github.com/users/hololeo/followers", "following_url": "https://api.github.com/users/hololeo/following{/other_user}", "gists_url": "https://api.github.com/users/hololeo/gists{/gist_id}", "starred_url": "https://api.github.com/users/hololeo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hololeo/subscriptions", "organizations_url": "https://api.github.com/users/hololeo/orgs", "repos_url": "https://api.github.com/users/hololeo/repos", "events_url": "https://api.github.com/users/hololeo/events{/privacy}", "received_events_url": "https://api.github.com/users/hololeo/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
11
2024-08-22T19:29:51
2024-11-29T21:25:55
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Using `ollama push me/mymodel`, I'm getting an upload rate like this: > 39 MB/8.5 GB 1.4 MB/s 1h41m It will chug along to 40 or 50%, then I get the error: > Error: max retries exceeded: http status 502 Bad Gateway: <?xml version="1.0" encoding="UTF-8"?><Error><Code>InternalError</Code><Message>We encountered an internal connectivity issue. Please try again.</Message></Error> I'm wondering if my ISP is cutting me off, or perhaps I'm not uploading fast enough so the Ollama server drops the connection? Any tips on how to push largish models? It handles 4 GB fine. ### OS macOS ### GPU Apple ### CPU Apple ### Ollama version 0.3.6
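If the 502s are transient gateway errors rather than an ISP issue, a blunt workaround is to retry the push in a loop; a sketch assuming that already-uploaded layers are skipped on the next attempt, so each retry makes forward progress:
```
# Retry `ollama push` until it succeeds; assumes completed layers are not re-sent
until ollama push me/mymodel; do
  echo "push failed, retrying in 30s..."
  sleep 30
done
```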
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6466/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6466/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/5850
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5850/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5850/comments
https://api.github.com/repos/ollama/ollama/issues/5850/events
https://github.com/ollama/ollama/issues/5850
2,422,871,598
I_kwDOJ0Z1Ps6QahYu
5,850
Error loading model on 0.2.8-rc1-rocm
{ "login": "dvelez3815", "id": 40648189, "node_id": "MDQ6VXNlcjQwNjQ4MTg5", "avatar_url": "https://avatars.githubusercontent.com/u/40648189?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dvelez3815", "html_url": "https://github.com/dvelez3815", "followers_url": "https://api.github.com/users/dvelez3815/followers", "following_url": "https://api.github.com/users/dvelez3815/following{/other_user}", "gists_url": "https://api.github.com/users/dvelez3815/gists{/gist_id}", "starred_url": "https://api.github.com/users/dvelez3815/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dvelez3815/subscriptions", "organizations_url": "https://api.github.com/users/dvelez3815/orgs", "repos_url": "https://api.github.com/users/dvelez3815/repos", "events_url": "https://api.github.com/users/dvelez3815/events{/privacy}", "received_events_url": "https://api.github.com/users/dvelez3815/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6433346500, "node_id": "LA_kwDOJ0Z1Ps8AAAABf3UTxA", "url": "https://api.github.com/repos/ollama/ollama/labels/amd", "name": "amd", "color": "000000", "default": false, "description": "Issues relating to AMD GPUs and ROCm" } ]
closed
false
null
[]
null
2
2024-07-22T13:22:10
2024-07-23T17:56:04
2024-07-23T17:56:03
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? segmentation fault (core dumped) error ``` 2024/07/22 13:17:50 routes.go:1100: INFO server config env="map[CUDA_VISIBLE_DEVICES: GPU_DEVICE_ORDINAL: HIP_VISIBLE_DEVICES: HSA_OVERRIDE_GFX_VERSION: OLLAMA_DEBUG:false OLLAMA_FLASH_ATTENTION:false OLLAMA_HOST:http://0.0.0.0:11434 OLLAMA_INTEL_GPU:false OLLAMA_KEEP_ALIVE:5m0s OLLAMA_LLM_LIBRARY: OLLAMA_MAX_LOADED_MODELS:0 OLLAMA_MAX_QUEUE:512 OLLAMA_MAX_VRAM:0 OLLAMA_MODELS:/root/.ollama/models OLLAMA_NOHISTORY:false OLLAMA_NOPRUNE:false OLLAMA_NUM_PARALLEL:0 OLLAMA_ORIGINS:[http://localhost https://localhost http://localhost:* https://localhost:* http://127.0.0.1 https://127.0.0.1 http://127.0.0.1:* https://127.0.0.1:* http://0.0.0.0 https://0.0.0.0 http://0.0.0.0:* https://0.0.0.0:* app://* file://* tauri://*] OLLAMA_RUNNERS_DIR: OLLAMA_SCHED_SPREAD:false OLLAMA_TMPDIR: ROCR_VISIBLE_DEVICES:]" time=2024-07-22T13:17:50.145Z level=INFO source=images.go:784 msg="total blobs: 23" time=2024-07-22T13:17:50.145Z level=INFO source=images.go:791 msg="total unused blobs removed: 0" time=2024-07-22T13:17:50.145Z level=INFO source=routes.go:1147 msg="Listening on [::]:11434 (version 0.2.8-rc1)" time=2024-07-22T13:17:50.145Z level=INFO source=payload.go:30 msg="extracting embedded files" dir=/tmp/ollama2936133361/runners time=2024-07-22T13:17:51.862Z level=INFO source=payload.go:44 msg="Dynamic LLM libraries [cpu cpu_avx cpu_avx2 cuda_v11 rocm_v60102]" time=2024-07-22T13:17:51.862Z level=INFO source=gpu.go:205 msg="looking for compatible GPUs" time=2024-07-22T13:17:51.867Z level=INFO source=amd_linux.go:330 msg="amdgpu is supported" gpu=0 gpu_type=gfx1100 time=2024-07-22T13:17:51.868Z level=INFO source=amd_linux.go:330 msg="amdgpu is supported" gpu=1 gpu_type=gfx1100 time=2024-07-22T13:17:51.869Z level=INFO source=amd_linux.go:330 msg="amdgpu is supported" gpu=2 gpu_type=gfx1100 time=2024-07-22T13:17:51.869Z level=INFO source=amd_linux.go:259 msg="unsupported Radeon iGPU detected skipping" id=3 total="512.0 MiB" time=2024-07-22T13:17:51.869Z level=INFO source=types.go:105 msg="inference compute" id=0 library=rocm compute=gfx1100 driver=6.7 name=1002:744c total="24.0 GiB" available="24.0 GiB" time=2024-07-22T13:17:51.869Z level=INFO source=types.go:105 msg="inference compute" id=1 library=rocm compute=gfx1100 driver=6.7 name=1002:744c total="24.0 GiB" available="24.0 GiB" time=2024-07-22T13:17:51.869Z level=INFO source=types.go:105 msg="inference compute" id=2 library=rocm compute=gfx1100 driver=6.7 name=1002:744c total="24.0 GiB" available="24.0 GiB" [GIN] 2024/07/22 - 13:18:52 | 200 | 30.718µs | 127.0.0.1 | HEAD "/" [GIN] 2024/07/22 - 13:18:52 | 200 | 713.463µs | 127.0.0.1 | GET "/api/tags" [GIN] 2024/07/22 - 13:19:01 | 200 | 17.853µs | 127.0.0.1 | HEAD "/" [GIN] 2024/07/22 - 13:19:01 | 200 | 15.69545ms | 127.0.0.1 | POST "/api/show" time=2024-07-22T13:19:01.124Z level=INFO source=sched.go:717 msg="new model will fit in available VRAM, loading" model=/root/.ollama/models/blobs/sha256-a130d63aaf6f095d56914771afa0eb92ae04620ad496710957e9d650e4485b72 library=rocm parallel=4 required="45.3 GiB" time=2024-07-22T13:19:01.125Z level=INFO source=memory.go:309 msg="offload to rocm" layers.requested=-1 layers.model=33 layers.offload=33 layers.split=11,11,11 memory.available="[24.0 GiB 24.0 GiB 24.0 GiB]" memory.required.full="45.3 GiB" memory.required.partial="45.3 GiB" memory.required.kv="1.0 GiB" memory.required.allocations="[15.4 GiB 15.4 GiB 14.4 GiB]" memory.weights.total="36.5 GiB" memory.weights.repeating="36.4 GiB" 
memory.weights.nonrepeating="102.6 MiB" memory.graph.full="1.3 GiB" memory.graph.partial="1.3 GiB" time=2024-07-22T13:19:01.125Z level=INFO source=server.go:383 msg="starting llama server" cmd="/tmp/ollama2936133361/runners/rocm_v60102/ollama_llama_server --model /root/.ollama/models/blobs/sha256-a130d63aaf6f095d56914771afa0eb92ae04620ad496710957e9d650e4485b72 --ctx-size 8192 --batch-size 512 --embedding --log-disable --n-gpu-layers 33 --parallel 4 --tensor-split 11,11,11 --port 37323" time=2024-07-22T13:19:01.125Z level=INFO source=sched.go:437 msg="loaded runners" count=1 time=2024-07-22T13:19:01.126Z level=INFO source=server.go:573 msg="waiting for llama runner to start responding" time=2024-07-22T13:19:01.126Z level=INFO source=server.go:614 msg="waiting for server to become available" status="llm server error" INFO [main] build info | build=1 commit="a8db2a9" tid="127915017311040" timestamp=1721654341 INFO [main] system info | n_threads=16 n_threads_batch=-1 system_info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 0 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 0 | " tid="127915017311040" timestamp=1721654341 total_threads=32 INFO [main] HTTP server listening | hostname="127.0.0.1" n_threads_http="31" port="37323" tid="127915017311040" timestamp=1721654341 llama_model_loader: loaded meta data with 26 key-value pairs and 995 tensors from /root/.ollama/models/blobs/sha256-a130d63aaf6f095d56914771afa0eb92ae04620ad496710957e9d650e4485b72 (version GGUF V3 (latest)) llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. llama_model_loader: - kv 0: general.architecture str = llama llama_model_loader: - kv 1: general.name str = mistralai llama_model_loader: - kv 2: llama.context_length u32 = 32768 llama_model_loader: - kv 3: llama.embedding_length u32 = 4096 llama_model_loader: - kv 4: llama.block_count u32 = 32 llama_model_loader: - kv 5: llama.feed_forward_length u32 = 14336 llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128 llama_model_loader: - kv 7: llama.attention.head_count u32 = 32 llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 8 llama_model_loader: - kv 9: llama.expert_count u32 = 8 llama_model_loader: - kv 10: llama.expert_used_count u32 = 2 llama_model_loader: - kv 11: llama.attention.layer_norm_rms_epsilon f32 = 0.000010 llama_model_loader: - kv 12: llama.rope.freq_base f32 = 1000000.000000 llama_model_loader: - kv 13: general.file_type u32 = 18 llama_model_loader: - kv 14: tokenizer.ggml.model str = llama llama_model_loader: - kv 15: tokenizer.ggml.tokens arr[str,32000] = ["<unk>", "<s>", "</s>", "<0x00>", "<... llama_model_loader: - kv 16: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000... llama_model_loader: - kv 17: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ... llama_model_loader: - kv 18: tokenizer.ggml.merges arr[str,58980] = ["▁ t", "i n", "e r", "▁ a", "h e... 
llama_model_loader: - kv 19: tokenizer.ggml.bos_token_id u32 = 1 llama_model_loader: - kv 20: tokenizer.ggml.eos_token_id u32 = 2 llama_model_loader: - kv 21: tokenizer.ggml.unknown_token_id u32 = 0 llama_model_loader: - kv 22: tokenizer.ggml.add_bos_token bool = true llama_model_loader: - kv 23: tokenizer.ggml.add_eos_token bool = false llama_model_loader: - kv 24: tokenizer.chat_template str = {{ bos_token }}{% for message in mess... llama_model_loader: - kv 25: general.quantization_version u32 = 2 llama_model_loader: - type f32: 65 tensors llama_model_loader: - type f16: 32 tensors llama_model_loader: - type q8_0: 64 tensors llama_model_loader: - type q6_K: 834 tensors llm_load_vocab: special tokens cache size = 259 llm_load_vocab: token to piece cache size = 0.1637 MB llm_load_print_meta: format = GGUF V3 (latest) llm_load_print_meta: arch = llama llm_load_print_meta: vocab type = SPM llm_load_print_meta: n_vocab = 32000 llm_load_print_meta: n_merges = 0 llm_load_print_meta: vocab_only = 0 llm_load_print_meta: n_ctx_train = 32768 llm_load_print_meta: n_embd = 4096 llm_load_print_meta: n_layer = 32 llm_load_print_meta: n_head = 32 llm_load_print_meta: n_head_kv = 8 llm_load_print_meta: n_rot = 128 llm_load_print_meta: n_swa = 0 llm_load_print_meta: n_embd_head_k = 128 llm_load_print_meta: n_embd_head_v = 128 llm_load_print_meta: n_gqa = 4 llm_load_print_meta: n_embd_k_gqa = 1024 llm_load_print_meta: n_embd_v_gqa = 1024 llm_load_print_meta: f_norm_eps = 0.0e+00 llm_load_print_meta: f_norm_rms_eps = 1.0e-05 llm_load_print_meta: f_clamp_kqv = 0.0e+00 llm_load_print_meta: f_max_alibi_bias = 0.0e+00 llm_load_print_meta: f_logit_scale = 0.0e+00 llm_load_print_meta: n_ff = 14336 llm_load_print_meta: n_expert = 8 llm_load_print_meta: n_expert_used = 2 llm_load_print_meta: causal attn = 1 llm_load_print_meta: pooling type = 0 llm_load_print_meta: rope type = 0 llm_load_print_meta: rope scaling = linear llm_load_print_meta: freq_base_train = 1000000.0 llm_load_print_meta: freq_scale_train = 1 llm_load_print_meta: n_ctx_orig_yarn = 32768 llm_load_print_meta: rope_finetuned = unknown llm_load_print_meta: ssm_d_conv = 0 llm_load_print_meta: ssm_d_inner = 0 llm_load_print_meta: ssm_d_state = 0 llm_load_print_meta: ssm_dt_rank = 0 llm_load_print_meta: model type = 8x7B llm_load_print_meta: model ftype = Q6_K llm_load_print_meta: model params = 46.70 B llm_load_print_meta: model size = 35.74 GiB (6.57 BPW) llm_load_print_meta: general.name = mistralai llm_load_print_meta: BOS token = 1 '<s>' llm_load_print_meta: EOS token = 2 '</s>' llm_load_print_meta: UNK token = 0 '<unk>' llm_load_print_meta: LF token = 13 '<0x0A>' llm_load_print_meta: max token length = 48 time=2024-07-22T13:19:01.377Z level=INFO source=server.go:614 msg="waiting for server to become available" status="llm server loading model" ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no ggml_cuda_init: found 3 ROCm devices: Device 0: Radeon RX 7900 XTX, compute capability 11.0, VMM: no Device 1: Radeon RX 7900 XTX, compute capability 11.0, VMM: no Device 2: Radeon RX 7900 XTX, compute capability 11.0, VMM: no llm_load_tensors: ggml ctx size = 1.53 MiB time=2024-07-22T13:19:05.338Z level=INFO source=server.go:614 msg="waiting for server to become available" status="llm server not responding" llm_load_tensors: offloading 32 repeating layers to GPU llm_load_tensors: offloading non-repeating layers to GPU llm_load_tensors: offloaded 33/33 layers to GPU llm_load_tensors: ROCm0 buffer size = 12510.78 MiB 
llm_load_tensors: ROCm1 buffer size = 12510.78 MiB llm_load_tensors: ROCm2 buffer size = 11475.99 MiB llm_load_tensors: ROCm_Host buffer size = 102.54 MiB time=2024-07-22T13:19:05.590Z level=INFO source=server.go:614 msg="waiting for server to become available" status="llm server loading model" time=2024-07-22T13:19:15.818Z level=INFO source=server.go:614 msg="waiting for server to become available" status="llm server not responding" time=2024-07-22T13:19:16.646Z level=INFO source=server.go:614 msg="waiting for server to become available" status="llm server loading model" llama_new_context_with_model: n_ctx = 8192 llama_new_context_with_model: n_batch = 512 llama_new_context_with_model: n_ubatch = 512 llama_new_context_with_model: flash_attn = 0 llama_new_context_with_model: freq_base = 1000000.0 llama_new_context_with_model: freq_scale = 1 time=2024-07-22T13:19:18.099Z level=INFO source=server.go:614 msg="waiting for server to become available" status="llm server not responding" time=2024-07-22T13:19:18.912Z level=INFO source=server.go:614 msg="waiting for server to become available" status="llm server loading model" llama_kv_cache_init: ROCm0 KV buffer size = 352.00 MiB llama_kv_cache_init: ROCm1 KV buffer size = 352.00 MiB llama_kv_cache_init: ROCm2 KV buffer size = 320.00 MiB llama_new_context_with_model: KV self size = 1024.00 MiB, K (f16): 512.00 MiB, V (f16): 512.00 MiB llama_new_context_with_model: ROCm_Host output buffer size = 0.55 MiB llama_new_context_with_model: pipeline parallelism enabled (n_copies=4) llama_new_context_with_model: ROCm0 compute buffer size = 640.01 MiB llama_new_context_with_model: ROCm1 compute buffer size = 640.01 MiB llama_new_context_with_model: ROCm2 compute buffer size = 640.02 MiB llama_new_context_with_model: ROCm_Host compute buffer size = 72.02 MiB llama_new_context_with_model: graph nodes = 1510 llama_new_context_with_model: graph splits = 4 time=2024-07-22T13:19:20.722Z level=INFO source=server.go:614 msg="waiting for server to become available" status="llm server error" time=2024-07-22T13:19:20.973Z level=ERROR source=sched.go:443 msg="error loading llama server" error="llama runner process has terminated: signal: segmentation fault (core dumped) " [GIN] 2024/07/22 - 13:19:20 | 500 | 19.864840905s | 127.0.0.1 | POST "/api/chat" time=2024-07-22T13:19:25.973Z level=WARN source=sched.go:634 msg="gpu VRAM usage didn't recover within timeout" seconds=5.000268374 model=/root/.ollama/models/blobs/sha256-a130d63aaf6f095d56914771afa0eb92ae04620ad496710957e9d650e4485b72 time=2024-07-22T13:19:26.224Z level=WARN source=sched.go:634 msg="gpu VRAM usage didn't recover within timeout" seconds=5.250851195 model=/root/.ollama/models/blobs/sha256-a130d63aaf6f095d56914771afa0eb92ae04620ad496710957e9d650e4485b72 time=2024-07-22T13:19:26.473Z level=WARN source=sched.go:634 msg="gpu VRAM usage didn't recover within timeout" seconds=5.500499856 model=/root/.ollama/models/blobs/sha256-a130d63aaf6f095d56914771afa0eb92ae04620ad496710957e9d650e4485b72 ``` ### OS Linux ### GPU AMD ### CPU AMD ### Ollama version 0.2.8-rc1-rocm
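One way to narrow this down is to restrict the runner to a single device using the visibility variables already shown in the config dump above; if one GPU loads cleanly, the segfault is likely in the 3-way tensor split rather than in the model file. A diagnostic sketch, not a fix:
```
# Expose only the first GPU to the ROCm runtime, then retry the same model
HIP_VISIBLE_DEVICES=0 ollama serve
# equivalently: ROCR_VISIBLE_DEVICES=0 ollama serve
```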
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5850/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5850/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5751
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5751/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5751/comments
https://api.github.com/repos/ollama/ollama/issues/5751/events
https://github.com/ollama/ollama/issues/5751
2,414,209,658
I_kwDOJ0Z1Ps6P5ep6
5,751
New Command/Flag to notify about or download the latest updates to an already pulled model.
{ "login": "nviraj", "id": 8409854, "node_id": "MDQ6VXNlcjg0MDk4NTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/8409854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nviraj", "html_url": "https://github.com/nviraj", "followers_url": "https://api.github.com/users/nviraj/followers", "following_url": "https://api.github.com/users/nviraj/following{/other_user}", "gists_url": "https://api.github.com/users/nviraj/gists{/gist_id}", "starred_url": "https://api.github.com/users/nviraj/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nviraj/subscriptions", "organizations_url": "https://api.github.com/users/nviraj/orgs", "repos_url": "https://api.github.com/users/nviraj/repos", "events_url": "https://api.github.com/users/nviraj/events{/privacy}", "received_events_url": "https://api.github.com/users/nviraj/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
3
2024-07-17T17:55:12
2024-07-25T12:59:18
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
There might be scenarios where quantizations are recreated (e.g., gemma) or templates are updated in the model registry. If the `ls` or `pull` commands could surface this as info, it would improve the user experience: users would then benefit from the improved quantization and prompt templates.
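As a stopgap, `ollama pull` is already incremental: re-pulling only downloads layers whose digests changed upstream. A sketch that refreshes every installed model (the `awk` skips the header row of `ollama list`):
```
# Re-pull all local models; unchanged layers are skipped
for model in $(ollama list | awk 'NR>1 {print $1}'); do
  ollama pull "$model"
done
```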
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5751/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5751/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/7681
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7681/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7681/comments
https://api.github.com/repos/ollama/ollama/issues/7681/events
https://github.com/ollama/ollama/issues/7681
2,660,935,768
I_kwDOJ0Z1Ps6emqhY
7,681
Reinstalling the Docker image while keeping old models?
{ "login": "remco-pc", "id": 8077908, "node_id": "MDQ6VXNlcjgwNzc5MDg=", "avatar_url": "https://avatars.githubusercontent.com/u/8077908?v=4", "gravatar_id": "", "url": "https://api.github.com/users/remco-pc", "html_url": "https://github.com/remco-pc", "followers_url": "https://api.github.com/users/remco-pc/followers", "following_url": "https://api.github.com/users/remco-pc/following{/other_user}", "gists_url": "https://api.github.com/users/remco-pc/gists{/gist_id}", "starred_url": "https://api.github.com/users/remco-pc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/remco-pc/subscriptions", "organizations_url": "https://api.github.com/users/remco-pc/orgs", "repos_url": "https://api.github.com/users/remco-pc/repos", "events_url": "https://api.github.com/users/remco-pc/events{/privacy}", "received_events_url": "https://api.github.com/users/remco-pc/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396220, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA", "url": "https://api.github.com/repos/ollama/ollama/labels/question", "name": "question", "color": "d876e3", "default": true, "description": "General questions" } ]
closed
false
null
[]
null
4
2024-11-15T06:29:29
2024-11-18T09:03:47
2024-11-15T08:14:11
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Re-installing my Docker image with Ollama causes the models to disappear from the home directory. Can they be mounted somewhere so these models don't have to be re-downloaded? Are they learning from user input already? ### OS Linux, Windows, Docker, WSL2 ### GPU _No response_ ### CPU Intel ### Ollama version 0.4.1
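Models survive container re-creation when the model directory is mounted as a volume; this matches how the official image is documented to be run (the volume name `ollama` is arbitrary):
```
# Named volume persists /root/.ollama (and the models in it) across reinstalls
docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
```
And no, the models are static weights; they do not learn from user input.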
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7681/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7681/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/259
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/259/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/259/comments
https://api.github.com/repos/ollama/ollama/issues/259/events
https://github.com/ollama/ollama/issues/259
1,833,731,771
I_kwDOJ0Z1Ps5tTIa7
259
Enable GPU support on Linux
{ "login": "S1LV3RJ1NX", "id": 28920730, "node_id": "MDQ6VXNlcjI4OTIwNzMw", "avatar_url": "https://avatars.githubusercontent.com/u/28920730?v=4", "gravatar_id": "", "url": "https://api.github.com/users/S1LV3RJ1NX", "html_url": "https://github.com/S1LV3RJ1NX", "followers_url": "https://api.github.com/users/S1LV3RJ1NX/followers", "following_url": "https://api.github.com/users/S1LV3RJ1NX/following{/other_user}", "gists_url": "https://api.github.com/users/S1LV3RJ1NX/gists{/gist_id}", "starred_url": "https://api.github.com/users/S1LV3RJ1NX/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/S1LV3RJ1NX/subscriptions", "organizations_url": "https://api.github.com/users/S1LV3RJ1NX/orgs", "repos_url": "https://api.github.com/users/S1LV3RJ1NX/repos", "events_url": "https://api.github.com/users/S1LV3RJ1NX/events{/privacy}", "received_events_url": "https://api.github.com/users/S1LV3RJ1NX/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 5755339642, "node_id": "LA_kwDOJ0Z1Ps8AAAABVwuDeg", "url": "https://api.github.com/repos/ollama/ollama/labels/linux", "name": "linux", "color": "516E70", "default": false, "description": "" } ]
closed
false
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
18
2023-08-02T18:48:22
2024-04-01T18:28:24
2023-09-12T21:06:20
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I have built Ollama from source, but when I pass a prompt to the model, it does not use the GPU. The machine has 64 GB of RAM and a Tesla T4 GPU.
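A quick way to check whether the server actually offloaded anything is to look for the `offloaded N/M layers to GPU` line in the server log (the same line visible in other reports in this dataset). A sketch, covering both a hand-run server and the systemd-packaged install:
```
# Hand-run server: watch stderr for the offload report
ollama serve 2>&1 | grep -i "offloaded"
# Packaged install: query the service journal instead
journalctl -u ollama | grep -i "offloaded"
```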
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/259/reactions", "total_count": 10, "+1": 10, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/259/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6835
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6835/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6835/comments
https://api.github.com/repos/ollama/ollama/issues/6835/events
https://github.com/ollama/ollama/issues/6835
2,529,746,251
I_kwDOJ0Z1Ps6WyN1L
6,835
llama3.1:8b-instruct-q4_1 doesn't handle system prompt correctly (looks like it's dropped)
{ "login": "hellovai", "id": 2448742, "node_id": "MDQ6VXNlcjI0NDg3NDI=", "avatar_url": "https://avatars.githubusercontent.com/u/2448742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hellovai", "html_url": "https://github.com/hellovai", "followers_url": "https://api.github.com/users/hellovai/followers", "following_url": "https://api.github.com/users/hellovai/following{/other_user}", "gists_url": "https://api.github.com/users/hellovai/gists{/gist_id}", "starred_url": "https://api.github.com/users/hellovai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hellovai/subscriptions", "organizations_url": "https://api.github.com/users/hellovai/orgs", "repos_url": "https://api.github.com/users/hellovai/repos", "events_url": "https://api.github.com/users/hellovai/events{/privacy}", "received_events_url": "https://api.github.com/users/hellovai/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
1
2024-09-17T01:04:00
2024-12-02T22:53:40
2024-12-02T22:53:40
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? This fails: ``` curl -X POST 'http://localhost:11434/v1/chat/completions' -H "content-type: application/json" -d '{ "model": "llama3.1:8b-instruct-q4_1", "messages": [ { "role": "system", "content": [ { "type": "text", "text": "Extract from this content:\nVaibhav Gupta\nvbv@boundaryml.com\n\nExperience:\n- Founder at BoundaryML\n- CV Engineer at Google\n- CV Engineer at Microsoft\n\nSkills:\n- Rust\n- C++\n\nAnswer in JSON using this schema:\n{\n name: string,\n email: string or null,\n experience: string[],\n skills: string[],\n}" } ] } ], "stream": true }' ``` but this one works: ``` curl -X POST 'http://localhost:11434/v1/chat/completions' -H "content-type: application/json" -d '{ "model": "llama3.1", "messages": [ { "role": "system", "content": [ { "type": "text", "text": "Extract from this content:\nVaibhav Gupta\nvbv@boundaryml.com\n\nExperience:\n- Founder at BoundaryML\n- CV Engineer at Google\n- CV Engineer at Microsoft\n\nSkills:\n- Rust\n- C++\n\nAnswer in JSON using this schema:\n{\n name: string,\n email: string or null,\n experience: string[],\n skills: string[],\n}" } ] } ], "stream": true }' ``` ### OS macOS ### GPU Apple ### CPU Apple ### Ollama version 0.3.10
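As a point of comparison, the OpenAI-compatible endpoint also accepts `content` as a plain string rather than an array of typed parts; a hedged sketch (with a shortened, illustrative prompt) to test whether the array form is what this particular quant mishandles:
```
curl -X POST 'http://localhost:11434/v1/chat/completions' -H "content-type: application/json" -d '{
  "model": "llama3.1:8b-instruct-q4_1",
  "messages": [
    { "role": "system", "content": "Answer in JSON with keys name, email, experience, skills." }
  ],
  "stream": true
}'
```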
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6835/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6835/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/1663
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1663/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1663/comments
https://api.github.com/repos/ollama/ollama/issues/1663/events
https://github.com/ollama/ollama/issues/1663
2,052,942,902
I_kwDOJ0Z1Ps56XWw2
1,663
Using CUDA, but GPU shows near 0% usage
{ "login": "Firebrand", "id": 7831979, "node_id": "MDQ6VXNlcjc4MzE5Nzk=", "avatar_url": "https://avatars.githubusercontent.com/u/7831979?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Firebrand", "html_url": "https://github.com/Firebrand", "followers_url": "https://api.github.com/users/Firebrand/followers", "following_url": "https://api.github.com/users/Firebrand/following{/other_user}", "gists_url": "https://api.github.com/users/Firebrand/gists{/gist_id}", "starred_url": "https://api.github.com/users/Firebrand/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Firebrand/subscriptions", "organizations_url": "https://api.github.com/users/Firebrand/orgs", "repos_url": "https://api.github.com/users/Firebrand/repos", "events_url": "https://api.github.com/users/Firebrand/events{/privacy}", "received_events_url": "https://api.github.com/users/Firebrand/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 6430601766, "node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg", "url": "https://api.github.com/repos/ollama/ollama/labels/nvidia", "name": "nvidia", "color": "8CDB00", "default": false, "description": "Issues relating to Nvidia GPUs and CUDA" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
39
2023-12-21T19:15:05
2024-12-04T23:15:26
2024-03-13T17:26:53
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi folks, It appears that Ollama is using CUDA properly, but in my resource monitor I'm getting near-0% GPU usage when running a prompt, and the response is extremely slow (15 minutes for a one-line response). Thanks! **Running on Ubuntu 22.04/WSL2/Windows 10 - GeForce GTX 1080 - 32GB RAM** ![image](https://github.com/jmorganca/ollama/assets/7831979/18d51ce6-b2df-4405-9a0a-343a2696e634) ![image](https://github.com/jmorganca/ollama/assets/7831979/46846baa-5e42-487e-9bda-a44ba0db4eda) ![image](https://github.com/jmorganca/ollama/assets/7831979/4411fe22-e826-4e2b-bee7-4a6148d743b5)
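For a reading independent of the resource monitor (which on Windows often graphs the 3D engine rather than the compute/CUDA engine), `nvidia-smi` shows compute utilization and whether the runner process is holding VRAM; a minimal check while a prompt is running:
```
# Sample GPU utilization and memory once per second
nvidia-smi --query-gpu=utilization.gpu,memory.used --format=csv -l 1
```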
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1663/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1663/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8268
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8268/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8268/comments
https://api.github.com/repos/ollama/ollama/issues/8268/events
https://github.com/ollama/ollama/issues/8268
2,762,859,218
I_kwDOJ0Z1Ps6kreLS
8,268
When will Deepseek-v3 be supported?
{ "login": "joey-zmw", "id": 47174983, "node_id": "MDQ6VXNlcjQ3MTc0OTgz", "avatar_url": "https://avatars.githubusercontent.com/u/47174983?v=4", "gravatar_id": "", "url": "https://api.github.com/users/joey-zmw", "html_url": "https://github.com/joey-zmw", "followers_url": "https://api.github.com/users/joey-zmw/followers", "following_url": "https://api.github.com/users/joey-zmw/following{/other_user}", "gists_url": "https://api.github.com/users/joey-zmw/gists{/gist_id}", "starred_url": "https://api.github.com/users/joey-zmw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/joey-zmw/subscriptions", "organizations_url": "https://api.github.com/users/joey-zmw/orgs", "repos_url": "https://api.github.com/users/joey-zmw/repos", "events_url": "https://api.github.com/users/joey-zmw/events{/privacy}", "received_events_url": "https://api.github.com/users/joey-zmw/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
2
2024-12-30T08:46:18
2024-12-31T05:56:47
2024-12-31T05:56:47
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8268/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8268/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6170
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6170/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6170/comments
https://api.github.com/repos/ollama/ollama/issues/6170/events
https://github.com/ollama/ollama/pull/6170
2,447,749,096
PR_kwDOJ0Z1Ps53ZzjF
6,170
Compatibility Patches for LUMI Supercomputer
{ "login": "lupreCSC", "id": 143992330, "node_id": "U_kgDOCJUmCg", "avatar_url": "https://avatars.githubusercontent.com/u/143992330?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lupreCSC", "html_url": "https://github.com/lupreCSC", "followers_url": "https://api.github.com/users/lupreCSC/followers", "following_url": "https://api.github.com/users/lupreCSC/following{/other_user}", "gists_url": "https://api.github.com/users/lupreCSC/gists{/gist_id}", "starred_url": "https://api.github.com/users/lupreCSC/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lupreCSC/subscriptions", "organizations_url": "https://api.github.com/users/lupreCSC/orgs", "repos_url": "https://api.github.com/users/lupreCSC/repos", "events_url": "https://api.github.com/users/lupreCSC/events{/privacy}", "received_events_url": "https://api.github.com/users/lupreCSC/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-08-05T06:48:07
2024-11-22T16:19:47
2024-11-22T16:19:47
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6170", "html_url": "https://github.com/ollama/ollama/pull/6170", "diff_url": "https://github.com/ollama/ollama/pull/6170.diff", "patch_url": "https://github.com/ollama/ollama/pull/6170.patch", "merged_at": null }
These are 3 patches required to allow Ollama to run on the LUMI EuroHPC Supercomputer. 1. 32adb46833f0e47b2ef3d967dc2d49dd7226e98f - Skip unreadable AMD GPU nodes in AMDGetGPUInfo. This is a general bug fix: errors while reading (as opposed to opening) a `/sys/class/kfd/kfd/topology/nodes/*/properties` file were previously not caught, leading to uninitialized values for the corresponding GPU in `AMDGetGPUInfo`. 2. 07eb2f07a6ec24deb68980f1544fff7a206479fc - Check ROCm install for libhipblas.so* instead of libhipblas.so.2*. Ollama compiles fine for older ROCm versions (such as 5.6.1) but then refuses to use them because the current detection logic requires `libhipblas.so.2*` (without the code apparently really relying on hipblas v2). Relaxing this to `libhipblas.so.*` allows it to run without observed issues. 3. 77cc65fa1bd9831eb92909b757301213718d7f82 - Index only available AMD GPUs. On systems using the SLURM job scheduler and cgroups to limit access to GPU devices, Ollama's mapping of device indices to devices is inaccurate, i.e., devices end up being indexed differently by the ROCm runtime than by Ollama. (By _indexed_ I refer here to the number assigned to a GPU device, as used, e.g., to set `ROCR_VISIBLE_DEVICES`.) This is a fix for that. Please refer to the individual commit messages for more detailed descriptions. For the last commit I have to admit that I cannot be a hundred percent sure it would work for all systems. It appears to work fine on LUMI, but I was unable to confirm this for other systems. I am willing to exclude it from this PR in favour of getting the other changes in, if you feel strongly opposed to it. However, the underlying problem it attempts to fix would then need another solution. (Ideally, Ollama would use some ROCm-provided API to obtain all available devices rather than enumerating things in `/sys` manually.)
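A rough shell-level reproduction of the three conditions the patches target, assuming a SLURM allocation with cgroup-restricted GPUs and the paths named in the description (the ROCm install prefix is illustrative):
```
# Patch 1: reading (not merely opening) each properties file must succeed
cat /sys/class/kfd/kfd/topology/nodes/*/properties > /dev/null
# Patch 2: detection should accept any libhipblas.so*, not only .so.2*
ls -l /opt/rocm/lib/libhipblas.so*
# Patch 3: compare the runtime's enumeration against the raw sysfs view
rocminfo | grep -E '^Agent|Name:'
ls -d /sys/class/kfd/kfd/topology/nodes/*
```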
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6170/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6170/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/1004
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1004/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1004/comments
https://api.github.com/repos/ollama/ollama/issues/1004/events
https://github.com/ollama/ollama/issues/1004
1,977,521,531
I_kwDOJ0Z1Ps513pV7
1,004
Feature request: Make `ollama run` also launch `serve` as child process
{ "login": "Anrock", "id": 1689392, "node_id": "MDQ6VXNlcjE2ODkzOTI=", "avatar_url": "https://avatars.githubusercontent.com/u/1689392?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Anrock", "html_url": "https://github.com/Anrock", "followers_url": "https://api.github.com/users/Anrock/followers", "following_url": "https://api.github.com/users/Anrock/following{/other_user}", "gists_url": "https://api.github.com/users/Anrock/gists{/gist_id}", "starred_url": "https://api.github.com/users/Anrock/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Anrock/subscriptions", "organizations_url": "https://api.github.com/users/Anrock/orgs", "repos_url": "https://api.github.com/users/Anrock/repos", "events_url": "https://api.github.com/users/Anrock/events{/privacy}", "received_events_url": "https://api.github.com/users/Anrock/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
3
2023-11-04T21:37:54
2023-11-04T22:49:56
2023-11-04T22:49:56
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I'm using ollama mostly for fun and occasional queries - a couple of times per day. Right now one has to run `ollama serve` in one terminal and then run `ollama run` in another, which is a bit clunky. Another option is to keep `ollama serve` running in the background constantly; however, running a model consumes a substantial amount of RAM and VRAM. Would it be possible to make `ollama run` also launch `serve` as a child process and shut it down when the user exits the prompt, if some option is given to `run`?
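In the meantime the requested behavior can be approximated with a small wrapper script; a sketch assuming a fixed startup delay is enough for the server to come up (a robust version would poll the API instead):
```
#!/bin/sh
# Hypothetical wrapper: start the server as a child, stop it when the prompt exits
ollama serve &
SERVE_PID=$!
trap 'kill "$SERVE_PID"' EXIT
sleep 2              # crude wait; polling http://127.0.0.1:11434 would be safer
ollama run "${1:-llama2}"
```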
{ "login": "Anrock", "id": 1689392, "node_id": "MDQ6VXNlcjE2ODkzOTI=", "avatar_url": "https://avatars.githubusercontent.com/u/1689392?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Anrock", "html_url": "https://github.com/Anrock", "followers_url": "https://api.github.com/users/Anrock/followers", "following_url": "https://api.github.com/users/Anrock/following{/other_user}", "gists_url": "https://api.github.com/users/Anrock/gists{/gist_id}", "starred_url": "https://api.github.com/users/Anrock/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Anrock/subscriptions", "organizations_url": "https://api.github.com/users/Anrock/orgs", "repos_url": "https://api.github.com/users/Anrock/repos", "events_url": "https://api.github.com/users/Anrock/events{/privacy}", "received_events_url": "https://api.github.com/users/Anrock/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1004/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1004/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3500
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3500/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3500/comments
https://api.github.com/repos/ollama/ollama/issues/3500/events
https://github.com/ollama/ollama/issues/3500
2,227,385,593
I_kwDOJ0Z1Ps6EwzT5
3,500
:hugs: Please add HF URL to gemma model
{ "login": "adriens", "id": 5235127, "node_id": "MDQ6VXNlcjUyMzUxMjc=", "avatar_url": "https://avatars.githubusercontent.com/u/5235127?v=4", "gravatar_id": "", "url": "https://api.github.com/users/adriens", "html_url": "https://github.com/adriens", "followers_url": "https://api.github.com/users/adriens/followers", "following_url": "https://api.github.com/users/adriens/following{/other_user}", "gists_url": "https://api.github.com/users/adriens/gists{/gist_id}", "starred_url": "https://api.github.com/users/adriens/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/adriens/subscriptions", "organizations_url": "https://api.github.com/users/adriens/orgs", "repos_url": "https://api.github.com/users/adriens/repos", "events_url": "https://api.github.com/users/adriens/events{/privacy}", "received_events_url": "https://api.github.com/users/adriens/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
0
2024-04-05T08:27:59
2024-04-19T15:41:17
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What are you trying to do? I would like to get the HF URL from https://ollama.com/library/gemma ### How should we solve this? By adding https://huggingface.co/docs/transformers/en/model_doc/gemma to https://ollama.com/library/gemma ### What is the impact of not solving this? Suboptimal documentation ;-p ### Anything else? _No response_
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3500/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3500/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/7720
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7720/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7720/comments
https://api.github.com/repos/ollama/ollama/issues/7720/events
https://github.com/ollama/ollama/issues/7720
2,667,126,255
I_kwDOJ0Z1Ps6e-R3v
7,720
Why can't the installation directory be modified?
{ "login": "chengpengvb", "id": 28288567, "node_id": "MDQ6VXNlcjI4Mjg4NTY3", "avatar_url": "https://avatars.githubusercontent.com/u/28288567?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chengpengvb", "html_url": "https://github.com/chengpengvb", "followers_url": "https://api.github.com/users/chengpengvb/followers", "following_url": "https://api.github.com/users/chengpengvb/following{/other_user}", "gists_url": "https://api.github.com/users/chengpengvb/gists{/gist_id}", "starred_url": "https://api.github.com/users/chengpengvb/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chengpengvb/subscriptions", "organizations_url": "https://api.github.com/users/chengpengvb/orgs", "repos_url": "https://api.github.com/users/chengpengvb/repos", "events_url": "https://api.github.com/users/chengpengvb/events{/privacy}", "received_events_url": "https://api.github.com/users/chengpengvb/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
2
2024-11-18T04:46:32
2024-11-18T22:05:53
2024-11-18T22:05:53
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I found that on Windows it installs into the user directory on the C drive by default, which is unreasonable; the install location should be modifiable. Also, the model download location can only be changed through an environment variable, not through the UI, which is inconvenient.
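For reference, both locations are already configurable today, just not through the UI. A minimal sketch, assuming the installer flag documented for Windows and the `OLLAMA_MODELS` environment variable; the `D:\...` paths are placeholders:

```
REM Install Ollama to a custom directory (flag per the Windows docs):
OllamaSetup.exe /DIR="D:\Programs\Ollama"

REM Point model storage at another drive for the current user
REM (takes effect in newly opened shells):
setx OLLAMA_MODELS "D:\ollama\models"
```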
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7720/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7720/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2123
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2123/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2123/comments
https://api.github.com/repos/ollama/ollama/issues/2123/events
https://github.com/ollama/ollama/issues/2123
2,092,572,363
I_kwDOJ0Z1Ps58uh7L
2,123
After upgrading Ollama, it no longer runs any model: Error: Post "http://127.0.0.1:11434/api/generate": EOF
{ "login": "venturaEffect", "id": 29057173, "node_id": "MDQ6VXNlcjI5MDU3MTcz", "avatar_url": "https://avatars.githubusercontent.com/u/29057173?v=4", "gravatar_id": "", "url": "https://api.github.com/users/venturaEffect", "html_url": "https://github.com/venturaEffect", "followers_url": "https://api.github.com/users/venturaEffect/followers", "following_url": "https://api.github.com/users/venturaEffect/following{/other_user}", "gists_url": "https://api.github.com/users/venturaEffect/gists{/gist_id}", "starred_url": "https://api.github.com/users/venturaEffect/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/venturaEffect/subscriptions", "organizations_url": "https://api.github.com/users/venturaEffect/orgs", "repos_url": "https://api.github.com/users/venturaEffect/repos", "events_url": "https://api.github.com/users/venturaEffect/events{/privacy}", "received_events_url": "https://api.github.com/users/venturaEffect/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
19
2024-01-21T12:50:33
2024-01-28T13:49:02
2024-01-25T17:13:55
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I have seen this error everywhere for months. There have been plenty of releases, but the error keeps appearing. I followed the Discord, searched the web, and saw related issues on this repo. I tried things like creating a Modelfile "dolphin-mistral" with `FROM dolphin-2.1-mistral-7b PARAMETER num_gpu 0`. I upgraded Ollama: `curl https://ollama.ai/install.sh | sh` Still nothing: `ollama run dolphin-mistral Error: Post "http://127.0.0.1:11434/api/generate": EOF` Any suggestions will be much appreciated.
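One detail worth checking: in a Modelfile, each instruction goes on its own line, so `FROM ... PARAMETER ...` on a single line would not parse (the single-line rendering above may just be this report's formatting). A minimal sketch of the intended file, assuming the reporter's base model name is available locally or in the library:

```
# Modelfile: force CPU-only inference by offloading zero layers to the GPU
FROM dolphin-2.1-mistral-7b
PARAMETER num_gpu 0
```

It would then be built and run with `ollama create dolphin-cpu -f Modelfile` followed by `ollama run dolphin-cpu` (the `dolphin-cpu` name is a placeholder).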
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2123/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/2123/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4430
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4430/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4430/comments
https://api.github.com/repos/ollama/ollama/issues/4430/events
https://github.com/ollama/ollama/pull/4430
2,295,940,809
PR_kwDOJ0Z1Ps5vbVNE
4,430
Remove VRAM convergence check for windows
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-05-14T16:48:30
2024-05-14T17:59:10
2024-05-14T17:59:07
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4430", "html_url": "https://github.com/ollama/ollama/pull/4430", "diff_url": "https://github.com/ollama/ollama/pull/4430.diff", "patch_url": "https://github.com/ollama/ollama/pull/4430.patch", "merged_at": "2024-05-14T17:59:07" }
The APIs we query are optimistic about free space, and Windows pages VRAM, so we don't have to wait for the reported usage to recover on unload.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4430/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4430/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5842
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5842/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5842/comments
https://api.github.com/repos/ollama/ollama/issues/5842/events
https://github.com/ollama/ollama/issues/5842
2,421,997,407
I_kwDOJ0Z1Ps6QXL9f
5,842
Model Reloading and Excessive VRAM Usage Issues with Ollama Backend
{ "login": "ALEX000V", "id": 77981765, "node_id": "MDQ6VXNlcjc3OTgxNzY1", "avatar_url": "https://avatars.githubusercontent.com/u/77981765?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ALEX000V", "html_url": "https://github.com/ALEX000V", "followers_url": "https://api.github.com/users/ALEX000V/followers", "following_url": "https://api.github.com/users/ALEX000V/following{/other_user}", "gists_url": "https://api.github.com/users/ALEX000V/gists{/gist_id}", "starred_url": "https://api.github.com/users/ALEX000V/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ALEX000V/subscriptions", "organizations_url": "https://api.github.com/users/ALEX000V/orgs", "repos_url": "https://api.github.com/users/ALEX000V/repos", "events_url": "https://api.github.com/users/ALEX000V/events{/privacy}", "received_events_url": "https://api.github.com/users/ALEX000V/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-07-22T05:47:36
2024-07-22T08:19:09
2024-07-22T08:19:09
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? ### Relevant environment info ```Markdown - OS: Windows 11 23H2 - Continue: v0.8.43 / v0.0.55 - IDE: VSCode 1.91.1 / IntelliJ IDEA 2024.1.4 (Community Edition) - Model: deepseek-coder-v2:16b-lite-instruct-q4_0 - ollama: v0.2.7 - CUDA: V12.3.103 - config.json: { "models": [ { "title": "Ollama", "provider": "ollama", "model": "AUTODETECT" }, { "title": "deepseek-coder-v2:16b", "provider": "ollama", "model": "deepseek-coder-v2:16b-lite-instruct-q4_0", "apiBase": "http://localhost:11434" } ], "tabAutocompleteModel": { "title": "deepseek-coder-v2:16b", "provider": "ollama", "model": "deepseek-coder-v2:16b-lite-instruct-q4_0" }, "allowAnonymousTelemetry": false } ``` ### Description I am encountering two distinct but related issues when using the Continue plugin in VSCode and IDEA with Ollama as the backend for model processing. The first issue involves models being repeatedly loaded and unloaded when accessed through different interfaces, and the second issue pertains to abnormal VRAM usage when models are accessed via the Continue plugin compared to other methods. **VRAM Changes Example:** The screenshot below shows the VRAM changes when `OLLAMA_NUM_PARALLEL` is set to 4: loading the `deepseek-coder-v2:16b-lite-instruct-q4_0` model using the Continue plugin, then executing `ollama run deepseek-coder-v2:16b-lite-instruct-q4_0` via the command line, and then sending information to the model using the Continue plugin again. ![vram_load](https://github.com/user-attachments/assets/0c11355e-3107-464e-bd5e-c55594c66bb7) ### To reproduce **Issue 1: Model Reloading** - **Steps to Reproduce:** 1. Open VSCode/IDEA and activate the Continue plugin. 2. Use the Continue plugin to send a request to Ollama to load a specific model. 3. Attempt to access the same model via another non-Continue plugin method (e.g., command line, another plugin, or web UI). 4. Observe that the model is unloaded and then reloaded, rather than being reused from memory. 5. Alternatively, first load the model using a non-Continue plugin method, then attempt to access it via the Continue plugin. 6. Observe that the model is again unloaded and then reloaded. - **Expected Behavior:** Once a model is loaded by any interface, it should be accessible by all other interfaces without needing to be reloaded. - **Actual Behavior:** Each time the model is accessed through a different (Continue vs. non-Continue) interface, it is unloaded and then reloaded. **Issue 2: Excessive VRAM Usage** - **Steps to Reproduce:** 1. Use the Continue plugin to load a model (e.g., `deepseek-coder-v2:16b-lite-instruct-q4_0`). 2. Compare the VRAM usage with the same model loaded via other methods (e.g., command line, other plugins, or web UI). 3. Observe that the VRAM usage is significantly higher when using the Continue plugin. - **Expected Behavior:** The VRAM usage should be consistent regardless of the method used to load the model. - **Actual Behavior:** The VRAM usage is higher when using the Continue plugin compared to other methods. For example, with each increment of `OLLAMA_NUM_PARALLEL` by 1, the command line method adds 0.6GiB, while the Continue plugin adds 1.2GiB. ### Log output _No response_ ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.2.7
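A plausible explanation for both symptoms is that Continue sends different request options (notably its own context size) than the CLI, and a request whose options differ from the loaded instance forces a reload. A hedged way to check this outside the IDE, as a sketch against the local API:

```
# Same model, two different context sizes; if differing options force a
# reload, the second call evicts and reloads the resident instance.
curl http://localhost:11434/api/generate -d '{
  "model": "deepseek-coder-v2:16b-lite-instruct-q4_0",
  "prompt": "test",
  "options": {"num_ctx": 2048}
}'
curl http://localhost:11434/api/generate -d '{
  "model": "deepseek-coder-v2:16b-lite-instruct-q4_0",
  "prompt": "test",
  "options": {"num_ctx": 8192}
}'
```

Running `ollama ps` between the two calls shows whether the loaded instance survived.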
{ "login": "ALEX000V", "id": 77981765, "node_id": "MDQ6VXNlcjc3OTgxNzY1", "avatar_url": "https://avatars.githubusercontent.com/u/77981765?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ALEX000V", "html_url": "https://github.com/ALEX000V", "followers_url": "https://api.github.com/users/ALEX000V/followers", "following_url": "https://api.github.com/users/ALEX000V/following{/other_user}", "gists_url": "https://api.github.com/users/ALEX000V/gists{/gist_id}", "starred_url": "https://api.github.com/users/ALEX000V/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ALEX000V/subscriptions", "organizations_url": "https://api.github.com/users/ALEX000V/orgs", "repos_url": "https://api.github.com/users/ALEX000V/repos", "events_url": "https://api.github.com/users/ALEX000V/events{/privacy}", "received_events_url": "https://api.github.com/users/ALEX000V/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5842/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5842/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8475
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8475/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8475/comments
https://api.github.com/repos/ollama/ollama/issues/8475/events
https://github.com/ollama/ollama/issues/8475
2,796,575,411
I_kwDOJ0Z1Ps6msFqz
8,475
ollama/ollama:rocm not detecting AMD GPU being passed in
{ "login": "Vilchis-Joshua", "id": 24929856, "node_id": "MDQ6VXNlcjI0OTI5ODU2", "avatar_url": "https://avatars.githubusercontent.com/u/24929856?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Vilchis-Joshua", "html_url": "https://github.com/Vilchis-Joshua", "followers_url": "https://api.github.com/users/Vilchis-Joshua/followers", "following_url": "https://api.github.com/users/Vilchis-Joshua/following{/other_user}", "gists_url": "https://api.github.com/users/Vilchis-Joshua/gists{/gist_id}", "starred_url": "https://api.github.com/users/Vilchis-Joshua/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Vilchis-Joshua/subscriptions", "organizations_url": "https://api.github.com/users/Vilchis-Joshua/orgs", "repos_url": "https://api.github.com/users/Vilchis-Joshua/repos", "events_url": "https://api.github.com/users/Vilchis-Joshua/events{/privacy}", "received_events_url": "https://api.github.com/users/Vilchis-Joshua/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
2
2025-01-18T01:48:35
2025-01-24T20:55:36
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? # Overview I've been trying to add ollama/openwebui to my home lab and I've run across an issue I cannot figure out. ROCm was finally released last month for my Linux distro, and I've got it installed and can run Ollama (on GPU) on my host machine. When I try to transition to Docker, however, I am unable to get it to work. I will provide what I have, but please ask any clarifying questions. ## Components Host: Debian 6.1.124-1 CPU: I7-12700K GPU: Radeon 6900 XT # Docker compose file ``` services: ollama: image: ollama/ollama:rocm container_name: ollama hostname: ollama privileged: true volumes: - ollama:/root/.ollama environment: HSA_OVERRIDE_GFX_VERSION: 10.3.0 AMD_SERIALIZE_KERNEL: 3 HIP_VISIBLE_DEVICES: 0 OLLAMA_DEBUG: 1 AMD_LOG_LEVEL: 3 # ports: # - :11434 restart: unless-stopped networks: - npm devices: - /dev/kfd - /dev/dri group_add: - 105 - 44 security_opt: - seccomp:unconfined ``` # Logs & Outputs No errors as far as I can tell. Here are the startup logs in Docker: ``` 2025-01-17 20:40:33 2025/01/18 01:40:33 routes.go:1187: INFO server config env="map[CUDA_VISIBLE_DEVICES: GPU_DEVICE_ORDINAL: HIP_VISIBLE_DEVICES:0 HSA_OVERRIDE_GFX_VERSION:10.3.0 HTTPS_PROXY: HTTP_PROXY: NO_PROXY: OLLAMA_DEBUG:true OLLAMA_FLASH_ATTENTION:false OLLAMA_GPU_OVERHEAD:0 OLLAMA_HOST:http://0.0.0.0:11434 OLLAMA_INTEL_GPU:false OLLAMA_KEEP_ALIVE:5m0s OLLAMA_KV_CACHE_TYPE: OLLAMA_LLM_LIBRARY: OLLAMA_LOAD_TIMEOUT:5m0s OLLAMA_MAX_LOADED_MODELS:0 OLLAMA_MAX_QUEUE:512 OLLAMA_MODELS:/root/.ollama/models OLLAMA_MULTIUSER_CACHE:false OLLAMA_NOHISTORY:false OLLAMA_NOPRUNE:false OLLAMA_NUM_PARALLEL:0 OLLAMA_ORIGINS:[http://localhost https://localhost http://localhost:* https://localhost:* http://127.0.0.1 https://127.0.0.1 http://127.0.0.1:* https://127.0.0.1:* http://0.0.0.0 https://0.0.0.0 http://0.0.0.0:* https://0.0.0.0:* app://* file://* tauri://* vscode-webview://*] OLLAMA_SCHED_SPREAD:false ROCR_VISIBLE_DEVICES: http_proxy: https_proxy: no_proxy:]" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.654Z level=INFO source=images.go:432 msg="total blobs: 0" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.655Z level=INFO source=images.go:439 msg="total unused blobs removed: 0" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.655Z level=INFO source=routes.go:1238 msg="Listening on [::]:11434 (version 0.5.7-0-ga420a45-dirty)" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.655Z level=DEBUG source=common.go:80 msg="runners located" dir=/usr/lib/ollama/runners 2025-01-17 20:40:33 time=2025-01-18T01:40:33.655Z level=DEBUG source=common.go:124 msg="availableServers : found" file=/usr/lib/ollama/runners/cpu_avx/ollama_llama_server 2025-01-17 20:40:33 time=2025-01-18T01:40:33.655Z level=DEBUG source=common.go:124 msg="availableServers : found" file=/usr/lib/ollama/runners/cpu_avx2/ollama_llama_server 2025-01-17 20:40:33 time=2025-01-18T01:40:33.655Z level=DEBUG source=common.go:124 msg="availableServers : found" file=/usr/lib/ollama/runners/rocm_avx/ollama_llama_server 2025-01-17 20:40:33 time=2025-01-18T01:40:33.655Z level=INFO source=routes.go:1267 msg="Dynamic LLM libraries" runners="[cpu cpu_avx cpu_avx2 rocm_avx]" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.655Z level=DEBUG source=routes.go:1268 msg="Override detection logic by setting OLLAMA_LLM_LIBRARY" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.655Z level=DEBUG source=sched.go:105 msg="starting llm scheduler" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.656Z level=INFO source=gpu.go:226 msg="looking for compatible GPUs" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.656Z level=DEBUG source=gpu.go:99 msg="searching for GPU discovery libraries for NVIDIA" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.656Z level=DEBUG source=gpu.go:517 msg="Searching for GPU library" name=libcuda.so* 2025-01-17 20:40:33 time=2025-01-18T01:40:33.656Z level=DEBUG source=gpu.go:543 msg="gpu library search" globs="[/usr/lib/ollama/libcuda.so* /usr/lib/ollama/libcuda.so* /libcuda.so* /usr/local/cuda*/targets/*/lib/libcuda.so* /usr/lib/*-linux-gnu/nvidia/current/libcuda.so* /usr/lib/*-linux-gnu/libcuda.so* /usr/lib/wsl/lib/libcuda.so* /usr/lib/wsl/drivers/*/libcuda.so* /opt/cuda/lib*/libcuda.so* /usr/local/cuda/lib*/libcuda.so* /usr/lib*/libcuda.so* /usr/local/lib*/libcuda.so*]" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.657Z level=DEBUG source=gpu.go:576 msg="discovered GPU libraries" paths=[] 2025-01-17 20:40:33 time=2025-01-18T01:40:33.657Z level=DEBUG source=gpu.go:517 msg="Searching for GPU library" name=libcudart.so* 2025-01-17 20:40:33 time=2025-01-18T01:40:33.657Z level=DEBUG source=gpu.go:543 msg="gpu library search" globs="[/usr/lib/ollama/libcudart.so* /usr/lib/ollama/libcudart.so* /libcudart.so* /usr/lib/ollama/libcudart.so* /usr/lib/ollama/libcudart.so* /usr/local/cuda/lib64/libcudart.so* /usr/lib/x86_64-linux-gnu/nvidia/current/libcudart.so* /usr/lib/x86_64-linux-gnu/libcudart.so* /usr/lib/wsl/lib/libcudart.so* /usr/lib/wsl/drivers/*/libcudart.so* /opt/cuda/lib64/libcudart.so* /usr/local/cuda*/targets/aarch64-linux/lib/libcudart.so* /usr/lib/aarch64-linux-gnu/nvidia/current/libcudart.so* /usr/lib/aarch64-linux-gnu/libcudart.so* /usr/local/cuda/lib*/libcudart.so* /usr/lib*/libcudart.so* /usr/local/lib*/libcudart.so*]" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.657Z level=DEBUG source=gpu.go:576 msg="discovered GPU libraries" paths=[] 2025-01-17 20:40:33 time=2025-01-18T01:40:33.657Z level=DEBUG source=amd_linux.go:421 msg="amdgpu driver not detected /sys/module/amdgpu" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.657Z level=INFO source=gpu.go:392 msg="no compatible GPUs were discovered" 2025-01-17 20:40:33 time=2025-01-18T01:40:33.657Z level=INFO source=types.go:131 msg="inference compute" id=0 library=cpu variant=avx2 compute="" driver=0.0 name="" total="50.7 GiB" available="35.1 GiB" ``` ```ls -ld /sys/module/amdgpu``` displays ```drwxr-xr-x 7 root root 0 Jan 17 12:23 /sys/module/amdgpu``` ### OS Linux, Docker ### GPU AMD ### CPU Intel ### Ollama version 0.5.7-0-ga420a45-dirty
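The `amdgpu driver not detected /sys/module/amdgpu` line suggests checking visibility from inside the container, not just on the host. A hedged set of diagnostics, with the group IDs and paths taken from the compose file above:

```
# Does the container see the kernel module and the render devices?
docker exec ollama ls -ld /sys/module/amdgpu
docker exec ollama ls -l /dev/kfd /dev/dri

# Do the numeric group IDs in group_add (105, 44) actually match the
# host's render/video groups? They differ between distros.
getent group video render
```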
{ "login": "Vilchis-Joshua", "id": 24929856, "node_id": "MDQ6VXNlcjI0OTI5ODU2", "avatar_url": "https://avatars.githubusercontent.com/u/24929856?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Vilchis-Joshua", "html_url": "https://github.com/Vilchis-Joshua", "followers_url": "https://api.github.com/users/Vilchis-Joshua/followers", "following_url": "https://api.github.com/users/Vilchis-Joshua/following{/other_user}", "gists_url": "https://api.github.com/users/Vilchis-Joshua/gists{/gist_id}", "starred_url": "https://api.github.com/users/Vilchis-Joshua/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Vilchis-Joshua/subscriptions", "organizations_url": "https://api.github.com/users/Vilchis-Joshua/orgs", "repos_url": "https://api.github.com/users/Vilchis-Joshua/repos", "events_url": "https://api.github.com/users/Vilchis-Joshua/events{/privacy}", "received_events_url": "https://api.github.com/users/Vilchis-Joshua/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8475/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8475/timeline
null
reopened
false
https://api.github.com/repos/ollama/ollama/issues/7989
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7989/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7989/comments
https://api.github.com/repos/ollama/ollama/issues/7989/events
https://github.com/ollama/ollama/issues/7989
2,724,776,207
I_kwDOJ0Z1Ps6iaMkP
7,989
GPU not being used.
{ "login": "tom4oc", "id": 172549478, "node_id": "U_kgDOCkjlZg", "avatar_url": "https://avatars.githubusercontent.com/u/172549478?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tom4oc", "html_url": "https://github.com/tom4oc", "followers_url": "https://api.github.com/users/tom4oc/followers", "following_url": "https://api.github.com/users/tom4oc/following{/other_user}", "gists_url": "https://api.github.com/users/tom4oc/gists{/gist_id}", "starred_url": "https://api.github.com/users/tom4oc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tom4oc/subscriptions", "organizations_url": "https://api.github.com/users/tom4oc/orgs", "repos_url": "https://api.github.com/users/tom4oc/repos", "events_url": "https://api.github.com/users/tom4oc/events{/privacy}", "received_events_url": "https://api.github.com/users/tom4oc/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-12-07T19:12:23
2024-12-14T16:27:40
2024-12-14T16:27:40
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I got my VM (on my TrueNAS machine) up and running, then installed Whisper and Ollama. I installed Ollama 3.2 via the curl command. I can see that Whisper is accessing the GPU when running, but Ollama isn't. I'm stuck. If I type a simple prompt like "this is a test", a response can take anywhere from 3-10 minutes to type out 15 words. I have an NVIDIA 4070 in the TrueNAS; as I said, passthrough is working for Whisper. Thank you for any suggestions. Tommy TrueNAS Scale, AMD EPYC 7286, 256GB RAM, NVIDIA 4070 ### OS Linux ### GPU Nvidia ### CPU AMD ### Ollama version 3.2
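Some hedged first checks inside the VM, assuming a standard Linux install where Ollama runs under systemd:

```
# Does the NVIDIA driver see the passed-through GPU at all?
nvidia-smi

# Did Ollama place the model on the GPU? The PROCESSOR column of
# `ollama ps` shows the CPU/GPU split for loaded models.
ollama ps

# GPU discovery details from the server logs:
journalctl -u ollama -e --no-pager | grep -i -e gpu -e cuda
```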
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7989/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7989/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5905
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5905/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5905/comments
https://api.github.com/repos/ollama/ollama/issues/5905/events
https://github.com/ollama/ollama/issues/5905
2,427,002,927
I_kwDOJ0Z1Ps6QqSAv
5,905
Forcing Ollama to bind to 0.0.0.0 instead of localhost
{ "login": "MrLinks75", "id": 31570259, "node_id": "MDQ6VXNlcjMxNTcwMjU5", "avatar_url": "https://avatars.githubusercontent.com/u/31570259?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MrLinks75", "html_url": "https://github.com/MrLinks75", "followers_url": "https://api.github.com/users/MrLinks75/followers", "following_url": "https://api.github.com/users/MrLinks75/following{/other_user}", "gists_url": "https://api.github.com/users/MrLinks75/gists{/gist_id}", "starred_url": "https://api.github.com/users/MrLinks75/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MrLinks75/subscriptions", "organizations_url": "https://api.github.com/users/MrLinks75/orgs", "repos_url": "https://api.github.com/users/MrLinks75/repos", "events_url": "https://api.github.com/users/MrLinks75/events{/privacy}", "received_events_url": "https://api.github.com/users/MrLinks75/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
6
2024-07-24T09:07:57
2024-11-18T13:30:48
2024-09-04T03:40:32
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi everyone, Hope you're doing great. The request is to have Ollama bind automatically to 0.0.0.0 so it can interface with tools / instances inside a Docker container. At the moment the only feasible option I've found is to add `Environment="OLLAMA_HOST=0.0.0.0"` to the `ollama.service` file, and this has to be redone after every update. Would it be possible to implement this feature? Or even have it bind to both 0.0.0.0 and localhost on deployment?
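Until then, a systemd drop-in survives upgrades, unlike direct edits to the shipped unit file. A minimal sketch, assuming the default Linux install where the service is named `ollama`:

```
sudo systemctl edit ollama
# In the editor that opens, add:
#   [Service]
#   Environment="OLLAMA_HOST=0.0.0.0"
sudo systemctl daemon-reload
sudo systemctl restart ollama
```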
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5905/reactions", "total_count": 2, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 2 }
https://api.github.com/repos/ollama/ollama/issues/5905/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/1743
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1743/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1743/comments
https://api.github.com/repos/ollama/ollama/issues/1743/events
https://github.com/ollama/ollama/issues/1743
2,060,776,912
I_kwDOJ0Z1Ps561PXQ
1,743
ruGPT and mGPT model support
{ "login": "Zig1375", "id": 2699034, "node_id": "MDQ6VXNlcjI2OTkwMzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/2699034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Zig1375", "html_url": "https://github.com/Zig1375", "followers_url": "https://api.github.com/users/Zig1375/followers", "following_url": "https://api.github.com/users/Zig1375/following{/other_user}", "gists_url": "https://api.github.com/users/Zig1375/gists{/gist_id}", "starred_url": "https://api.github.com/users/Zig1375/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Zig1375/subscriptions", "organizations_url": "https://api.github.com/users/Zig1375/orgs", "repos_url": "https://api.github.com/users/Zig1375/repos", "events_url": "https://api.github.com/users/Zig1375/events{/privacy}", "received_events_url": "https://api.github.com/users/Zig1375/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
1
2023-12-30T15:27:39
2024-08-08T06:50:39
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Please add the following models: - https://huggingface.co/ai-forever/ruGPT-3.5-13B - https://huggingface.co/ai-forever/mGPT-13B
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1743/reactions", "total_count": 2, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/1743/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/2695
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2695/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2695/comments
https://api.github.com/repos/ollama/ollama/issues/2695/events
https://github.com/ollama/ollama/pull/2695
2,149,881,529
PR_kwDOJ0Z1Ps5nsOcT
2,695
Docker compose example
{ "login": "jmcdice", "id": 161679, "node_id": "MDQ6VXNlcjE2MTY3OQ==", "avatar_url": "https://avatars.githubusercontent.com/u/161679?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmcdice", "html_url": "https://github.com/jmcdice", "followers_url": "https://api.github.com/users/jmcdice/followers", "following_url": "https://api.github.com/users/jmcdice/following{/other_user}", "gists_url": "https://api.github.com/users/jmcdice/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmcdice/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmcdice/subscriptions", "organizations_url": "https://api.github.com/users/jmcdice/orgs", "repos_url": "https://api.github.com/users/jmcdice/repos", "events_url": "https://api.github.com/users/jmcdice/events{/privacy}", "received_events_url": "https://api.github.com/users/jmcdice/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-02-22T20:27:18
2024-11-21T09:06:29
2024-11-21T09:06:28
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2695", "html_url": "https://github.com/ollama/ollama/pull/2695", "diff_url": "https://github.com/ollama/ollama/pull/2695.diff", "patch_url": "https://github.com/ollama/ollama/pull/2695.patch", "merged_at": null }
This PR improves upon #1379: - Introduced the `OLLAMA_PORT` variable for flexible port mapping. - Modified the health check to explicitly return an exit code on failure. - Extended the health check interval to 10s to reduce load. - Explicitly defined the build context and Dockerfile path for clarity. - Added a README markdown with build and run instructions.
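For orientation, a hedged sketch of what such a compose file could look like; the exact contents live in the diff, `OLLAMA_PORT` is the variable the PR introduces, and the curl health check assumes the image provides curl:

```yaml
services:
  ollama:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "${OLLAMA_PORT:-11434}:11434"
    healthcheck:
      test: ["CMD-SHELL", "curl -sf http://localhost:11434/ || exit 1"]
      interval: 10s
```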
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2695/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2695/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2069
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2069/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2069/comments
https://api.github.com/repos/ollama/ollama/issues/2069/events
https://github.com/ollama/ollama/issues/2069
2,089,873,109
I_kwDOJ0Z1Ps58kO7V
2,069
illegal hardware instruction ollama run llama2
{ "login": "LoveSlience", "id": 22396365, "node_id": "MDQ6VXNlcjIyMzk2MzY1", "avatar_url": "https://avatars.githubusercontent.com/u/22396365?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LoveSlience", "html_url": "https://github.com/LoveSlience", "followers_url": "https://api.github.com/users/LoveSlience/followers", "following_url": "https://api.github.com/users/LoveSlience/following{/other_user}", "gists_url": "https://api.github.com/users/LoveSlience/gists{/gist_id}", "starred_url": "https://api.github.com/users/LoveSlience/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LoveSlience/subscriptions", "organizations_url": "https://api.github.com/users/LoveSlience/orgs", "repos_url": "https://api.github.com/users/LoveSlience/repos", "events_url": "https://api.github.com/users/LoveSlience/events{/privacy}", "received_events_url": "https://api.github.com/users/LoveSlience/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
7
2024-01-19T07:55:52
2024-01-26T21:11:03
2024-01-26T21:11:03
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
When I download the Mac app and then run `ollama run llama2`, I get the error `7326 illegal hardware instruction ollama run llama2` ![image](https://github.com/jmorganca/ollama/assets/22396365/58c5ab74-0a0e-4ed1-a074-da26f63bcd97) ![image](https://github.com/jmorganca/ollama/assets/22396365/9847a70c-5439-412c-8ae5-2270d615a68e)
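An illegal hardware instruction on macOS often points at an architecture mismatch (for example, an x86_64 binary that uses CPU instructions Rosetta cannot emulate on Apple silicon). Hedged checks to compare machine and binary architecture:

```
# Machine architecture (arm64 on Apple silicon, x86_64 on Intel):
uname -m

# Architecture(s) the installed binary was built for:
file "$(which ollama)"
```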
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2069/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/2069/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/442
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/442/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/442/comments
https://api.github.com/repos/ollama/ollama/issues/442/events
https://github.com/ollama/ollama/pull/442
1,872,922,012
PR_kwDOJ0Z1Ps5ZGaNG
442
treat stop as stop sequences, not exact tokens
{ "login": "sqs", "id": 1976, "node_id": "MDQ6VXNlcjE5NzY=", "avatar_url": "https://avatars.githubusercontent.com/u/1976?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sqs", "html_url": "https://github.com/sqs", "followers_url": "https://api.github.com/users/sqs/followers", "following_url": "https://api.github.com/users/sqs/following{/other_user}", "gists_url": "https://api.github.com/users/sqs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sqs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sqs/subscriptions", "organizations_url": "https://api.github.com/users/sqs/orgs", "repos_url": "https://api.github.com/users/sqs/repos", "events_url": "https://api.github.com/users/sqs/events{/privacy}", "received_events_url": "https://api.github.com/users/sqs/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-08-30T05:19:09
2023-08-30T15:54:14
2023-08-30T15:53:42
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/442", "html_url": "https://github.com/ollama/ollama/pull/442", "diff_url": "https://github.com/ollama/ollama/pull/442.diff", "patch_url": "https://github.com/ollama/ollama/pull/442.patch", "merged_at": "2023-08-30T15:53:42" }
The `stop` option to the generate API is a list of sequences that should cause generation to stop. Although these are commonly called "stop tokens", they do not necessarily correspond to LLM tokens (per the LLM's tokenizer). For example, if the caller sends a generate request with `"stop":["\n"]`, then generation should stop on any token containing `\n` (and trim `\n` from the output), not just if the token exactly matches `\n`. If `stop` were interpreted strictly as LLM tokens, then it would require callers of the generate API to know the LLM's tokenizer and enumerate many tokens in the `stop` list. Fixes https://github.com/jmorganca/ollama/issues/295. Example output (note that generation ends on a token ` not` that is truncated to ` n` because the stop sequence is `ot`): ``` % curl -d '{"prompt":"const primes=[1,2,3,","model":"codellama:7b","options":{"seed":1337,"temperature":0,"num_ctx":100,"stop":["ot"]}}' http://localhost:11434/api/generate {"model":"codellama:7b","created_at":"2023-08-30T05:17:54.435096Z","response":" The","done":false} {"model":"codellama:7b","created_at":"2023-08-30T05:17:54.486337Z","response":" code","done":false} {"model":"codellama:7b","created_at":"2023-08-30T05:17:54.53943Z","response":" you","done":false} {"model":"codellama:7b","created_at":"2023-08-30T05:17:54.593747Z","response":" provided","done":false} {"model":"codellama:7b","created_at":"2023-08-30T05:17:54.648514Z","response":" is","done":false} {"model":"codellama:7b","created_at":"2023-08-30T05:17:54.702975Z","response":" n","done":false} {"model":"codellama:7b","created_at":"2023-08-30T05:17:54.702999Z","done":true, ...} ```
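For readers skimming the diff, a minimal Go sketch of the behavior described (not the PR's actual code): each `stop` entry is matched as a substring of the accumulated output, and the text is truncated at the first match.

```go
package main

import (
	"fmt"
	"strings"
)

// truncateAtStop returns the text up to the first occurrence of any stop
// sequence, plus whether generation should stop.
func truncateAtStop(text string, stops []string) (string, bool) {
	for _, s := range stops {
		if i := strings.Index(text, s); i >= 0 {
			return text[:i], true
		}
	}
	return text, false
}

func main() {
	out, done := truncateAtStop(" The code you provided is not", []string{"ot"})
	fmt.Printf("%q %v\n", out, done) // " The code you provided is n" true
}
```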
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/442/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/442/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3973
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3973/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3973/comments
https://api.github.com/repos/ollama/ollama/issues/3973/events
https://github.com/ollama/ollama/issues/3973
2,266,866,179
I_kwDOJ0Z1Ps6HHaID
3,973
Ollama download is too slow (downloads from GitHub slow in China)
{ "login": "code101015", "id": 163701882, "node_id": "U_kgDOCcHkeg", "avatar_url": "https://avatars.githubusercontent.com/u/163701882?v=4", "gravatar_id": "", "url": "https://api.github.com/users/code101015", "html_url": "https://github.com/code101015", "followers_url": "https://api.github.com/users/code101015/followers", "following_url": "https://api.github.com/users/code101015/following{/other_user}", "gists_url": "https://api.github.com/users/code101015/gists{/gist_id}", "starred_url": "https://api.github.com/users/code101015/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/code101015/subscriptions", "organizations_url": "https://api.github.com/users/code101015/orgs", "repos_url": "https://api.github.com/users/code101015/repos", "events_url": "https://api.github.com/users/code101015/events{/privacy}", "received_events_url": "https://api.github.com/users/code101015/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 6677370291, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCVsw", "url": "https://api.github.com/repos/ollama/ollama/labels/networking", "name": "networking", "color": "0B5368", "default": false, "description": "Issues relating to ollama pull and push" } ]
open
false
null
[]
null
10
2024-04-27T06:19:27
2024-12-12T09:45:21
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
The download speed of Ollamasetup.exe is too slow; it would take nearly 20 hours. Is there any way around this?
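One hedged workaround while no faster mirror is available: download the installer with resume support, so an interrupted transfer continues instead of starting over (the URL below is the standard GitHub latest-release asset path):

```
curl -L -C - -o OllamaSetup.exe \
  https://github.com/ollama/ollama/releases/latest/download/OllamaSetup.exe
```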
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3973/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3973/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/8181
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8181/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8181/comments
https://api.github.com/repos/ollama/ollama/issues/8181/events
https://github.com/ollama/ollama/issues/8181
2,752,166,346
I_kwDOJ0Z1Ps6kCrnK
8,181
Does Ollama prioritize the use of shared GPU memory?
{ "login": "mydreamworldpolly", "id": 35619739, "node_id": "MDQ6VXNlcjM1NjE5NzM5", "avatar_url": "https://avatars.githubusercontent.com/u/35619739?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mydreamworldpolly", "html_url": "https://github.com/mydreamworldpolly", "followers_url": "https://api.github.com/users/mydreamworldpolly/followers", "following_url": "https://api.github.com/users/mydreamworldpolly/following{/other_user}", "gists_url": "https://api.github.com/users/mydreamworldpolly/gists{/gist_id}", "starred_url": "https://api.github.com/users/mydreamworldpolly/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mydreamworldpolly/subscriptions", "organizations_url": "https://api.github.com/users/mydreamworldpolly/orgs", "repos_url": "https://api.github.com/users/mydreamworldpolly/repos", "events_url": "https://api.github.com/users/mydreamworldpolly/events{/privacy}", "received_events_url": "https://api.github.com/users/mydreamworldpolly/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
3
2024-12-20T07:58:39
2024-12-20T09:20:09
2024-12-20T09:20:09
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I installed Ollama 0.5.1 and tried the new settings OLLAMA_FLASH_ATTENTION=1 and OLLAMA_KV_CACHE_TYPE=q8_0, using Qwen2.5-7b Q4 with a long context of 130000. VRAM usage has indeed decreased as expected, but Ollama still occupies shared GPU memory instead of fully utilizing the freed-up VRAM, resulting in degraded performance. <img width="360" alt="155617" src="https://github.com/user-attachments/assets/007fd6e4-1ba9-4d50-8474-9f1b967f9b1d" /> ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.5.1
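For comparison, a hedged way to confirm the settings reached the server and to see where the weights and KV cache actually landed (Windows, running the server from a terminal):

```
set OLLAMA_FLASH_ATTENTION=1
set OLLAMA_KV_CACHE_TYPE=q8_0
ollama serve

REM In a second terminal: the PROCESSOR column reports the CPU/GPU split.
ollama ps
```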
{ "login": "mydreamworldpolly", "id": 35619739, "node_id": "MDQ6VXNlcjM1NjE5NzM5", "avatar_url": "https://avatars.githubusercontent.com/u/35619739?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mydreamworldpolly", "html_url": "https://github.com/mydreamworldpolly", "followers_url": "https://api.github.com/users/mydreamworldpolly/followers", "following_url": "https://api.github.com/users/mydreamworldpolly/following{/other_user}", "gists_url": "https://api.github.com/users/mydreamworldpolly/gists{/gist_id}", "starred_url": "https://api.github.com/users/mydreamworldpolly/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mydreamworldpolly/subscriptions", "organizations_url": "https://api.github.com/users/mydreamworldpolly/orgs", "repos_url": "https://api.github.com/users/mydreamworldpolly/repos", "events_url": "https://api.github.com/users/mydreamworldpolly/events{/privacy}", "received_events_url": "https://api.github.com/users/mydreamworldpolly/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8181/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8181/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/63
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/63/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/63/comments
https://api.github.com/repos/ollama/ollama/issues/63/events
https://github.com/ollama/ollama/issues/63
1,795,890,345
I_kwDOJ0Z1Ps5rCxyp
63
app server should restart if it errors
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 5675428184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUkgpWA", "url": "https://api.github.com/repos/ollama/ollama/labels/app", "name": "app", "color": "000000", "default": false, "description": "" } ]
closed
false
null
[]
null
1
2023-07-10T04:00:34
2023-07-11T17:02:49
2023-07-11T17:02:49
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Currently, if the server errors, it stops running but the Mac app keeps going – we should ensure that whenever the Mac app is running, the server is also always running.
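A minimal Go sketch of the supervision loop this implies (not the app's actual code): keep relaunching the server whenever it exits, with a short pause to avoid a tight crash loop.

```go
package main

import (
	"log"
	"os/exec"
	"time"
)

func main() {
	for {
		cmd := exec.Command("ollama", "serve")
		if err := cmd.Run(); err != nil {
			log.Printf("server exited: %v; restarting", err)
		}
		time.Sleep(time.Second) // back off before relaunching
	}
}
```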
{ "login": "hoyyeva", "id": 63033505, "node_id": "MDQ6VXNlcjYzMDMzNTA1", "avatar_url": "https://avatars.githubusercontent.com/u/63033505?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hoyyeva", "html_url": "https://github.com/hoyyeva", "followers_url": "https://api.github.com/users/hoyyeva/followers", "following_url": "https://api.github.com/users/hoyyeva/following{/other_user}", "gists_url": "https://api.github.com/users/hoyyeva/gists{/gist_id}", "starred_url": "https://api.github.com/users/hoyyeva/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hoyyeva/subscriptions", "organizations_url": "https://api.github.com/users/hoyyeva/orgs", "repos_url": "https://api.github.com/users/hoyyeva/repos", "events_url": "https://api.github.com/users/hoyyeva/events{/privacy}", "received_events_url": "https://api.github.com/users/hoyyeva/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/63/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/63/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3895
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3895/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3895/comments
https://api.github.com/repos/ollama/ollama/issues/3895/events
https://github.com/ollama/ollama/pull/3895
2,262,370,075
PR_kwDOJ0Z1Ps5tqRYb
3,895
Move ggml loading to when attempting to fit
{ "login": "brycereitano", "id": 1928691, "node_id": "MDQ6VXNlcjE5Mjg2OTE=", "avatar_url": "https://avatars.githubusercontent.com/u/1928691?v=4", "gravatar_id": "", "url": "https://api.github.com/users/brycereitano", "html_url": "https://github.com/brycereitano", "followers_url": "https://api.github.com/users/brycereitano/followers", "following_url": "https://api.github.com/users/brycereitano/following{/other_user}", "gists_url": "https://api.github.com/users/brycereitano/gists{/gist_id}", "starred_url": "https://api.github.com/users/brycereitano/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/brycereitano/subscriptions", "organizations_url": "https://api.github.com/users/brycereitano/orgs", "repos_url": "https://api.github.com/users/brycereitano/repos", "events_url": "https://api.github.com/users/brycereitano/events{/privacy}", "received_events_url": "https://api.github.com/users/brycereitano/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-04-24T23:55:30
2024-04-25T16:49:44
2024-04-25T16:24:08
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3895", "html_url": "https://github.com/ollama/ollama/pull/3895", "diff_url": "https://github.com/ollama/ollama/pull/3895.diff", "patch_url": "https://github.com/ollama/ollama/pull/3895.patch", "merged_at": "2024-04-25T16:24:08" }
Fixes #3860 This moves the loading of the model to the point where we attempt to see whether we can fit the model into memory. I decided not to pull it out into a separate function, as I don't think the 4 lines warrant it, especially after moving some of the logic around. I kept that in a separate commit to make it easy to roll back if the preference is to keep them separated in the main conditional flow. - Tested on a device without a GPU, loading a single model and making concurrent requests to multiple models. - Tested on a device with a dedicated GPU, limiting it to a single model as well as loading multiple models and making concurrent requests. Additionally, fixed a panic in the tests when running with `-race`.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3895/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3895/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/90
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/90/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/90/comments
https://api.github.com/repos/ollama/ollama/issues/90/events
https://github.com/ollama/ollama/issues/90
1,808,515,195
I_kwDOJ0Z1Ps5ry8B7
90
I think docs/development is wrong
{ "login": "technovangelist", "id": 633681, "node_id": "MDQ6VXNlcjYzMzY4MQ==", "avatar_url": "https://avatars.githubusercontent.com/u/633681?v=4", "gravatar_id": "", "url": "https://api.github.com/users/technovangelist", "html_url": "https://github.com/technovangelist", "followers_url": "https://api.github.com/users/technovangelist/followers", "following_url": "https://api.github.com/users/technovangelist/following{/other_user}", "gists_url": "https://api.github.com/users/technovangelist/gists{/gist_id}", "starred_url": "https://api.github.com/users/technovangelist/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/technovangelist/subscriptions", "organizations_url": "https://api.github.com/users/technovangelist/orgs", "repos_url": "https://api.github.com/users/technovangelist/repos", "events_url": "https://api.github.com/users/technovangelist/events{/privacy}", "received_events_url": "https://api.github.com/users/technovangelist/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-07-17T19:56:10
2023-07-18T12:34:00
2023-07-18T12:34:00
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
We no longer have a Makefile. I think the docs should say: install Go, then run `go build .`. Or is there a different process?
{ "login": "technovangelist", "id": 633681, "node_id": "MDQ6VXNlcjYzMzY4MQ==", "avatar_url": "https://avatars.githubusercontent.com/u/633681?v=4", "gravatar_id": "", "url": "https://api.github.com/users/technovangelist", "html_url": "https://github.com/technovangelist", "followers_url": "https://api.github.com/users/technovangelist/followers", "following_url": "https://api.github.com/users/technovangelist/following{/other_user}", "gists_url": "https://api.github.com/users/technovangelist/gists{/gist_id}", "starred_url": "https://api.github.com/users/technovangelist/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/technovangelist/subscriptions", "organizations_url": "https://api.github.com/users/technovangelist/orgs", "repos_url": "https://api.github.com/users/technovangelist/repos", "events_url": "https://api.github.com/users/technovangelist/events{/privacy}", "received_events_url": "https://api.github.com/users/technovangelist/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/90/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/90/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5942
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5942/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5942/comments
https://api.github.com/repos/ollama/ollama/issues/5942/events
https://github.com/ollama/ollama/issues/5942
2,429,326,734
I_kwDOJ0Z1Ps6QzJWO
5,942
[Website] Allow users to sort models in the library by " (most) recently updated"
{ "login": "Malrama", "id": 1287086, "node_id": "MDQ6VXNlcjEyODcwODY=", "avatar_url": "https://avatars.githubusercontent.com/u/1287086?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Malrama", "html_url": "https://github.com/Malrama", "followers_url": "https://api.github.com/users/Malrama/followers", "following_url": "https://api.github.com/users/Malrama/following{/other_user}", "gists_url": "https://api.github.com/users/Malrama/gists{/gist_id}", "starred_url": "https://api.github.com/users/Malrama/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Malrama/subscriptions", "organizations_url": "https://api.github.com/users/Malrama/orgs", "repos_url": "https://api.github.com/users/Malrama/repos", "events_url": "https://api.github.com/users/Malrama/events{/privacy}", "received_events_url": "https://api.github.com/users/Malrama/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 6573197867, "node_id": "LA_kwDOJ0Z1Ps8AAAABh8sKKw", "url": "https://api.github.com/repos/ollama/ollama/labels/ollama.com", "name": "ollama.com", "color": "ffffff", "default": false, "description": "" } ]
closed
false
null
[]
null
1
2024-07-25T08:13:24
2024-09-17T15:50:07
2024-09-17T15:50:06
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello everyone and greetings. It always bugged me that I am not able to sort the Ollama model library by "most recently updated" model. Currently, it's only possible to sort by the newest model, but this isn't quite the same. Sometimes I want to look at the library to see if there is an update for the models I currently use, either because one got re-quantized (just as happened with Llama 3.1) or because a newer version was released. For now, I have to search for each model individually and check its "last updated" field to see if there is an update. Sorting by "recently updated" would let me see all models updated within a timeframe with just one click, since they would be chronologically ordered. ![Ollama](https://github.com/user-attachments/assets/d8bbe711-5b43-4677-8488-e1be53857483) I added this for illustration. What do you think? Thank you very much.
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5942/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5942/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7687
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7687/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7687/comments
https://api.github.com/repos/ollama/ollama/issues/7687/events
https://github.com/ollama/ollama/issues/7687
2,662,284,642
I_kwDOJ0Z1Ps6erz1i
7,687
Mismatch Between GitHub and PyPI Versions
{ "login": "jaberfooladi", "id": 37275469, "node_id": "MDQ6VXNlcjM3Mjc1NDY5", "avatar_url": "https://avatars.githubusercontent.com/u/37275469?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jaberfooladi", "html_url": "https://github.com/jaberfooladi", "followers_url": "https://api.github.com/users/jaberfooladi/followers", "following_url": "https://api.github.com/users/jaberfooladi/following{/other_user}", "gists_url": "https://api.github.com/users/jaberfooladi/gists{/gist_id}", "starred_url": "https://api.github.com/users/jaberfooladi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jaberfooladi/subscriptions", "organizations_url": "https://api.github.com/users/jaberfooladi/orgs", "repos_url": "https://api.github.com/users/jaberfooladi/repos", "events_url": "https://api.github.com/users/jaberfooladi/events{/privacy}", "received_events_url": "https://api.github.com/users/jaberfooladi/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-11-15T15:20:11
2024-11-15T17:33:21
2024-11-15T17:33:21
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? The version on GitHub doesn’t match the version listed on PyPI. Could you clarify which is the latest and ensure they’re synced? ### OS Linux ### GPU Nvidia ### CPU _No response_ ### Ollama version _No response_
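For context, the PyPI package is the Python *client* library, which is versioned independently of the server releases on GitHub, which likely explains the mismatch. A quick sketch for seeing both numbers side by side, assuming the `ollama` package is installed and a local server is listening on the default port (`/api/version` is the server's version endpoint):

```python
import httpx
from importlib.metadata import version

# Version of the ollama *Python client* package installed from PyPI.
print("PyPI client:", version("ollama"))

# Version of the locally running ollama *server* (tracks GitHub releases).
resp = httpx.get("http://localhost:11434/api/version")
print("Server:", resp.json()["version"])
```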
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7687/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7687/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3197
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3197/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3197/comments
https://api.github.com/repos/ollama/ollama/issues/3197/events
https://github.com/ollama/ollama/issues/3197
2,190,728,482
I_kwDOJ0Z1Ps6Ck90i
3,197
Completely disable logging on server with env variable
{ "login": "Xpl0itU", "id": 24777100, "node_id": "MDQ6VXNlcjI0Nzc3MTAw", "avatar_url": "https://avatars.githubusercontent.com/u/24777100?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Xpl0itU", "html_url": "https://github.com/Xpl0itU", "followers_url": "https://api.github.com/users/Xpl0itU/followers", "following_url": "https://api.github.com/users/Xpl0itU/following{/other_user}", "gists_url": "https://api.github.com/users/Xpl0itU/gists{/gist_id}", "starred_url": "https://api.github.com/users/Xpl0itU/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Xpl0itU/subscriptions", "organizations_url": "https://api.github.com/users/Xpl0itU/orgs", "repos_url": "https://api.github.com/users/Xpl0itU/repos", "events_url": "https://api.github.com/users/Xpl0itU/events{/privacy}", "received_events_url": "https://api.github.com/users/Xpl0itU/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
0
2024-03-17T15:30:41
2024-03-21T10:08:41
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What are you trying to do? For maximum privacy, it would be nice to be able to disable logging completely ### How should we solve this? Add an env variable that sets the logging level to none, so absolutely no logs are emitted ### What is the impact of not solving this? None, it's just a nice addition for privacy reasons in case Ollama is running on a shared server ### Anything else? _No response_
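Until such a variable exists, one workaround is to discard the server's console output entirely. A sketch; it only silences the stdout/stderr of the process it launches, and does not remove any log files the server may write on its own:

```python
import subprocess

# Launch the server with all console output discarded. This only
# suppresses what would be printed to the terminal; it does not
# affect log files the server writes elsewhere.
server = subprocess.Popen(
    ["ollama", "serve"],
    stdout=subprocess.DEVNULL,
    stderr=subprocess.DEVNULL,
)
server.wait()
```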
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3197/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3197/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/5984
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5984/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5984/comments
https://api.github.com/repos/ollama/ollama/issues/5984/events
https://github.com/ollama/ollama/issues/5984
2,432,285,044
I_kwDOJ0Z1Ps6Q-bl0
5,984
[Linux] Install ollama to /home/user/ space
{ "login": "bebyx", "id": 35663391, "node_id": "MDQ6VXNlcjM1NjYzMzkx", "avatar_url": "https://avatars.githubusercontent.com/u/35663391?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bebyx", "html_url": "https://github.com/bebyx", "followers_url": "https://api.github.com/users/bebyx/followers", "following_url": "https://api.github.com/users/bebyx/following{/other_user}", "gists_url": "https://api.github.com/users/bebyx/gists{/gist_id}", "starred_url": "https://api.github.com/users/bebyx/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bebyx/subscriptions", "organizations_url": "https://api.github.com/users/bebyx/orgs", "repos_url": "https://api.github.com/users/bebyx/repos", "events_url": "https://api.github.com/users/bebyx/events{/privacy}", "received_events_url": "https://api.github.com/users/bebyx/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
1
2024-07-26T13:48:42
2024-09-04T04:27:36
2024-09-04T04:27:35
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I have several partitions on my Linux machine. My system partition (`/`) is small and I didn't have enough space for models. However, I have a lot of space in the `/home/` partition. Setting `$OLLAMA_MODELS` to `/home/user/.ollama/models` didn't help; it still installed them to `/usr/share/ollama/.ollama/models`. I didn't see an option for installing **ollama** to the home space (somewhere under `/home/user/`). So, I created a [script](https://gist.github.com/bebyx/8cecd83a2d7c94a8f7c6352298db7c9e) to do that for my convenience. ```bash #!/bin/bash mkdir -p $HOME/.local/bin ~/.config/systemd/user curl -L https://ollama.com/download/ollama-linux-amd64 -o $HOME/.local/bin/ollama chmod +x $HOME/.local/bin/ollama cat <<EOF > ~/.config/systemd/user/ollama.service [Unit] Description=Ollama Service After=network-online.target [Service] ExecStart=$HOME/.local/bin/ollama serve Restart=always RestartSec=3 [Install] WantedBy=default.target EOF systemctl --user daemon-reload systemctl --user restart ollama ``` Just wondering if you would consider providing an alternative script to install **ollama** to the home user space. My script can be used as a foundation.
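One detail worth noting about the `$OLLAMA_MODELS` symptom above: the variable is read by the *server* process, so it has to be set in the environment that launches `ollama serve` (for example via an `Environment=` line in the systemd unit), not just in the shell running the client. A minimal Python sketch of the same idea:

```python
import os
import subprocess

# OLLAMA_MODELS must be visible to the *server* process; setting it
# in the shell that runs the client has no effect on where models go.
env = dict(os.environ)
env["OLLAMA_MODELS"] = os.path.expanduser("~/.ollama/models")
subprocess.Popen(["ollama", "serve"], env=env)
```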
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5984/reactions", "total_count": 3, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 3, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5984/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5862
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5862/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5862/comments
https://api.github.com/repos/ollama/ollama/issues/5862/events
https://github.com/ollama/ollama/issues/5862
2,423,841,716
I_kwDOJ0Z1Ps6QeOO0
5,862
Context Window Size Issue with Mistral Nemo Model on Ollama Version 0.2.8-rc2 (Apple Mac Silicon M2 Pro)
{ "login": "MrSimonC", "id": 3033044, "node_id": "MDQ6VXNlcjMwMzMwNDQ=", "avatar_url": "https://avatars.githubusercontent.com/u/3033044?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MrSimonC", "html_url": "https://github.com/MrSimonC", "followers_url": "https://api.github.com/users/MrSimonC/followers", "following_url": "https://api.github.com/users/MrSimonC/following{/other_user}", "gists_url": "https://api.github.com/users/MrSimonC/gists{/gist_id}", "starred_url": "https://api.github.com/users/MrSimonC/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MrSimonC/subscriptions", "organizations_url": "https://api.github.com/users/MrSimonC/orgs", "repos_url": "https://api.github.com/users/MrSimonC/repos", "events_url": "https://api.github.com/users/MrSimonC/events{/privacy}", "received_events_url": "https://api.github.com/users/MrSimonC/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
12
2024-07-22T22:02:22
2024-10-01T04:51:25
2024-07-23T22:05:40
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Hey amazing team! I'm experiencing an issue with the context window size when using the new Mistral Nemo model on Ollama version 0.2.8-rc2 on my Apple Mac Silicon M2 Pro. According to the documentation, the context window should be approximately 128,000 tokens. However, when I run `ollama show` on the Mistral Nemo model, the context length reported is actually 1.024e+06 = 1,024,000 (1 million), which is significantly larger than expected (and of course does not align with the expected 128K). ```bash ollama show mistral-nemo Model arch llama parameters 12.2B quantization Q4_0 context length 1.024e+06 embedding length 5120 Parameters stop "[INST]" stop "[/INST]" License " Apache License Version 2.0, January 2004 ``` Additionally, I've noticed that my own "needle in a haystack" test consistently fails when using the Mistral Nemo model on Ollama, whereas the same test passes every time with GPT-4o and the expected context window size of 128,000. I've even taken the model temperature down to 0.3 and even down to 0.1, as recommended on the Hugging Face model page, but no difference. This suggests to me that there may be an issue with the model or its integration with Ollama. Can you help diagnose the issue? Are my observations correct? Are there any other logs or information I can provide to help troubleshoot this problem? I'm using the latest 0.2.8-rc2 on Mac: ```bash ollama --version ollama version is 0.2.8-rc2 ollama list NAME ID SIZE MODIFIED mistral-nemo:latest 4b300b8c6a97 7.1 GB About an hour ago ``` Let me know if you'd like me to add anything else! ### OS macOS ### GPU Apple ### CPU Apple ### Ollama version 0.2.8-rc2
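One thing worth checking when reproducing the "needle in a haystack" failure: the server allocates a much smaller context than the model's advertised maximum unless `num_ctx` is set per request. A sketch requesting a 128K window explicitly via the documented `options` field, assuming a local server and enough memory to actually allocate it:

```python
import httpx

# Ask for a 128K context window explicitly; without this, the server
# uses its much smaller default regardless of the model's maximum.
resp = httpx.post(
    "http://localhost:11434/api/chat",
    json={
        "model": "mistral-nemo",
        "messages": [{"role": "user", "content": "Summarise this document."}],
        "options": {"num_ctx": 131072},
        "stream": False,
    },
    timeout=None,  # long generations exceed httpx's 5s default
)
print(resp.json()["message"]["content"])
```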
{ "login": "MrSimonC", "id": 3033044, "node_id": "MDQ6VXNlcjMwMzMwNDQ=", "avatar_url": "https://avatars.githubusercontent.com/u/3033044?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MrSimonC", "html_url": "https://github.com/MrSimonC", "followers_url": "https://api.github.com/users/MrSimonC/followers", "following_url": "https://api.github.com/users/MrSimonC/following{/other_user}", "gists_url": "https://api.github.com/users/MrSimonC/gists{/gist_id}", "starred_url": "https://api.github.com/users/MrSimonC/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MrSimonC/subscriptions", "organizations_url": "https://api.github.com/users/MrSimonC/orgs", "repos_url": "https://api.github.com/users/MrSimonC/repos", "events_url": "https://api.github.com/users/MrSimonC/events{/privacy}", "received_events_url": "https://api.github.com/users/MrSimonC/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5862/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5862/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5196
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5196/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5196/comments
https://api.github.com/repos/ollama/ollama/issues/5196/events
https://github.com/ollama/ollama/pull/5196
2,365,276,393
PR_kwDOJ0Z1Ps5zHlW0
5,196
include modelfile messages
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-06-20T21:12:40
2024-07-31T17:18:19
2024-07-31T17:18:17
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5196", "html_url": "https://github.com/ollama/ollama/pull/5196", "diff_url": "https://github.com/ollama/ollama/pull/5196.diff", "patch_url": "https://github.com/ollama/ollama/pull/5196.patch", "merged_at": "2024-07-31T17:18:17" }
This change moves the CLI behaviour of prepending Modelfile messages to chat conversations into the server, which allows API calls to easily use these fields. It also extends this to /api/generate requests, where messages will only be prepended if the context is empty. Still missing some tests. Resolves https://github.com/ollama/ollama-python/issues/117 Resolves https://github.com/ollama/ollama-js/issues/109
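For context, a sketch of what this enables from the API side. It assumes a model built from a Modelfile containing `MESSAGE` instructions (the model name "primed-model" is hypothetical); after this change the server prepends those messages itself, so a plain `/api/chat` call picks them up without client-side work:

```python
import httpx

# "primed-model" is a hypothetical model built from a Modelfile with
# MESSAGE instructions; after this change the server prepends those
# messages itself, so the client no longer has to.
resp = httpx.post(
    "http://localhost:11434/api/chat",
    json={
        "model": "primed-model",
        "messages": [{"role": "user", "content": "Continue our conversation."}],
        "stream": False,
    },
    timeout=None,
)
print(resp.json()["message"]["content"])
```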
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5196/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5196/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8340
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8340/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8340/comments
https://api.github.com/repos/ollama/ollama/issues/8340/events
https://github.com/ollama/ollama/pull/8340
2,773,492,240
PR_kwDOJ0Z1Ps6G-SHk
8,340
cli/run: add --system and --parameter CLI options
{ "login": "pacien", "id": 1449319, "node_id": "MDQ6VXNlcjE0NDkzMTk=", "avatar_url": "https://avatars.githubusercontent.com/u/1449319?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pacien", "html_url": "https://github.com/pacien", "followers_url": "https://api.github.com/users/pacien/followers", "following_url": "https://api.github.com/users/pacien/following{/other_user}", "gists_url": "https://api.github.com/users/pacien/gists{/gist_id}", "starred_url": "https://api.github.com/users/pacien/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pacien/subscriptions", "organizations_url": "https://api.github.com/users/pacien/orgs", "repos_url": "https://api.github.com/users/pacien/repos", "events_url": "https://api.github.com/users/pacien/events{/privacy}", "received_events_url": "https://api.github.com/users/pacien/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
0
2025-01-07T18:34:54
2025-01-09T22:40:43
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/8340", "html_url": "https://github.com/ollama/ollama/pull/8340", "diff_url": "https://github.com/ollama/ollama/pull/8340.diff", "patch_url": "https://github.com/ollama/ollama/pull/8340.patch", "merged_at": null }
See individual commit messages for: - **cli/run: add --system flag for system message** - **cli/run: add --parameter flag**
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8340/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8340/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8037
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8037/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8037/comments
https://api.github.com/repos/ollama/ollama/issues/8037/events
https://github.com/ollama/ollama/pull/8037
2,731,793,827
PR_kwDOJ0Z1Ps6Ey8ah
8,037
ci: build dir changed
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-12-11T04:32:58
2024-12-11T04:33:40
2024-12-11T04:33:34
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/8037", "html_url": "https://github.com/ollama/ollama/pull/8037", "diff_url": "https://github.com/ollama/ollama/pull/8037.diff", "patch_url": "https://github.com/ollama/ollama/pull/8037.patch", "merged_at": "2024-12-11T04:33:34" }
Remove the no-longer-relevant build log dir: the build artifacts are no longer in ./build
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8037/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8037/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/1163
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1163/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1163/comments
https://api.github.com/repos/ollama/ollama/issues/1163/events
https://github.com/ollama/ollama/issues/1163
1,998,063,222
I_kwDOJ0Z1Ps53GAZ2
1,163
Segmentation Fault with CPU inference on 0.1.9 on Intel Mac with Monterey
{ "login": "orlyandico", "id": 1325420, "node_id": "MDQ6VXNlcjEzMjU0MjA=", "avatar_url": "https://avatars.githubusercontent.com/u/1325420?v=4", "gravatar_id": "", "url": "https://api.github.com/users/orlyandico", "html_url": "https://github.com/orlyandico", "followers_url": "https://api.github.com/users/orlyandico/followers", "following_url": "https://api.github.com/users/orlyandico/following{/other_user}", "gists_url": "https://api.github.com/users/orlyandico/gists{/gist_id}", "starred_url": "https://api.github.com/users/orlyandico/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/orlyandico/subscriptions", "organizations_url": "https://api.github.com/users/orlyandico/orgs", "repos_url": "https://api.github.com/users/orlyandico/repos", "events_url": "https://api.github.com/users/orlyandico/events{/privacy}", "received_events_url": "https://api.github.com/users/orlyandico/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
11
2023-11-17T01:08:56
2023-11-29T11:13:32
2023-11-29T11:13:32
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I can consistently get an OOM and segfault with longer prompts on 0.1.9 and the codellama model on a Mac Pro 2013 trashcan with 128GB of RAM, an Ivy Bridge EP Xeon processor (E5-2696 v2), and Monterey 12.7.1. This machine has the dual AMD D500 GPUs, which should not be relevant. Shorter prompts work fine, e.g. "write python code to connect to postgresql and fetched named columns". But the slightly longer "write code in python to connect to a database with psycopg2 and perform an upsert with bind variables" fails with the error: ``` {"timestamp":1700181141,"level":"INFO","function":"main","line":1749,"message":"HTTP server listening","hostname":"127.0.0.1","port":64826} {"timestamp":1700181141,"level":"INFO","function":"log_server_request","line":1240,"message":"request","remote_addr":"127.0.0.1","remote_port":50465,"status":200,"method":"HEAD","path":"/","params":{}} 2023/11/17 00:32:21 llama.go:487: llama runner started in 1.401922 seconds [GIN] 2023/11/17 - 00:32:21 | 200 | 1.623658951s | 127.0.0.1 | POST "/api/generate" {"timestamp":1700181160,"level":"INFO","function":"log_server_request","line":1240,"message":"request","remote_addr":"127.0.0.1","remote_port":50467,"status":200,"method":"HEAD","path":"/","params":{}} 2023/11/17 00:32:40 llama.go:430: signal: segmentation fault 2023/11/17 00:32:40 llama.go:504: llama runner stopped successfully [GIN] 2023/11/17 - 00:32:40 | 200 | 247.513143ms | 127.0.0.1 | POST "/api/generate" 2023/11/17 00:37:40 llama.go:504: llama runner stopped successfully ``` This error also appears during startup (but it also shows up on the i9-8880H, so it's not specific to the old Xeon): ``` 2023/11/17 00:32:20 llama.go:415: starting llama runner 2023/11/17 00:32:20 llama.go:417: error starting the external llama runner: fork/exec /var/folders/kq/gx8q5jw525zfzzxy2k1y5fz40000gn/T/ollama3231579164/llama.cpp/gguf/build/metal/bin/ollama-runner: bad CPU type in executable 2023/11/17 00:32:20 llama.go:415: starting llama runner ``` The exact same setup and prompts work fine on a 2019 MacBook Pro with a Radeon Pro 5500M, 16GB RAM, and a Coffee Lake i9-9880H running Ventura 13.4.1(c). The GPU shouldn't be an issue either, as it is unused (I can confirm 800% CPU utilisation during inference).
{ "login": "orlyandico", "id": 1325420, "node_id": "MDQ6VXNlcjEzMjU0MjA=", "avatar_url": "https://avatars.githubusercontent.com/u/1325420?v=4", "gravatar_id": "", "url": "https://api.github.com/users/orlyandico", "html_url": "https://github.com/orlyandico", "followers_url": "https://api.github.com/users/orlyandico/followers", "following_url": "https://api.github.com/users/orlyandico/following{/other_user}", "gists_url": "https://api.github.com/users/orlyandico/gists{/gist_id}", "starred_url": "https://api.github.com/users/orlyandico/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/orlyandico/subscriptions", "organizations_url": "https://api.github.com/users/orlyandico/orgs", "repos_url": "https://api.github.com/users/orlyandico/repos", "events_url": "https://api.github.com/users/orlyandico/events{/privacy}", "received_events_url": "https://api.github.com/users/orlyandico/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1163/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1163/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/489
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/489/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/489/comments
https://api.github.com/repos/ollama/ollama/issues/489/events
https://github.com/ollama/ollama/issues/489
1,886,624,436
I_kwDOJ0Z1Ps5wc5q0
489
Can't build custom models
{ "login": "selfagency", "id": 2541728, "node_id": "MDQ6VXNlcjI1NDE3Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/2541728?v=4", "gravatar_id": "", "url": "https://api.github.com/users/selfagency", "html_url": "https://github.com/selfagency", "followers_url": "https://api.github.com/users/selfagency/followers", "following_url": "https://api.github.com/users/selfagency/following{/other_user}", "gists_url": "https://api.github.com/users/selfagency/gists{/gist_id}", "starred_url": "https://api.github.com/users/selfagency/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/selfagency/subscriptions", "organizations_url": "https://api.github.com/users/selfagency/orgs", "repos_url": "https://api.github.com/users/selfagency/repos", "events_url": "https://api.github.com/users/selfagency/events{/privacy}", "received_events_url": "https://api.github.com/users/selfagency/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2023-09-07T22:17:31
2023-10-23T16:33:41
2023-10-23T16:33:40
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I'm trying to build a custom model from a model I have already successfully pulled and used locally, but no matter what I do, it throws an error: ```Modelfile FROM codellama:latest SYSTEM """ You are an expert in Nushell. """ ``` ```sh ❯ ollama list NAME ID SIZE MODIFIED codellama:latest 36893bf9bc7f 3.8 GB 10 minutes ago llama2:13b 984c614c4037 7.3 GB 24 minutes ago llama2:latest 5c1a4ea68dd0 3.8 GB 7 minutes ago ❯ ollama create nushell -f ./Modelfile parsing modelfile looking for model creating model system layer creating model license layer looking for model pulling model file ⠋ pulling manifest Error: pull model manifest: model not found ``` I'm running v0.0.18 installed via Homebrew on macOS Sonoma Beta.
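The same create can also be attempted through the REST API, which can help isolate whether the failure lies in the CLI or the server. A sketch, hedged: the inline `modelfile` field is supported on the 0.1.x-0.4.x API generations (the API of later versions differs), so adjust for your server version:

```python
import httpx

# Sketch: create the model via the REST API instead of the CLI.
# The inline "modelfile" field applies to the 0.1.x-0.4.x API
# generations; newer servers use a different create payload.
modelfile = 'FROM codellama:latest\nSYSTEM """\nYou are an expert in Nushell.\n"""\n'
with httpx.Client(base_url="http://localhost:11434", timeout=None) as client:
    with client.stream(
        "POST", "/api/create",
        json={"name": "nushell", "modelfile": modelfile},
    ) as resp:
        for line in resp.iter_lines():
            print(line)  # streamed status updates, e.g. {"status": "..."}
```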
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/489/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/489/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5983
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5983/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5983/comments
https://api.github.com/repos/ollama/ollama/issues/5983/events
https://github.com/ollama/ollama/issues/5983
2,432,229,999
I_kwDOJ0Z1Ps6Q-OJv
5,983
Distributed Computing: Run single large model on multiple machines
{ "login": "mrmiket64", "id": 99057519, "node_id": "U_kgDOBed_bw", "avatar_url": "https://avatars.githubusercontent.com/u/99057519?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mrmiket64", "html_url": "https://github.com/mrmiket64", "followers_url": "https://api.github.com/users/mrmiket64/followers", "following_url": "https://api.github.com/users/mrmiket64/following{/other_user}", "gists_url": "https://api.github.com/users/mrmiket64/gists{/gist_id}", "starred_url": "https://api.github.com/users/mrmiket64/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mrmiket64/subscriptions", "organizations_url": "https://api.github.com/users/mrmiket64/orgs", "repos_url": "https://api.github.com/users/mrmiket64/repos", "events_url": "https://api.github.com/users/mrmiket64/events{/privacy}", "received_events_url": "https://api.github.com/users/mrmiket64/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
2
2024-07-26T13:20:15
2024-07-26T18:41:46
2024-07-26T18:41:46
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello dear Ollama team. I'd like to request a new feature in Ollama that allows a single large model to be run across multiple machines for inference. This would enable users to take advantage of distributed computing to handle very large models that exceed the memory capacity of a single machine. That functionality would make it possible to: - Run models like llama 3.1 405b on more accessible hardware - Distribute the load when running several smaller models to serve a large user base Thank you for all of your great work, it is really making a difference for me. All the Best Miguel
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5983/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5983/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5696
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5696/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5696/comments
https://api.github.com/repos/ollama/ollama/issues/5696/events
https://github.com/ollama/ollama/issues/5696
2,407,905,668
I_kwDOJ0Z1Ps6PhbmE
5,696
The model cannot be loaded into the gpu
{ "login": "IanNOV13", "id": 100789609, "node_id": "U_kgDOBgHtaQ", "avatar_url": "https://avatars.githubusercontent.com/u/100789609?v=4", "gravatar_id": "", "url": "https://api.github.com/users/IanNOV13", "html_url": "https://github.com/IanNOV13", "followers_url": "https://api.github.com/users/IanNOV13/followers", "following_url": "https://api.github.com/users/IanNOV13/following{/other_user}", "gists_url": "https://api.github.com/users/IanNOV13/gists{/gist_id}", "starred_url": "https://api.github.com/users/IanNOV13/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/IanNOV13/subscriptions", "organizations_url": "https://api.github.com/users/IanNOV13/orgs", "repos_url": "https://api.github.com/users/IanNOV13/repos", "events_url": "https://api.github.com/users/IanNOV13/events{/privacy}", "received_events_url": "https://api.github.com/users/IanNOV13/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2024-07-15T05:45:37
2024-07-15T06:04:22
2024-07-15T06:04:22
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? After updating to version 0.2.5, the gemma2:9b model cannot be loaded into the GPU. After I downgraded the version back to 0.2.4, the model was successfully loaded into the GPU. ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.2.5
{ "login": "IanNOV13", "id": 100789609, "node_id": "U_kgDOBgHtaQ", "avatar_url": "https://avatars.githubusercontent.com/u/100789609?v=4", "gravatar_id": "", "url": "https://api.github.com/users/IanNOV13", "html_url": "https://github.com/IanNOV13", "followers_url": "https://api.github.com/users/IanNOV13/followers", "following_url": "https://api.github.com/users/IanNOV13/following{/other_user}", "gists_url": "https://api.github.com/users/IanNOV13/gists{/gist_id}", "starred_url": "https://api.github.com/users/IanNOV13/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/IanNOV13/subscriptions", "organizations_url": "https://api.github.com/users/IanNOV13/orgs", "repos_url": "https://api.github.com/users/IanNOV13/repos", "events_url": "https://api.github.com/users/IanNOV13/events{/privacy}", "received_events_url": "https://api.github.com/users/IanNOV13/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5696/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5696/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/420
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/420/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/420/comments
https://api.github.com/repos/ollama/ollama/issues/420/events
https://github.com/ollama/ollama/pull/420
1,868,144,218
PR_kwDOJ0Z1Ps5Y2imz
420
add 34b to mem check
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-08-26T15:29:38
2023-08-26T21:15:54
2023-08-26T21:15:52
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/420", "html_url": "https://github.com/ollama/ollama/pull/420", "diff_url": "https://github.com/ollama/ollama/pull/420.diff", "patch_url": "https://github.com/ollama/ollama/pull/420.patch", "merged_at": "2023-08-26T21:15:52" }
null
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/420/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/420/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3995
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3995/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3995/comments
https://api.github.com/repos/ollama/ollama/issues/3995/events
https://github.com/ollama/ollama/issues/3995
2,267,376,397
I_kwDOJ0Z1Ps6HJWsN
3,995
Issues with Llama3:70b Model When stream is Set to False
{ "login": "JIAQIA", "id": 2863932, "node_id": "MDQ6VXNlcjI4NjM5MzI=", "avatar_url": "https://avatars.githubusercontent.com/u/2863932?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JIAQIA", "html_url": "https://github.com/JIAQIA", "followers_url": "https://api.github.com/users/JIAQIA/followers", "following_url": "https://api.github.com/users/JIAQIA/following{/other_user}", "gists_url": "https://api.github.com/users/JIAQIA/gists{/gist_id}", "starred_url": "https://api.github.com/users/JIAQIA/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JIAQIA/subscriptions", "organizations_url": "https://api.github.com/users/JIAQIA/orgs", "repos_url": "https://api.github.com/users/JIAQIA/repos", "events_url": "https://api.github.com/users/JIAQIA/events{/privacy}", "received_events_url": "https://api.github.com/users/JIAQIA/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-04-28T06:27:27
2024-05-10T08:12:17
2024-05-09T21:49:40
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

When the stream parameter is set to True, the Llama3:70b model functions correctly. Here is the pytest code used for testing streaming output:

```python
import httpx


def test_ollama3_70b_original_stream_completion() -> None:
    """Test the local Ollama API with streaming output."""
    host = "http://localhost"
    port = 11434
    client = httpx.Client(base_url=f"{host}:{port}")
    json_info = {
        "model": "llama3:70b",
        "messages": [
            {
                "role": "user",
                "content": "Help me make a travel guide to Yosemite National Park in 200 words?",
            }
        ],
        "stream": True,
    }
    with client.stream("POST", "/api/chat", json=json_info) as response:  # noqa
        for line in response.iter_lines():
            print(line)
            print("-" * 10)
```

For the standard Llama3 (not 70b!) model with stream set to False, the API works as expected:

```python
def test_ollama3_chat_completion() -> None:
    """Test the local Ollama API with non-streaming output."""
    host = "http://localhost"
    port = 11434
    client = httpx.Client(base_url=f"{host}:{port}")
    json_info = {
        "model": "llama3",
        "messages": [
            {
                "role": "user",
                "content": "Help me make a travel guide to Yosemite National Park in 200 words?",
            }
        ],
        "stream": False,
    }
    response = client.post("/api/chat", json=json_info)
    print(response.json())
```

However, when using the Llama3:70b model with stream set to False, the request times out, resulting in an httpx.ReadTimeout exception:

```python
def test_ollama3_70b_chat_completion() -> None:
    """Test the local Ollama llama3:70b API with non-streaming output."""
    host = "http://localhost"
    port = 11434
    client = httpx.Client(base_url=f"{host}:{port}")
    json_info = {
        "model": "llama3:70b",
        "messages": [
            {
                "role": "user",
                "content": "Help me make a travel guide to Yosemite National Park in 200 words?",
            }
        ],
        "stream": False,
    }
    response = client.post("/api/chat", json=json_info)
    print(response.json())
```

FYI, hardware:

```
Hardware Overview:
  Model Name: Mac Studio
  Model Identifier: Mac14,14
  Model Number: Z1800001HCH/A
  Chip: Apple M2 Ultra
  Total Number of Cores: 24 (16 performance and 8 efficiency)
  Memory: 192 GB
  System Firmware Version: 8422.141.2
  OS Loader Version: 8422.141.2
  Serial Number (system): G2P4927DXG
  Hardware UUID: D9B7B9C2-D50B-5EC6-A5BE-D6403AF1ADA2
  Provisioning UDID: 00006022-000451D90E90A01E
  Activation Lock Status: Enabled
```

### OS

macOS

### GPU

Apple

### CPU

Apple

### Ollama version

0.1.32
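A note on the ReadTimeout: it is raised by the client, not the server. httpx applies a 5-second default timeout to every operation, which a 70b model generating a full non-streaming reply can easily exceed. A minimal sketch of a client with a raised read timeout follows; the 600-second value is an arbitrary assumption for illustration, not something taken from this report:

```python
import httpx

# Sketch: lift httpx's 5-second default so a slow non-streaming
# llama3:70b generation can finish before the client gives up.
# The 600-second figure is an assumption chosen for illustration.
client = httpx.Client(
    base_url="http://localhost:11434",
    timeout=httpx.Timeout(600.0, connect=10.0),
)
response = client.post(
    "/api/chat",
    json={
        "model": "llama3:70b",
        "messages": [{"role": "user", "content": "Say hello."}],
        "stream": False,
    },
)
print(response.json())
```

`timeout=None` would disable the client-side limit entirely; either way, streaming remains the friendlier mode for long generations because bytes start arriving immediately.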
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3995/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3995/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7082
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7082/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7082/comments
https://api.github.com/repos/ollama/ollama/issues/7082/events
https://github.com/ollama/ollama/pull/7082
2,562,561,646
PR_kwDOJ0Z1Ps59bDIP
7,082
llama: fix compiler flag differences
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-10-02T20:15:43
2024-10-03T20:22:27
2024-10-03T20:22:24
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7082", "html_url": "https://github.com/ollama/ollama/pull/7082", "diff_url": "https://github.com/ollama/ollama/pull/7082.diff", "patch_url": "https://github.com/ollama/ollama/pull/7082.patch", "merged_at": "2024-10-03T20:22:24" }
Adjust the flags for the new Go server to more closely match the generate flow. Still more refinement to do, but I think this gets us closer.

Comparisons on a Ryzen 9 7950X system, llama3.2 tokens/s (windows / linux):

| Runner | cpu | cpu_avx | cpu_avx2 |
| --- | --- | --- | --- |
| This PR | 9.17 / 4.07 | 14.19 / 14.78 | 14.3 / 15.3 |
| C++ server (0.3.12 release) | 5.9 / 7.34 | 17.5 / 21.47 | 19 / 22.63 |
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7082/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7082/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2900
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2900/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2900/comments
https://api.github.com/repos/ollama/ollama/issues/2900/events
https://github.com/ollama/ollama/issues/2900
2,165,511,473
I_kwDOJ0Z1Ps6BExUx
2,900
using starcoder 15b, 3b, and standard returning end of file
{ "login": "michaelmandich", "id": 85703638, "node_id": "MDQ6VXNlcjg1NzAzNjM4", "avatar_url": "https://avatars.githubusercontent.com/u/85703638?v=4", "gravatar_id": "", "url": "https://api.github.com/users/michaelmandich", "html_url": "https://github.com/michaelmandich", "followers_url": "https://api.github.com/users/michaelmandich/followers", "following_url": "https://api.github.com/users/michaelmandich/following{/other_user}", "gists_url": "https://api.github.com/users/michaelmandich/gists{/gist_id}", "starred_url": "https://api.github.com/users/michaelmandich/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/michaelmandich/subscriptions", "organizations_url": "https://api.github.com/users/michaelmandich/orgs", "repos_url": "https://api.github.com/users/michaelmandich/repos", "events_url": "https://api.github.com/users/michaelmandich/events{/privacy}", "received_events_url": "https://api.github.com/users/michaelmandich/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-03-03T19:06:42
2024-03-03T19:15:01
2024-03-03T19:15:00
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
```
success
Error: Post "http://127.0.0.1:11434/api/chat": EOF
/Downloads$
```

Attempted with both versions 27 and 28 on Windows and Linux; the response is the same on both. It appears to be an issue with how StarCoder is being set up.
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2900/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2900/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5601
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5601/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5601/comments
https://api.github.com/repos/ollama/ollama/issues/5601/events
https://github.com/ollama/ollama/pull/5601
2,400,741,349
PR_kwDOJ0Z1Ps5094fi
5,601
Configure the systemd service via a separate file.
{ "login": "ykhrustalev", "id": 737733, "node_id": "MDQ6VXNlcjczNzczMw==", "avatar_url": "https://avatars.githubusercontent.com/u/737733?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ykhrustalev", "html_url": "https://github.com/ykhrustalev", "followers_url": "https://api.github.com/users/ykhrustalev/followers", "following_url": "https://api.github.com/users/ykhrustalev/following{/other_user}", "gists_url": "https://api.github.com/users/ykhrustalev/gists{/gist_id}", "starred_url": "https://api.github.com/users/ykhrustalev/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ykhrustalev/subscriptions", "organizations_url": "https://api.github.com/users/ykhrustalev/orgs", "repos_url": "https://api.github.com/users/ykhrustalev/repos", "events_url": "https://api.github.com/users/ykhrustalev/events{/privacy}", "received_events_url": "https://api.github.com/users/ykhrustalev/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
5
2024-07-10T13:17:18
2024-08-14T16:57:08
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5601", "html_url": "https://github.com/ollama/ollama/pull/5601", "diff_url": "https://github.com/ollama/ollama/pull/5601.diff", "patch_url": "https://github.com/ollama/ollama/pull/5601.patch", "merged_at": null }
Instead of changing the systemd unit file itself, keep the configuration in a separate file.

refs #3516
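For context, systemd's standard mechanism for this kind of override is a drop-in file, which is merged over the packaged unit so upgrades do not clobber local settings. A minimal sketch follows; the environment variable and its value are only illustrations, and this may differ from the exact approach taken in the PR:

```ini
# /etc/systemd/system/ollama.service.d/override.conf
# Typically created with `systemctl edit ollama`; systemd layers this
# over the shipped unit file instead of replacing it.
[Service]
Environment="OLLAMA_HOST=0.0.0.0"
```

After editing, `systemctl daemon-reload` followed by `systemctl restart ollama` picks up the change.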
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5601/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5601/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/325
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/325/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/325/comments
https://api.github.com/repos/ollama/ollama/issues/325/events
https://github.com/ollama/ollama/pull/325
1,846,014,520
PR_kwDOJ0Z1Ps5XrwZm
325
s/parmeter/parameter/
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-08-10T23:27:11
2023-08-11T00:30:03
2023-08-11T00:30:02
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/325", "html_url": "https://github.com/ollama/ollama/pull/325", "diff_url": "https://github.com/ollama/ollama/pull/325.diff", "patch_url": "https://github.com/ollama/ollama/pull/325.patch", "merged_at": "2023-08-11T00:30:02" }
null
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/325/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/325/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3369
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3369/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3369/comments
https://api.github.com/repos/ollama/ollama/issues/3369/events
https://github.com/ollama/ollama/issues/3369
2,210,418,734
I_kwDOJ0Z1Ps6DwFAu
3,369
Pull a model on start or without requiring serve
{ "login": "0x77dev", "id": 46429701, "node_id": "MDQ6VXNlcjQ2NDI5NzAx", "avatar_url": "https://avatars.githubusercontent.com/u/46429701?v=4", "gravatar_id": "", "url": "https://api.github.com/users/0x77dev", "html_url": "https://github.com/0x77dev", "followers_url": "https://api.github.com/users/0x77dev/followers", "following_url": "https://api.github.com/users/0x77dev/following{/other_user}", "gists_url": "https://api.github.com/users/0x77dev/gists{/gist_id}", "starred_url": "https://api.github.com/users/0x77dev/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/0x77dev/subscriptions", "organizations_url": "https://api.github.com/users/0x77dev/orgs", "repos_url": "https://api.github.com/users/0x77dev/repos", "events_url": "https://api.github.com/users/0x77dev/events{/privacy}", "received_events_url": "https://api.github.com/users/0x77dev/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 6677677816, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgVG-A", "url": "https://api.github.com/repos/ollama/ollama/labels/docker", "name": "docker", "color": "0052CC", "default": false, "description": "Issues relating to using ollama in containers" } ]
open
false
null
[]
null
11
2024-03-27T10:26:52
2025-01-22T17:28:44
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What are you trying to do?

To enhance the user experience when deploying Ollama with models in a containerized environment, it would be beneficial to enable pre-embedding a model into the image through a custom Dockerfile, or pulling a model upon starting Ollama by specifying an argument or environment variable. This eliminates the need for an API request after the container starts.

### How should we solve this?

- `ollama serve --pull [models]`
- `OLLAMA_PULL=model1,model2 ollama serve`
- `ollama pull` without `ollama serve` (a slightly harder option to implement, but it improves the ability to create custom images with custom models beyond pulling); the current API-based workaround is sketched below

### What is the impact of not solving this?

This is a significant improvement for hosting Ollama. Without it, deploying Ollama, especially in a production environment, would be more challenging.

### Anything else?

Related:
- https://github.com/ollama/ollama/issues/1322
- https://github.com/ollama/ollama/issues/358#issuecomment-2022394098
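Until one of the options above exists, the closest workaround is the API request this proposal is trying to remove: pull over HTTP once the server is up. A rough sketch in Python; the model list and URL are placeholders:

```python
import httpx

# Rough sketch of today's workaround: after `ollama serve` starts
# (e.g. as a container entrypoint), pull models over the REST API
# instead of a hypothetical `--pull` flag. Model names are placeholders.
MODELS = ["llama3", "qwen2:0.5b"]

client = httpx.Client(base_url="http://localhost:11434", timeout=None)
for name in MODELS:
    # /api/pull streams JSON progress objects until the pull completes.
    # (Older Ollama versions used {"name": ...} instead of {"model": ...}.)
    with client.stream("POST", "/api/pull", json={"model": name}) as resp:
        for line in resp.iter_lines():
            print(line)
```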
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3369/reactions", "total_count": 9, "+1": 9, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3369/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/8280
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8280/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8280/comments
https://api.github.com/repos/ollama/ollama/issues/8280/events
https://github.com/ollama/ollama/issues/8280
2,765,053,868
I_kwDOJ0Z1Ps6kz1-s
8,280
Reverse download progress during model pull (ollama pull phi3:medium)
{ "login": "Bakht-Ullah", "id": 150267165, "node_id": "U_kgDOCPTlHQ", "avatar_url": "https://avatars.githubusercontent.com/u/150267165?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Bakht-Ullah", "html_url": "https://github.com/Bakht-Ullah", "followers_url": "https://api.github.com/users/Bakht-Ullah/followers", "following_url": "https://api.github.com/users/Bakht-Ullah/following{/other_user}", "gists_url": "https://api.github.com/users/Bakht-Ullah/gists{/gist_id}", "starred_url": "https://api.github.com/users/Bakht-Ullah/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Bakht-Ullah/subscriptions", "organizations_url": "https://api.github.com/users/Bakht-Ullah/orgs", "repos_url": "https://api.github.com/users/Bakht-Ullah/repos", "events_url": "https://api.github.com/users/Bakht-Ullah/events{/privacy}", "received_events_url": "https://api.github.com/users/Bakht-Ullah/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
2
2025-01-01T12:51:14
2025-01-07T06:54:02
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

While pulling a model with Ollama (e.g. `ollama pull phi3:medium`), the download progress inconsistently increases and then decreases again. What could be the reason behind this? Note: the internet connection is stable.

### OS

Windows

### GPU

Nvidia

### CPU

_No response_

### Ollama version

0.5.4
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8280/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8280/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/6396
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6396/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6396/comments
https://api.github.com/repos/ollama/ollama/issues/6396/events
https://github.com/ollama/ollama/issues/6396
2,471,200,922
I_kwDOJ0Z1Ps6TS4ia
6,396
pulling manifest Error: pull model manifest: 412: The model you are attempting to pull requires a newer version of Ollama. Please download the latest version at: https://ollama.com/download
{ "login": "jzhang82119", "id": 49737178, "node_id": "MDQ6VXNlcjQ5NzM3MTc4", "avatar_url": "https://avatars.githubusercontent.com/u/49737178?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jzhang82119", "html_url": "https://github.com/jzhang82119", "followers_url": "https://api.github.com/users/jzhang82119/followers", "following_url": "https://api.github.com/users/jzhang82119/following{/other_user}", "gists_url": "https://api.github.com/users/jzhang82119/gists{/gist_id}", "starred_url": "https://api.github.com/users/jzhang82119/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jzhang82119/subscriptions", "organizations_url": "https://api.github.com/users/jzhang82119/orgs", "repos_url": "https://api.github.com/users/jzhang82119/repos", "events_url": "https://api.github.com/users/jzhang82119/events{/privacy}", "received_events_url": "https://api.github.com/users/jzhang82119/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-08-17T00:01:32
2024-08-17T01:47:35
2024-08-17T01:47:06
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

curl -fsSL https://ollama.com/install.sh | sh

```
>>> Downloading ollama...
######################################################################## 100.0%##O#-#
>>> Installing ollama to /usr/local/bin...
>>> Creating ollama user...
>>> Adding ollama user to render group...
>>> Adding ollama user to video group...
>>> Adding current user to ollama group...
>>> Creating ollama systemd service...
>>> Enabling and starting ollama service...
Created symlink /etc/systemd/system/default.target.wants/ollama.service → /etc/systemd/system/ollama.service.
>>> Compatible AMD GPU ROCm library detected at /opt/rocm
>>> The Ollama API is now available at 127.0.0.1:11434.
>>> Install complete. Run "ollama" from the command line.
```

ollama run llama3.1:405b

```
pulling manifest
Error: pull model manifest: 412: The model you are attempting to pull requires a newer version of Ollama. Please download the latest version at: https://ollama.com/download
```

ollama --version

```
ollama version is 0.3.0
Warning: client version is 0.3.6
```

I have already tried both the manual and the automatic way to install Ollama, but neither works. Can you help?

### OS

Linux

### GPU

AMD

### CPU

Intel

### Ollama version

ollama version is 0.3.0
Warning: client version is 0.3.6
{ "login": "jzhang82119", "id": 49737178, "node_id": "MDQ6VXNlcjQ5NzM3MTc4", "avatar_url": "https://avatars.githubusercontent.com/u/49737178?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jzhang82119", "html_url": "https://github.com/jzhang82119", "followers_url": "https://api.github.com/users/jzhang82119/followers", "following_url": "https://api.github.com/users/jzhang82119/following{/other_user}", "gists_url": "https://api.github.com/users/jzhang82119/gists{/gist_id}", "starred_url": "https://api.github.com/users/jzhang82119/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jzhang82119/subscriptions", "organizations_url": "https://api.github.com/users/jzhang82119/orgs", "repos_url": "https://api.github.com/users/jzhang82119/repos", "events_url": "https://api.github.com/users/jzhang82119/events{/privacy}", "received_events_url": "https://api.github.com/users/jzhang82119/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6396/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6396/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/1977
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1977/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1977/comments
https://api.github.com/repos/ollama/ollama/issues/1977/events
https://github.com/ollama/ollama/issues/1977
2,080,389,095
I_kwDOJ0Z1Ps58ADfn
1,977
Mistakes in template definitions on models available to download from https://ollama.ai
{ "login": "jukofyork", "id": 69222624, "node_id": "MDQ6VXNlcjY5MjIyNjI0", "avatar_url": "https://avatars.githubusercontent.com/u/69222624?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jukofyork", "html_url": "https://github.com/jukofyork", "followers_url": "https://api.github.com/users/jukofyork/followers", "following_url": "https://api.github.com/users/jukofyork/following{/other_user}", "gists_url": "https://api.github.com/users/jukofyork/gists{/gist_id}", "starred_url": "https://api.github.com/users/jukofyork/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jukofyork/subscriptions", "organizations_url": "https://api.github.com/users/jukofyork/orgs", "repos_url": "https://api.github.com/users/jukofyork/repos", "events_url": "https://api.github.com/users/jukofyork/events{/privacy}", "received_events_url": "https://api.github.com/users/jukofyork/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
18
2024-01-13T16:55:11
2025-01-14T02:51:47
2025-01-14T02:51:47
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi,

Some of the mistakes in the `TEMPLATE` definitions for the models you can download from https://ollama.ai are hurting the models to varying degrees. I only found this by accident when experimenting with the API to use some of the code completion / code editing prompts used by the continue project (https://github.com/continuedev/continue/tree/main/core/llm/templates).

I've sourced all these primarily by looking at the original tokenizer config and, failing that, by looking through the official descriptions and/or their respective official GitHub discussions. I've concentrated on the original/official models (other than `phind-codellama`) as it's hard to find any concrete info on a lot of the "bootleg" fine-tuned models.

The ones that are particularly affected are:

- `codellama`: missing the space before the response **severely** hurts the performance when presented with a large section of code. There are a lot of 'cargo cult' prompt templates for `codellama` going around, but this one can be confirmed from their official release page and the tokenizer config.
- `deepseek-llm`: having the system message prepended to every message seems to increase the chance of responding in Chinese Unicode characters (Deepseek says specifically it wasn't trained to use a system message).
- `deepseek-coder`: quickly fills its context when discussing large sections of code and will start to repeat the system message back at you before completely descending into gibberish (this happens very quickly if using a detailed / long custom system message).

`llama2` doesn't seem too affected by the missing space before the response, but again this template can be confirmed from their official release page and the tokenizer config.

`deepseek-llm`, `mixtral` and `mistral` absolutely should **NOT** have a space or newline before the response, or they will often respond with gibberish and/or Chinese Unicode characters. The official `mixtral` huggingface page actually tells you a slightly wrong template format, but the original tokenizer config is the same as `mistral`.

The suggestion of adding "**Response**" to `phind-codellama` is from the huggingface discussion, so I can't confirm whether this is true or not.
**codellama:34b-instruct:** ``` TEMPLATE """<s>[INST] {{ if and .First .System }}<<SYS>> {{ .System }} <</SYS>> {{ end }}{{ .Prompt }} [/INST] {{ .Response }}""" ``` ---- **deepseek-coder:33b-instruct:** ``` TEMPLATE """{{ if and .First .System }}{{ .System }} {{ end }}### Instruction: {{ .Prompt }} ### Response: {{ .Response }}""" ``` ---- **deepseek-llm:67b-chat:** ``` TEMPLATE """User: {{ if and .First .System }}{{ .System }} {{ end }}{{ .Prompt }} Assistant:{{ .Response }}""" ``` ---- **llama2:70b-chat:** ``` TEMPLATE """<s>[INST] {{ if and .First .System }}<<SYS>> {{ .System }} <</SYS>> {{ end }}{{ .Prompt }} [/INST] {{ .Response }}""" ``` ---- **mixtral:8x7b-instruct-v0.1 & mistral:7b-instruct-v0.2:** ``` TEMPLATE """{{ if .First }}<s>{{ end }}[INST] {{ if and .First .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST]{{ .Response }}""" ``` ---- **phind-codellama:34b-v2:** ``` TEMPLATE """{{ if and .First .System }}### System Prompt {{ .System }} {{ end }}### User Message {{ .Prompt }} ### Assistant Response {{ .Response }}""" ``` ---- **yi:34b-chat:** ``` TEMPLATE """{{ if and .First .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}<|im_start|>user {{ .Prompt }}<|im_end|> <|im_start|>assistant {{ .Response }}""" ``` ---- These two aren't listed on https://ollama.ai but also use the same "ChatML" template as `yi`: **mpt:30B-chat:** ``` TEMPLATE """{{ if and .First .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}<|im_start|>user {{ .Prompt }}<|im_end|> <|im_start|>assistant {{ .Response }}""" ``` ---- **qwen:72b-chat:** ``` TEMPLATE """{{ if and .First .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}<|im_start|>user {{ .Prompt }}<|im_end|> <|im_start|>assistant {{ .Response }}""" ``` ---- Are there any other "non-bootleg" models I should look at? I might as well do them too if there are any.
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1977/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1977/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8619
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8619/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8619/comments
https://api.github.com/repos/ollama/ollama/issues/8619/events
https://github.com/ollama/ollama/issues/8619
2,814,107,366
I_kwDOJ0Z1Ps6nu97m
8,619
s390x support for the install.sh
{ "login": "maredua1", "id": 122402231, "node_id": "U_kgDOB0u1tw", "avatar_url": "https://avatars.githubusercontent.com/u/122402231?v=4", "gravatar_id": "", "url": "https://api.github.com/users/maredua1", "html_url": "https://github.com/maredua1", "followers_url": "https://api.github.com/users/maredua1/followers", "following_url": "https://api.github.com/users/maredua1/following{/other_user}", "gists_url": "https://api.github.com/users/maredua1/gists{/gist_id}", "starred_url": "https://api.github.com/users/maredua1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/maredua1/subscriptions", "organizations_url": "https://api.github.com/users/maredua1/orgs", "repos_url": "https://api.github.com/users/maredua1/repos", "events_url": "https://api.github.com/users/maredua1/events{/privacy}", "received_events_url": "https://api.github.com/users/maredua1/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
0
2025-01-27T21:34:47
2025-01-27T21:34:47
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
It would be good if the install.sh script were capable of installing Ollama on s390x machines; it would make life easier for the multiple developers currently working on them.

curl -fsSL https://ollama.com/install.sh | sh

You may say that you currently don't have an s390x machine to develop on; you can get a trial one for 120 days [here](https://community.ibm.com/zsystems/form/l1cc-oss-vm-request/).

Building from source (the `make -j 5` command) works fine, just as it does for any other Linux arch/x86/arm machines.

Thanks
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8619/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8619/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/5977
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5977/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5977/comments
https://api.github.com/repos/ollama/ollama/issues/5977/events
https://github.com/ollama/ollama/issues/5977
2,431,763,793
I_kwDOJ0Z1Ps6Q8cVR
5,977
After changing settings with /set, the model sometimes crashes (2 out of 3 sets of /set commands)
{ "login": "mcDandy", "id": 18588943, "node_id": "MDQ6VXNlcjE4NTg4OTQz", "avatar_url": "https://avatars.githubusercontent.com/u/18588943?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mcDandy", "html_url": "https://github.com/mcDandy", "followers_url": "https://api.github.com/users/mcDandy/followers", "following_url": "https://api.github.com/users/mcDandy/following{/other_user}", "gists_url": "https://api.github.com/users/mcDandy/gists{/gist_id}", "starred_url": "https://api.github.com/users/mcDandy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mcDandy/subscriptions", "organizations_url": "https://api.github.com/users/mcDandy/orgs", "repos_url": "https://api.github.com/users/mcDandy/repos", "events_url": "https://api.github.com/users/mcDandy/events{/privacy}", "received_events_url": "https://api.github.com/users/mcDandy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
3
2024-07-26T08:58:36
2024-07-26T21:04:35
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

I turned off wordwrap and turned it back on. It crashed. It only crashes when I change settings. The output is:

```
Perhaps starting with something less disruptive than turning everyone's mashed potatoes into glowing orbs would be wise. But where was the fun in that?

>>> /?
Available Commands:
  /set            Set session variables
  /show           Show model information
  /load <model>   Load a session or model
  /save <model>   Save your current session
  /clear          Clear session context
  /bye            Exit
  /?, /help       Help for a command
  /? shortcuts    Help for keyboard shortcuts

Use """ to begin a multi-line message.

>>> /save story_stella_fairy
Created new model 'story_stella_fairy'
>>> /?
Available Commands:
  /set            Set session variables
  /show           Show model information
  /load <model>   Load a session or model
  /save <model>   Save your current session
  /clear          Clear session context
  /bye            Exit
  /?, /help       Help for a command
  /? shortcuts    Help for keyboard shortcuts

Use """ to begin a multi-line message.

>>> ... <start_of_turn>user She had to learn control. What if she accidentally converted something into plasma by nullyfying
... the electromagnetic force holding atoms together. <end_of_turn> <start_of_turn>model
Error: error reading llm response: read tcp 127.0.0.1:55314->127.0.0.1:55004: wsarecv: Stávající připojení bylo vynuceně ukončeno vzdáleným hostitelem.
```

(The Czech error message translates to: "An existing connection was forcibly closed by the remote host.")

```
"I have a lot to learn," she said, realizing the enormity of the path ahead.

>>> /set
Available Commands:
  /set parameter ...     Set a parameter
  /set system <string>   Set system message
  /set history           Enable history
  /set nohistory         Disable history
  /set wordwrap          Enable wordwrap
  /set nowordwrap        Disable wordwrap
  /set format json       Enable JSON mode
  /set noformat          Disable formatting
  /set verbose           Show LLM stats
  /set quiet             Disable LLM stats

>>> /set nowordwrap
Set 'nowordwrap' mode.
>>>/set wordwrap
Set 'wordwrap' mode.
>>> <start_of_turn>user She at least had less of a problem with the part of elec
... romagnetism which does not govern matter stability or directly it's temperat
... ure. Electromagnetic waves. Espetially the portion of them we call light.<en
... d_of_turn> <start_of_turn>model
Error: llama runner process has terminated: CUDA error"
PS F:\Users\user\OneDrive - Personal\Dokumenty\LLMs\gemma2.27b>
```

I am using an NVIDIA RTX 4080 Laptop GPU, an Intel Core i9-13950HX, and Windows 11.

### OS

Windows

### GPU

Nvidia

### CPU

Intel

### Ollama version

0.2.8
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5977/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/5977/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/4377
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4377/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4377/comments
https://api.github.com/repos/ollama/ollama/issues/4377/events
https://github.com/ollama/ollama/issues/4377
2,291,357,863
I_kwDOJ0Z1Ps6Ik1in
4,377
Request to adapt OpenAI’s GPT-4 vision API
{ "login": "heshengtao", "id": 20573295, "node_id": "MDQ6VXNlcjIwNTczMjk1", "avatar_url": "https://avatars.githubusercontent.com/u/20573295?v=4", "gravatar_id": "", "url": "https://api.github.com/users/heshengtao", "html_url": "https://github.com/heshengtao", "followers_url": "https://api.github.com/users/heshengtao/followers", "following_url": "https://api.github.com/users/heshengtao/following{/other_user}", "gists_url": "https://api.github.com/users/heshengtao/gists{/gist_id}", "starred_url": "https://api.github.com/users/heshengtao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/heshengtao/subscriptions", "organizations_url": "https://api.github.com/users/heshengtao/orgs", "repos_url": "https://api.github.com/users/heshengtao/repos", "events_url": "https://api.github.com/users/heshengtao/events{/privacy}", "received_events_url": "https://api.github.com/users/heshengtao/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
1
2024-05-12T14:29:15
2024-06-05T15:24:54
2024-06-05T15:24:54
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Thank you very much ollama for making it easy for us. When I am using LLaMA, I want to invoke LLaMA’s visual capabilities. However, following OpenAI’s request method does not allow the model to see the image; the model thinks I have only entered a JSON string containing the image encoding.
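For comparison, Ollama's native chat endpoint takes images as a list of base64 strings on the message itself rather than as OpenAI-style `image_url` content parts, which is the usual source of this confusion. A small sketch follows; the file path and the `llava` model name are placeholders:

```python
import base64

import httpx

# Sketch of Ollama's native image input: /api/chat accepts an "images"
# list of base64-encoded image bytes on each message, rather than the
# OpenAI-style image_url content parts. Path and model are placeholders.
with open("photo.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode()

resp = httpx.post(
    "http://localhost:11434/api/chat",
    json={
        "model": "llava",
        "messages": [
            {
                "role": "user",
                "content": "What is in this image?",
                "images": [image_b64],
            }
        ],
        "stream": False,
    },
    timeout=None,
)
print(resp.json())
```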
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4377/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4377/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5903
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5903/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5903/comments
https://api.github.com/repos/ollama/ollama/issues/5903/events
https://github.com/ollama/ollama/issues/5903
2,426,810,305
I_kwDOJ0Z1Ps6Qpi_B
5,903
please add https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct to https://ollama.com/library
{ "login": "Orgoth", "id": 1035170, "node_id": "MDQ6VXNlcjEwMzUxNzA=", "avatar_url": "https://avatars.githubusercontent.com/u/1035170?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Orgoth", "html_url": "https://github.com/Orgoth", "followers_url": "https://api.github.com/users/Orgoth/followers", "following_url": "https://api.github.com/users/Orgoth/following{/other_user}", "gists_url": "https://api.github.com/users/Orgoth/gists{/gist_id}", "starred_url": "https://api.github.com/users/Orgoth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Orgoth/subscriptions", "organizations_url": "https://api.github.com/users/Orgoth/orgs", "repos_url": "https://api.github.com/users/Orgoth/repos", "events_url": "https://api.github.com/users/Orgoth/events{/privacy}", "received_events_url": "https://api.github.com/users/Orgoth/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
3
2024-07-24T07:31:14
2024-07-24T07:39:47
2024-07-24T07:39:47
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
please add https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct to https://ollama.com/library
{ "login": "Orgoth", "id": 1035170, "node_id": "MDQ6VXNlcjEwMzUxNzA=", "avatar_url": "https://avatars.githubusercontent.com/u/1035170?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Orgoth", "html_url": "https://github.com/Orgoth", "followers_url": "https://api.github.com/users/Orgoth/followers", "following_url": "https://api.github.com/users/Orgoth/following{/other_user}", "gists_url": "https://api.github.com/users/Orgoth/gists{/gist_id}", "starred_url": "https://api.github.com/users/Orgoth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Orgoth/subscriptions", "organizations_url": "https://api.github.com/users/Orgoth/orgs", "repos_url": "https://api.github.com/users/Orgoth/repos", "events_url": "https://api.github.com/users/Orgoth/events{/privacy}", "received_events_url": "https://api.github.com/users/Orgoth/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5903/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5903/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/87
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/87/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/87/comments
https://api.github.com/repos/ollama/ollama/issues/87/events
https://github.com/ollama/ollama/pull/87
1,808,281,206
PR_kwDOJ0Z1Ps5Vssxr
87
fix file paths for windows
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-07-17T17:51:38
2023-07-17T18:21:28
2023-07-17T18:21:25
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/87", "html_url": "https://github.com/ollama/ollama/pull/87", "diff_url": "https://github.com/ollama/ollama/pull/87.diff", "patch_url": "https://github.com/ollama/ollama/pull/87.patch", "merged_at": "2023-07-17T18:21:25" }
null
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/87/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/87/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5519
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5519/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5519/comments
https://api.github.com/repos/ollama/ollama/issues/5519/events
https://github.com/ollama/ollama/issues/5519
2,393,732,612
I_kwDOJ0Z1Ps6OrXYE
5,519
Ultraslow Inference on Chromebook
{ "login": "MeDott29", "id": 13264408, "node_id": "MDQ6VXNlcjEzMjY0NDA4", "avatar_url": "https://avatars.githubusercontent.com/u/13264408?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MeDott29", "html_url": "https://github.com/MeDott29", "followers_url": "https://api.github.com/users/MeDott29/followers", "following_url": "https://api.github.com/users/MeDott29/following{/other_user}", "gists_url": "https://api.github.com/users/MeDott29/gists{/gist_id}", "starred_url": "https://api.github.com/users/MeDott29/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MeDott29/subscriptions", "organizations_url": "https://api.github.com/users/MeDott29/orgs", "repos_url": "https://api.github.com/users/MeDott29/repos", "events_url": "https://api.github.com/users/MeDott29/events{/privacy}", "received_events_url": "https://api.github.com/users/MeDott29/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6677367769, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCL2Q", "url": "https://api.github.com/repos/ollama/ollama/labels/needs%20more%20info", "name": "needs more info", "color": "BA8041", "default": false, "description": "More information is needed to assist" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
8
2024-07-06T21:09:46
2024-09-05T23:07:20
2024-09-05T23:07:19
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Update: I used to run ollama on this chromebook when tinyllama came out and it ran great.

### What is the issue?

![image](https://github.com/ollama/ollama/assets/13264408/e37d1a70-8d92-4281-88fe-d7c48745980a)

After I install I get this warning:

```bash
WARNING: Unable to detect NVIDIA/AMD GPU. Install lspci or lshw to automatically detect and install GPU dependencies.
```

Google Chrome: Version 126.0.6478.132 (Official Build) (64-bit)
Platform: 15886.44.0 (Official Build) stable-channel nami
Channel: stable-channel
Firmware Version: Google_Nami.10775.123.0
ARC Enabled: true
ARC: 11931109
Enterprise Enrolled: false
Developer Mode: false

I have a slower chromebook that qwen2:0.5b runs great on.

```bash
Jul 06 16:55:56 systemd[1]: Started ollama.service - Ollama Service.
Jul 06 16:55:56 ollama[470]: Couldn't find '/usr/share/ollama/.ollama/id_ed25519'. Generating new private key.
Jul 06 16:55:56 ollama[470]: 2024/07/06 16:55:56 routes.go:1064: INFO server config env="map[CUDA_VISIBLE_DEVICES: GPU_DEVICE_ORDINAL: HIP_VISIBLE_DEVICES: HSA_OVERRIDE_GFX_VERSION: OLLAMA_DEBUG:false OLLAMA_FL>
Jul 06 16:55:56 ollama[470]: time=2024-07-06T16:55:56.869-04:00 level=INFO source=images.go:730 msg="total blobs: 0"
Jul 06 16:55:56 ollama[470]: time=2024-07-06T16:55:56.870-04:00 level=INFO source=images.go:737 msg="total unused blobs removed: 0"
Jul 06 16:55:56 ollama[470]: time=2024-07-06T16:55:56.870-04:00 level=INFO source=routes.go:1111 msg="Listening on 127.0.0.1:11434 (version 0.1.48)"
Jul 06 16:55:56 ollama[470]: time=2024-07-06T16:55:56.871-04:00 level=INFO source=payload.go:30 msg="extracting embedded files" dir=/tmp/ollama1676710808/runners
Jul 06 16:56:04 ollama[470]: time=2024-07-06T16:56:04.562-04:00 level=INFO source=payload.go:44 msg="Dynamic LLM libraries [cpu cpu_avx cpu_avx2 cuda_v11 rocm_v60101]"
Jul 06 16:56:04 ollama[470]: time=2024-07-06T16:56:04.574-04:00 level=INFO source=types.go:98 msg="inference compute" id=0 library=cpu compute="" driver=0.0 name="" total="6.5 GiB" available="6.4 GiB"
Jul 06 16:56:07 ollama[470]: [GIN] 2024/07/06 - 16:56:07 | 200 | 66.164µs | 127.0.0.1 | HEAD "/"
Jul 06 16:56:07 ollama[470]: [GIN] 2024/07/06 - 16:56:07 | 404 | 452.152µs | 127.0.0.1 | POST "/api/show"
Jul 06 16:56:09 ollama[470]: time=2024-07-06T16:56:09.329-04:00 level=INFO source=download.go:136 msg="downloading 8de95da68dc4 in 4 100 MB part(s)"
Jul 06 16:56:30 ollama[470]: time=2024-07-06T16:56:30.088-04:00 level=INFO source=download.go:136 msg="downloading 62fbfd9ed093 in 1 182 B part(s)"
Jul 06 16:56:31 ollama[470]: time=2024-07-06T16:56:31.820-04:00 level=INFO source=download.go:136 msg="downloading c156170b718e in 1 11 KB part(s)"
Jul 06 16:56:33 ollama[470]: time=2024-07-06T16:56:33.488-04:00 level=INFO source=download.go:136 msg="downloading f02dd72bb242 in 1 59 B part(s)"
Jul 06 16:56:35 ollama[470]: time=2024-07-06T16:56:35.152-04:00 level=INFO source=download.go:136 msg="downloading 2184ab82477b in 1 488 B part(s)"
Jul 06 16:56:37 ollama[470]: [GIN] 2024/07/06 - 16:56:37 | 200 | 30.055385932s | 127.0.0.1 | POST "/api/pull"
Jul 06 16:56:37 ollama[470]: [GIN] 2024/07/06 - 16:56:37 | 200 | 75.64794ms | 127.0.0.1 | POST "/api/show"
Jul 06 16:56:37 ollama[470]: time=2024-07-06T16:56:37.747-04:00 level=INFO source=memory.go:309 msg="offload to cpu" layers.requested=-1 layers.model=25 layers.offload=0 layers.split="" memory.available="[6.4 G>
Jul 06 16:56:37 ollama[470]: time=2024-07-06T16:56:37.748-04:00 level=INFO source=server.go:368 msg="starting llama server" cmd="/tmp/ollama1676710808/runners/cpu_avx2/ollama_llama_server --model /usr/share/oll>
Jul 06 16:56:37 ollama[470]: time=2024-07-06T16:56:37.774-04:00 level=INFO source=sched.go:382 msg="loaded runners" count=1
Jul 06 16:56:37 ollama[470]: time=2024-07-06T16:56:37.774-04:00 level=INFO source=server.go:556 msg="waiting for llama runner to start responding"
Jul 06 16:56:37 ollama[470]: time=2024-07-06T16:56:37.774-04:00 level=INFO source=server.go:594 msg="waiting for server to become available" status="llm server error"
Jul 06 16:56:37 ollama[777]: INFO [main] build info | build=1 commit="7c26775" tid="140636357584768" timestamp=1720299397
Jul 06 16:56:37 ollama[777]: INFO [main] system info | n_threads=4 n_threads_batch=-1 system_info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 >
Jul 06 16:56:37 ollama[777]: INFO [main] HTTP server listening | hostname="127.0.0.1" n_threads_http="3" port="45019" tid="140636357584768" timestamp=1720299397
Jul 06 16:56:37 ollama[470]: llama_model_loader: loaded meta data with 21 key-value pairs and 290 tensors from /usr/share/ollama/.ollama/models/blobs/sha256-8de95da68dc485c0889c205384c24642f83ca18d089559c977ffc>
Jul 06 16:56:37 ollama[470]: llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 0: general.architecture str = qwen2
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 1: general.name str = Qwen2-0.5B-Instruct
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 2: qwen2.block_count u32 = 24
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 3: qwen2.context_length u32 = 32768
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 4: qwen2.embedding_length u32 = 896
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 5: qwen2.feed_forward_length u32 = 4864
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 6: qwen2.attention.head_count u32 = 14
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 7: qwen2.attention.head_count_kv u32 = 2
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 8: qwen2.rope.freq_base f32 = 1000000.000000
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 9: qwen2.attention.layer_norm_rms_epsilon f32 = 0.000001
Jul 06 16:56:37 ollama[470]: llama_model_loader: - kv 10: general.file_type u32 = 2
Jul 06 16:56:38 ollama[470]: llama_model_loader: - kv 11: tokenizer.ggml.model str = gpt2
Jul 06 16:56:38 ollama[470]: llama_model_loader: - kv 12: tokenizer.ggml.pre str = qwen2
Jul 06 16:56:38 ollama[470]: time=2024-07-06T16:56:38.029-04:00 level=INFO source=server.go:594 msg="waiting for server to become available" status="llm server loading model"
Jul 06 16:56:38 ollama[470]: llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,151936] = ["!", "\"", "#", "$", "%", "&", "'", ...
Jul 06 16:56:38 ollama[470]: llama_model_loader: - kv 14: tokenizer.ggml.token_type arr[i32,151936] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
Jul 06 16:56:38 ollama[470]: llama_model_loader: - kv 15: tokenizer.ggml.merges arr[str,151387] = ["Ġ Ġ", "ĠĠ ĠĠ", "i n", "Ġ t",...
Jul 06 16:56:38 ollama[470]: llama_model_loader: - kv 16: tokenizer.ggml.eos_token_id u32 = 151645
Jul 06 16:56:38 ollama[470]: llama_model_loader: - kv 17: tokenizer.ggml.padding_token_id u32 = 151643
Jul 06 16:56:38 ollama[470]: llama_model_loader: - kv 18: tokenizer.ggml.bos_token_id u32 = 151643
Jul 06 16:56:38 ollama[470]: llama_model_loader: - kv 19: tokenizer.chat_template str = {% for message in messages %}{% if lo...
Jul 06 16:56:38 ollama[470]: llama_model_loader: - kv 20: general.quantization_version u32 = 2
Jul 06 16:56:38 ollama[470]: llama_model_loader: - type f32: 121 tensors
Jul 06 16:56:38 ollama[470]: llama_model_loader: - type q4_0: 168 tensors
Jul 06 16:56:38 ollama[470]: llama_model_loader: - type q8_0: 1 tensors
Jul 06 16:56:38 ollama[470]: llm_load_vocab: special tokens cache size = 293
Jul 06 16:56:38 ollama[470]: llm_load_vocab: token to piece cache size = 0.9338 MB
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: format = GGUF V3 (latest)
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: arch = qwen2
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: vocab type = BPE
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_vocab = 151936
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_merges = 151387
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_ctx_train = 32768
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_embd = 896
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_head = 14
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_head_kv = 2
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_layer = 24
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_rot = 64
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_embd_head_k = 64
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_embd_head_v = 64
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_gqa = 7
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_embd_k_gqa = 128
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_embd_v_gqa = 128
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: f_norm_eps = 0.0e+00
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: f_norm_rms_eps = 1.0e-06
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: f_clamp_kqv = 0.0e+00
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: f_max_alibi_bias = 0.0e+00
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: f_logit_scale = 0.0e+00
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_ff = 4864
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_expert = 0
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_expert_used = 0
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: causal attn = 1
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: pooling type = 0
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: rope type = 2
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: rope scaling = linear
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: freq_base_train = 1000000.0
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: freq_scale_train = 1
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: n_ctx_orig_yarn = 32768
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: rope_finetuned = unknown
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: ssm_d_conv = 0
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: ssm_d_inner = 0
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: ssm_d_state = 0
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: ssm_dt_rank = 0
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: model type = 1B
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: model ftype = Q4_0
Jul 06 16:56:38 ollama[470]: llm_load_print_meta: model params
= 494.03 M Jul 06 16:56:38 ollama[470]: llm_load_print_meta: model size = 330.17 MiB (5.61 BPW) Jul 06 16:56:38 ollama[470]: llm_load_print_meta: general.name = Qwen2-0.5B-Instruct Jul 06 16:56:38 ollama[470]: llm_load_print_meta: BOS token = 151643 '<|endoftext|>' Jul 06 16:56:38 ollama[470]: llm_load_print_meta: EOS token = 151645 '<|im_end|>' Jul 06 16:56:38 ollama[470]: llm_load_print_meta: PAD token = 151643 '<|endoftext|>' Jul 06 16:56:38 ollama[470]: llm_load_print_meta: LF token = 148848 'ÄĬ' Jul 06 16:56:38 ollama[470]: llm_load_print_meta: EOT token = 151645 '<|im_end|>' Jul 06 16:56:38 ollama[470]: llm_load_tensors: ggml ctx size = 0.14 MiB Jul 06 16:56:39 ollama[470]: llm_load_tensors: CPU buffer size = 330.17 MiB Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: n_ctx = 2048 Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: n_batch = 512 Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: n_ubatch = 512 Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: flash_attn = 0 Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: freq_base = 1000000.0 Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: freq_scale = 1 Jul 06 16:56:39 ollama[470]: llama_kv_cache_init: CPU KV buffer size = 24.00 MiB Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: KV self size = 24.00 MiB, K (f16): 12.00 MiB, V (f16): 12.00 MiB Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: CPU output buffer size = 0.58 MiB Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: CPU compute buffer size = 298.50 MiB Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: graph nodes = 846 Jul 06 16:56:39 ollama[470]: llama_new_context_with_model: graph splits = 1 Jul 06 16:57:00 ollama[777]: INFO [main] model loaded | tid="140636357584768" timestamp=1720299420 Jul 06 16:57:01 ollama[470]: time=2024-07-06T16:57:01.140-04:00 level=INFO source=server.go:599 msg="llama runner started in 23.37 seconds" Jul 06 16:57:01 ollama[470]: [GIN] 2024/07/06 - 16:57:01 | 200 | 23.451118015s | 127.0.0.1 | POST "/api/chat" Jul 06 16:59:26 ollama[470]: [GIN] 2024/07/06 - 16:59:26 | 200 | 2m15s | 127.0.0.1 | POST "/api/chat" ``` ### OS Linux ### GPU _No response_ ### CPU _No response_ ### Ollama version latest
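The warning at the top of this report just means the install script couldn't probe for a GPU. A minimal sketch of silencing it, assuming a Debian-based environment such as a Chromebook's Crostini container (the package names are the standard Debian ones; `lspci` ships in `pciutils`):

```bash
# Install the hardware-probing tools the install script looks for.
sudo apt-get update
sudo apt-get install -y pciutils lshw

# Probe manually to see what the script would detect.
lspci | grep -iE 'vga|3d|display'
```

On machines with no discrete GPU (as in the log above, where the server reports `library=cpu`), this only removes the warning; inference still runs on the CPU.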
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5519/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5519/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/1107
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1107/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1107/comments
https://api.github.com/repos/ollama/ollama/issues/1107/events
https://github.com/ollama/ollama/issues/1107
1,990,588,202
I_kwDOJ0Z1Ps52pfcq
1,107
MacOS Env Variables
{ "login": "ageorgios", "id": 3126351, "node_id": "MDQ6VXNlcjMxMjYzNTE=", "avatar_url": "https://avatars.githubusercontent.com/u/3126351?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ageorgios", "html_url": "https://github.com/ageorgios", "followers_url": "https://api.github.com/users/ageorgios/followers", "following_url": "https://api.github.com/users/ageorgios/following{/other_user}", "gists_url": "https://api.github.com/users/ageorgios/gists{/gist_id}", "starred_url": "https://api.github.com/users/ageorgios/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ageorgios/subscriptions", "organizations_url": "https://api.github.com/users/ageorgios/orgs", "repos_url": "https://api.github.com/users/ageorgios/repos", "events_url": "https://api.github.com/users/ageorgios/events{/privacy}", "received_events_url": "https://api.github.com/users/ageorgios/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
3
2023-11-13T12:42:35
2024-02-20T01:07:31
2024-02-20T01:07:31
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
How do I start Ollama with OLLAMA_ORIGINS=* and OLLAMA_HOST=127.0.0.1:11435 on macOS?
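A minimal sketch of two common answers, assuming the standard macOS setup; for the menu-bar app, variables set in a shell are not inherited, so `launchctl setenv` (followed by restarting the app) is the usual workaround:

```bash
# Option 1: run the server directly from a terminal with the variables inline.
OLLAMA_ORIGINS="*" OLLAMA_HOST=127.0.0.1:11435 ollama serve

# Option 2: make the variables visible to GUI apps, then restart the Ollama app.
launchctl setenv OLLAMA_ORIGINS "*"
launchctl setenv OLLAMA_HOST "127.0.0.1:11435"
```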
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1107/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1107/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8592
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8592/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8592/comments
https://api.github.com/repos/ollama/ollama/issues/8592/events
https://github.com/ollama/ollama/issues/8592
2,811,574,204
I_kwDOJ0Z1Ps6nlTe8
8,592
ollama fails to detect old models after update
{ "login": "nevakrien", "id": 101988414, "node_id": "U_kgDOBhQ4Pg", "avatar_url": "https://avatars.githubusercontent.com/u/101988414?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nevakrien", "html_url": "https://github.com/nevakrien", "followers_url": "https://api.github.com/users/nevakrien/followers", "following_url": "https://api.github.com/users/nevakrien/following{/other_user}", "gists_url": "https://api.github.com/users/nevakrien/gists{/gist_id}", "starred_url": "https://api.github.com/users/nevakrien/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nevakrien/subscriptions", "organizations_url": "https://api.github.com/users/nevakrien/orgs", "repos_url": "https://api.github.com/users/nevakrien/repos", "events_url": "https://api.github.com/users/nevakrien/events{/privacy}", "received_events_url": "https://api.github.com/users/nevakrien/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
1
2025-01-26T13:53:38
2025-01-26T14:03:02
2025-01-26T14:03:01
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? My setup uses a symlink for the Ollama model directory, and I have over a terabyte of model weights, so I would be very happy if there were a way to avoid downloading the entire thing again after the update. ### OS Linux ### GPU _No response_ ### CPU _No response_ ### Ollama version 0.5.7
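If the weights are still on disk and only the expected path changed, pointing the server back at them avoids any re-download. A minimal sketch, assuming a systemd install; `/mnt/big/models` is a hypothetical stand-in for wherever the symlink target actually lives:

```bash
# Tell the service where the existing model store is, instead of the
# default ~/.ollama/models (or /usr/share/ollama/.ollama/models).
sudo systemctl edit ollama   # add under [Service]: Environment="OLLAMA_MODELS=/mnt/big/models"
sudo systemctl restart ollama
ollama list                  # the old models should reappear
```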
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8592/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8592/timeline
null
duplicate
false
https://api.github.com/repos/ollama/ollama/issues/43
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/43/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/43/comments
https://api.github.com/repos/ollama/ollama/issues/43/events
https://github.com/ollama/ollama/pull/43
1,792,030,859
PR_kwDOJ0Z1Ps5U1ghW
43
more free
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-07-06T18:25:15
2023-07-06T19:04:59
2023-07-06T19:04:44
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/43", "html_url": "https://github.com/ollama/ollama/pull/43", "diff_url": "https://github.com/ollama/ollama/pull/43.diff", "patch_url": "https://github.com/ollama/ollama/pull/43.patch", "merged_at": "2023-07-06T19:04:44" }
null
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/43/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/43/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2781
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2781/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2781/comments
https://api.github.com/repos/ollama/ollama/issues/2781/events
https://github.com/ollama/ollama/issues/2781
2,156,655,954
I_kwDOJ0Z1Ps6Ai_VS
2,781
Specifying GPU Usage on Linux Systems
{ "login": "rongkunxue", "id": 129533180, "node_id": "U_kgDOB7iE_A", "avatar_url": "https://avatars.githubusercontent.com/u/129533180?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rongkunxue", "html_url": "https://github.com/rongkunxue", "followers_url": "https://api.github.com/users/rongkunxue/followers", "following_url": "https://api.github.com/users/rongkunxue/following{/other_user}", "gists_url": "https://api.github.com/users/rongkunxue/gists{/gist_id}", "starred_url": "https://api.github.com/users/rongkunxue/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rongkunxue/subscriptions", "organizations_url": "https://api.github.com/users/rongkunxue/orgs", "repos_url": "https://api.github.com/users/rongkunxue/repos", "events_url": "https://api.github.com/users/rongkunxue/events{/privacy}", "received_events_url": "https://api.github.com/users/rongkunxue/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
0
2024-02-27T13:54:46
2024-03-12T18:36:47
2024-03-12T18:36:47
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
To check whether the "ollama serve" process is running, execute `ps aux | grep "ollama serve"`. If the output matches the status shown below, it indicates normal operation. ![image-20240227214151610](https://p.ipic.vip/o8qagv.png). However, if the server is started automatically by the system, specifying the GPU becomes problematic. To address this, first stop the automatic startup service: ``` sudo service ollama stop ``` Then, set the location of the models as an environment variable: ``` export OLLAMA_MODELS=/usr/share/ollama/.ollama/models ``` Now, start the server: ``` CUDA_VISIBLE_DEVICES=0 ollama serve ``` Following these steps should resolve the issue. If you lack permission to stop the system's service, you can set the `OLLAMA_HOST` environment variable alongside `CUDA_VISIBLE_DEVICES=0 ollama serve` to bind a different address and port, effectively running a second instance next to the system one (see the sketch below). For further reference, please check the following GitHub resources: - [GitHub Issue #690](https://github.com/ollama/ollama/issues/690) - [API Documentation](https://github.com/ollama/ollama/blob/main/docs/api.md) - [GitHub Issue #1737](https://github.com/ollama/ollama/issues/1737)
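A minimal sketch of that second-instance approach, assuming an NVIDIA GPU and that port 11435 is free (the model name is just an example):

```bash
# Second instance pinned to GPU 0 on its own port, leaving the
# system-managed service untouched.
CUDA_VISIBLE_DEVICES=0 OLLAMA_HOST=127.0.0.1:11435 ollama serve &

# Point the client at that instance explicitly.
OLLAMA_HOST=127.0.0.1:11435 ollama run llama2
```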
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2781/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2781/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3143
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3143/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3143/comments
https://api.github.com/repos/ollama/ollama/issues/3143/events
https://github.com/ollama/ollama/pull/3143
2,186,793,487
PR_kwDOJ0Z1Ps5pp3la
3,143
.github: add issue templates
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-03-14T16:31:28
2024-03-14T22:19:11
2024-03-14T22:19:11
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3143", "html_url": "https://github.com/ollama/ollama/pull/3143", "diff_url": "https://github.com/ollama/ollama/pull/3143.diff", "patch_url": "https://github.com/ollama/ollama/pull/3143.patch", "merged_at": "2024-03-14T22:19:11" }
You can view a functional test version of this PR here: https://github.com/bmizerany/ollama-test-issues-tempates/issues/new/choose
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3143/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3143/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/20
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/20/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/20/comments
https://api.github.com/repos/ollama/ollama/issues/20/events
https://github.com/ollama/ollama/pull/20
1,781,183,958
PR_kwDOJ0Z1Ps5UQsJO
20
use difflib.get_close_matches
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-06-29T16:43:58
2023-06-30T18:54:12
2023-06-30T18:54:07
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/20", "html_url": "https://github.com/ollama/ollama/pull/20", "diff_url": "https://github.com/ollama/ollama/pull/20.diff", "patch_url": "https://github.com/ollama/ollama/pull/20.patch", "merged_at": "2023-06-30T18:54:07" }
same result with less code
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/20/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/20/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4120
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4120/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4120/comments
https://api.github.com/repos/ollama/ollama/issues/4120/events
https://github.com/ollama/ollama/pull/4120
2,277,097,855
PR_kwDOJ0Z1Ps5ucWdZ
4,120
feat: add support for flash_attn
{ "login": "sammcj", "id": 862951, "node_id": "MDQ6VXNlcjg2Mjk1MQ==", "avatar_url": "https://avatars.githubusercontent.com/u/862951?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sammcj", "html_url": "https://github.com/sammcj", "followers_url": "https://api.github.com/users/sammcj/followers", "following_url": "https://api.github.com/users/sammcj/following{/other_user}", "gists_url": "https://api.github.com/users/sammcj/gists{/gist_id}", "starred_url": "https://api.github.com/users/sammcj/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sammcj/subscriptions", "organizations_url": "https://api.github.com/users/sammcj/orgs", "repos_url": "https://api.github.com/users/sammcj/repos", "events_url": "https://api.github.com/users/sammcj/events{/privacy}", "received_events_url": "https://api.github.com/users/sammcj/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
34
2024-05-03T07:28:07
2024-05-23T01:13:29
2024-05-20T20:36:03
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4120", "html_url": "https://github.com/ollama/ollama/pull/4120", "diff_url": "https://github.com/ollama/ollama/pull/4120.diff", "patch_url": "https://github.com/ollama/ollama/pull/4120.patch", "merged_at": "2024-05-20T20:36:03" }
- Add Flash Attention support #4051 Only enabled by default when a supported CUDA version or Metal is detected; configurable via params and the API. Credit to @wanderingmeow who [took my broken idea and made it work](https://github.com/ollama/ollama/issues/4051#issuecomment-2092430887) 🎉 Fixes #4051
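For reference, a minimal sketch of toggling the feature server-wide; this assumes the `OLLAMA_FLASH_ATTENTION` environment variable that later releases document, so treat the exact knob name as an assumption rather than part of this PR:

```bash
# Enable flash attention for the server; the setting shows up in the
# startup "server config" log line.
OLLAMA_FLASH_ATTENTION=1 ollama serve
```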
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4120/reactions", "total_count": 26, "+1": 0, "-1": 0, "laugh": 0, "hooray": 13, "confused": 0, "heart": 0, "rocket": 13, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4120/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7948
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7948/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7948/comments
https://api.github.com/repos/ollama/ollama/issues/7948/events
https://github.com/ollama/ollama/pull/7948
2,719,880,843
PR_kwDOJ0Z1Ps6EJ6C2
7,948
Update README.md
{ "login": "taozuhong", "id": 13451009, "node_id": "MDQ6VXNlcjEzNDUxMDA5", "avatar_url": "https://avatars.githubusercontent.com/u/13451009?v=4", "gravatar_id": "", "url": "https://api.github.com/users/taozuhong", "html_url": "https://github.com/taozuhong", "followers_url": "https://api.github.com/users/taozuhong/followers", "following_url": "https://api.github.com/users/taozuhong/following{/other_user}", "gists_url": "https://api.github.com/users/taozuhong/gists{/gist_id}", "starred_url": "https://api.github.com/users/taozuhong/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/taozuhong/subscriptions", "organizations_url": "https://api.github.com/users/taozuhong/orgs", "repos_url": "https://api.github.com/users/taozuhong/repos", "events_url": "https://api.github.com/users/taozuhong/events{/privacy}", "received_events_url": "https://api.github.com/users/taozuhong/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-12-05T09:39:54
2024-12-10T21:48:44
2024-12-10T21:48:32
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7948", "html_url": "https://github.com/ollama/ollama/pull/7948", "diff_url": "https://github.com/ollama/ollama/pull/7948.diff", "patch_url": "https://github.com/ollama/ollama/pull/7948.patch", "merged_at": "2024-12-10T21:48:32" }
Add database app example: Kangaroo
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7948/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7948/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2433
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2433/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2433/comments
https://api.github.com/repos/ollama/ollama/issues/2433/events
https://github.com/ollama/ollama/pull/2433
2,127,905,234
PR_kwDOJ0Z1Ps5mhaps
2,433
refactor: Update LangChain dep in example
{ "login": "bracesproul", "id": 46789226, "node_id": "MDQ6VXNlcjQ2Nzg5MjI2", "avatar_url": "https://avatars.githubusercontent.com/u/46789226?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bracesproul", "html_url": "https://github.com/bracesproul", "followers_url": "https://api.github.com/users/bracesproul/followers", "following_url": "https://api.github.com/users/bracesproul/following{/other_user}", "gists_url": "https://api.github.com/users/bracesproul/gists{/gist_id}", "starred_url": "https://api.github.com/users/bracesproul/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bracesproul/subscriptions", "organizations_url": "https://api.github.com/users/bracesproul/orgs", "repos_url": "https://api.github.com/users/bracesproul/repos", "events_url": "https://api.github.com/users/bracesproul/events{/privacy}", "received_events_url": "https://api.github.com/users/bracesproul/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-02-09T22:00:51
2024-07-16T20:16:07
2024-07-16T20:16:03
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2433", "html_url": "https://github.com/ollama/ollama/pull/2433", "diff_url": "https://github.com/ollama/ollama/pull/2433.diff", "patch_url": "https://github.com/ollama/ollama/pull/2433.patch", "merged_at": null }
Update the example to use the `@langchain/community` package. Update `langchainjs.md` to use `langchain` & `@langchain/community`.
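For anyone reproducing the updated example, a minimal sketch of the dependency change (both are real npm packages; versions omitted deliberately):

```bash
# The community integrations were split out of the core package.
npm install langchain @langchain/community
```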
{ "login": "bracesproul", "id": 46789226, "node_id": "MDQ6VXNlcjQ2Nzg5MjI2", "avatar_url": "https://avatars.githubusercontent.com/u/46789226?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bracesproul", "html_url": "https://github.com/bracesproul", "followers_url": "https://api.github.com/users/bracesproul/followers", "following_url": "https://api.github.com/users/bracesproul/following{/other_user}", "gists_url": "https://api.github.com/users/bracesproul/gists{/gist_id}", "starred_url": "https://api.github.com/users/bracesproul/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bracesproul/subscriptions", "organizations_url": "https://api.github.com/users/bracesproul/orgs", "repos_url": "https://api.github.com/users/bracesproul/repos", "events_url": "https://api.github.com/users/bracesproul/events{/privacy}", "received_events_url": "https://api.github.com/users/bracesproul/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2433/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2433/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/1565
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1565/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1565/comments
https://api.github.com/repos/ollama/ollama/issues/1565/events
https://github.com/ollama/ollama/issues/1565
2,044,757,366
I_kwDOJ0Z1Ps554IV2
1,565
CausalLM 14B support
{ "login": "walking-octopus", "id": 46994949, "node_id": "MDQ6VXNlcjQ2OTk0OTQ5", "avatar_url": "https://avatars.githubusercontent.com/u/46994949?v=4", "gravatar_id": "", "url": "https://api.github.com/users/walking-octopus", "html_url": "https://github.com/walking-octopus", "followers_url": "https://api.github.com/users/walking-octopus/followers", "following_url": "https://api.github.com/users/walking-octopus/following{/other_user}", "gists_url": "https://api.github.com/users/walking-octopus/gists{/gist_id}", "starred_url": "https://api.github.com/users/walking-octopus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/walking-octopus/subscriptions", "organizations_url": "https://api.github.com/users/walking-octopus/orgs", "repos_url": "https://api.github.com/users/walking-octopus/repos", "events_url": "https://api.github.com/users/walking-octopus/events{/privacy}", "received_events_url": "https://api.github.com/users/walking-octopus/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
4
2023-12-16T13:53:48
2023-12-21T10:59:34
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
CausalLM 14B is a SOTA 14B chat model (take benchmarks with a grain of salt), fully compatible with LLaMA 2. - GGUF HF: https://huggingface.co/TheBloke/CausalLM-14B-GGUF - HF: https://huggingface.co/CausalLM/14B While this is probably the wrong place to post it, since ollama.ai/library doesn't have its own issue tracker, I had to contaminate the GitHub issues of the inference server with this request.
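Until a library entry exists, the GGUF files linked above can usually be imported directly. A minimal sketch, assuming one of TheBloke's quantizations; the exact filename below is illustrative, so substitute whichever quant you download:

```bash
# Fetch a quantization and register it with a one-line Modelfile.
curl -LO https://huggingface.co/TheBloke/CausalLM-14B-GGUF/resolve/main/causallm_14b.Q4_K_M.gguf
printf 'FROM ./causallm_14b.Q4_K_M.gguf\n' > Modelfile
ollama create causallm-14b -f Modelfile
ollama run causallm-14b
```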
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1565/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1565/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/1611
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1611/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1611/comments
https://api.github.com/repos/ollama/ollama/issues/1611/events
https://github.com/ollama/ollama/issues/1611
2,049,257,550
I_kwDOJ0Z1Ps56JTBO
1,611
Discord management
{ "login": "Luxadevi", "id": 116653852, "node_id": "U_kgDOBvP_HA", "avatar_url": "https://avatars.githubusercontent.com/u/116653852?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Luxadevi", "html_url": "https://github.com/Luxadevi", "followers_url": "https://api.github.com/users/Luxadevi/followers", "following_url": "https://api.github.com/users/Luxadevi/following{/other_user}", "gists_url": "https://api.github.com/users/Luxadevi/gists{/gist_id}", "starred_url": "https://api.github.com/users/Luxadevi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Luxadevi/subscriptions", "organizations_url": "https://api.github.com/users/Luxadevi/orgs", "repos_url": "https://api.github.com/users/Luxadevi/repos", "events_url": "https://api.github.com/users/Luxadevi/events{/privacy}", "received_events_url": "https://api.github.com/users/Luxadevi/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-12-19T18:52:02
2023-12-19T19:49:15
2023-12-19T19:49:15
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
A lot of users are complaining about the usability of the Ollama Discord. There is only one channel, and with the growing userbase it will sometimes be impossible to converse or help other users with issues. So I wanted to propose adding some dedicated channels for different use cases. A couple of examples: - General - Model Discussion - API Discussion - Integrations - Model File/Configuration Sharing - Help/Support Channel - Off-topic These are just a few examples, depending on the needs and ideas from the community. It would also be a good idea to have some sort of moderation team and rules on the Discord about what users are allowed to discuss. I also want to offer to make/implement these changes and be part of the moderation team. My Discord username is luxaplexx; you've probably seen me around :)
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1611/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1611/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/1120
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1120/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1120/comments
https://api.github.com/repos/ollama/ollama/issues/1120/events
https://github.com/ollama/ollama/pull/1120
1,992,054,407
PR_kwDOJ0Z1Ps5fX9ub
1,120
Add Maid to Community Integrations
{ "login": "danemadsen", "id": 11537699, "node_id": "MDQ6VXNlcjExNTM3Njk5", "avatar_url": "https://avatars.githubusercontent.com/u/11537699?v=4", "gravatar_id": "", "url": "https://api.github.com/users/danemadsen", "html_url": "https://github.com/danemadsen", "followers_url": "https://api.github.com/users/danemadsen/followers", "following_url": "https://api.github.com/users/danemadsen/following{/other_user}", "gists_url": "https://api.github.com/users/danemadsen/gists{/gist_id}", "starred_url": "https://api.github.com/users/danemadsen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/danemadsen/subscriptions", "organizations_url": "https://api.github.com/users/danemadsen/orgs", "repos_url": "https://api.github.com/users/danemadsen/repos", "events_url": "https://api.github.com/users/danemadsen/events{/privacy}", "received_events_url": "https://api.github.com/users/danemadsen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-11-14T06:08:24
2023-11-16T16:27:54
2023-11-16T16:27:53
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/1120", "html_url": "https://github.com/ollama/ollama/pull/1120", "diff_url": "https://github.com/ollama/ollama/pull/1120.diff", "patch_url": "https://github.com/ollama/ollama/pull/1120.patch", "merged_at": "2023-11-16T16:27:53" }
Maid is a cross-platform Flutter app for interfacing with GGUF / Llama models locally via Llama.cpp and remotely via Ollama. Currently Android, Windows and Linux are supported, with plans to support macOS and iOS in the future. Maid is working great with Ollama now, and once #991 goes through it will work even better.
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1120/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1120/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5346
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5346/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5346/comments
https://api.github.com/repos/ollama/ollama/issues/5346/events
https://github.com/ollama/ollama/issues/5346
2,379,170,456
I_kwDOJ0Z1Ps6Nz0KY
5,346
Gemma2:27b starts to output repetitive trash after a few generations
{ "login": "Mikhael-Danilov", "id": 536516, "node_id": "MDQ6VXNlcjUzNjUxNg==", "avatar_url": "https://avatars.githubusercontent.com/u/536516?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Mikhael-Danilov", "html_url": "https://github.com/Mikhael-Danilov", "followers_url": "https://api.github.com/users/Mikhael-Danilov/followers", "following_url": "https://api.github.com/users/Mikhael-Danilov/following{/other_user}", "gists_url": "https://api.github.com/users/Mikhael-Danilov/gists{/gist_id}", "starred_url": "https://api.github.com/users/Mikhael-Danilov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mikhael-Danilov/subscriptions", "organizations_url": "https://api.github.com/users/Mikhael-Danilov/orgs", "repos_url": "https://api.github.com/users/Mikhael-Danilov/repos", "events_url": "https://api.github.com/users/Mikhael-Danilov/events{/privacy}", "received_events_url": "https://api.github.com/users/Mikhael-Danilov/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-06-27T22:08:46
2024-07-10T22:15:41
2024-07-10T22:15:41
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Gemma2:27b start to output repetive trash after few generations <details><summary>Session log</summary> <p> ``` >>> Write a long story Please provide me with some information about the story you'd like me to write, such as: * **Genre:** What kind of story are you looking for? (e.g., fantasy, science fiction, romance, mystery, thriller) * **Characters:** Who are the main characters? What are they like? * **Setting:** Where and when does the story take place? * **Plot:** Give me a brief idea of what you want the story to be about. What is the main conflict? What are the characters' goals? * **Tone:** What kind of tone do you want the story to have? (e.g., serious, lighthearted, humorous, suspenseful) * **Length:** Do you have a preference for a shorter story or a longer one? The more details you give me, the better I can craft a story to your liking. For example, you could tell me: "Write a long story about a group of friends who discover a magical portal in their attic. The story should be a fantasy adventure with a sense of wonder and a bit of humor. The friends are three teenagers, a bit awkward and unsure of themselves, but eager to prove they can be brave. Give me a few ideas for the characters: * **A shy, bookish girl who's secretly a coding prodigy:** This could be a fun twist where her coding skills are essential for navigating the digital world of the portal. * **A sarcastic and witty boy who's secretly a romantic:** He might act tough and skeptical, but his true nature comes out in the adventure. * **A kind-hearted, goofy boy who's surprisingly resourceful:** He's the one who's always making jokes and cracking wise, but he has a knack for finding solutions to problems in the most unexpected ways. * **A brave and adventurous girl who's always the leader:** She's the first to jump into the unknown, but her courage often stems from a desire to protect her friends. Let me know what kind of story you'd like to hear, and I'll do my best to weave a tale of teenage bravery and a fantastical adventure!" I'm excited to see what kind of story we can create together! ✨ >>> a fantasy story about hedgehog Barnaby Bumblebottom wasn't your typical hedgehog. Oh, he had the prickly exterior, the love for juicy slugs, and the tendency to curl into a ball at the slightest hint of danger. But while other hedgehogs in the Whispering Woods might be content with a simple life of foraging and napping, Barnaby yearned for something more. He dreamed of adventure, of venturing beyond the familiar hedges and brambles of his home, of seeing the world beyond the burrow. But the truth was, Barnaby was terrified of anything new. He'd never even left his burrow to explore the garden he lived in. One day, while Barnaby was nervously nibbling on a particularly juicy earthworm, a gust of wind blew a shimmering, iridescent butterfly into the garden. It landed gracefully on a dandelion clock, and a thought struck him: "This is it. I'm going to finally get out of this boring burrow and see what's on the other side of that hedge." He'd heard whispers from the elder hedgehogs about a magical world beyond the garden fence, a world where adventures were plentiful and the tastiest beetles lived. But this was the first time Barnaby had seen it with his own eyes. "A beetle with wings that sparkle? That's not something you see every day, is it?" squeaked Hazel, a young hedgehog who was anything but shy. Barnaby, startled by her voice, quickly uncurled and retreated behind a mushroom. 
"I don't know, I don't go outside," he mumbled, his spines tingling with embarrassment. "But I heard you're a bit of a coding whiz, Hazel," a voice chuckled from behind the mushroom. "Why are you always making me out to be the fearless adventurer, the one who'll code their way out of any danger, and the one who'll always be the first to try something new," a voice chuckled. "I'm more of a 'let's stay in the burrow and eat' type of hedgehog." He was right. He was the first to create a program that could map the pathways of the Burrow Network, a secret web of tunnels connecting the gardens of the human world to the world of the magical and the mundane. Barnaby, the hedgehog, was a coding prodigy. He hadn't just mastered the usual programming languages; he'd cracked the code of the human world, a world of wonder and danger to him, a shy, bookish creature who preferred the safety of his cozy burrow. He lived in a world of ones and zeroes, of code and algorithms. His days were spent curled up in the comfort of his den, a world of circuits and algorithms, working on his latest project: a program to map the ancient pathways of the Burrow Network. Barnaby was convinced that he could access the 'Human World' through a code hidden in the leaves. "It's not hidden," he said, "it's a map. And I'm going to write a program to find it. A program to unlock the secrets of the garden, the mysteries of the world beyond." Barnaby's eyes, usually filled with the gleam of a thousand lines of code, sparkled with the thrill of the unknown. "It's a secret world," he said, "a world of magic and the mundane." Barnaby's words, though spoken with a hint of bravado, were tinged with a wistful yearning. He'd always been fascinated by the stories the older hedgehogs told about the world above. He'd seen it, too. The world above, with its vibrant flowers and bustling bees, wasn't all sunshine and roses. He knew that. Barnaby, the hedgehog, was the key. He knew the world above wasn't always safe. There were dangers, of course. But there was also beauty, adventure, and the thrill of discovery. He'd always wanted to experience it, but fear held him back. He'd always been a bit of a scaredy-cat. He'd heard tales of the dangers that lurked above, tales of owls and hawks and hawks disguised as owls. He imagined himself, a brave and adventurous hedgehog, venturing out into the world beyond his burrow, a world of endless possibilities. But the world above was a scary place for a hedgehog. "I'm not sure I'm cut out for this," he muttered, glancing nervously at the shadows lengthening across the garden. The other hedgehogs, who were used to his coding prowess, were not so familiar with his anxiety. He'd seen them all, the squirrels and the sparrows, the butterflies and the bees, the rabbits and the birds, and the flowers that bloomed with the most vibrant and beautiful colors. But Barnaby's code, it was different. He'd seen them, the rabbits and the birds, he'd seen them all. But the world of the den, with its safety and comfort, was a world he knew. He'd written a program, a map of the world, that connected them all. He'd mapped the paths of the world beyond, the paths of the den that was. He'd mapped the world of the den, the world of the human world. He'd even written a program to map the interactions between them. "But it's just a map, right?" "It's a map," he said, "but it's more than that. It's a key to understanding the world, a way to bridge the gap between the known and the unknown." 
Barnaby knew he was just a shy, introverted hedgehog, but he was determined to be the one who broke the code. He wanted to understand the world beyond the garden, the world of the humans. He'd been working on a program for years, a program to understand the complex rules and regulations that governed the world of humans. "It's a challenge, for sure," the coder said. The humans, Barnaby knew, were a mystery. He'd heard stories about them, of course, but they were always about their bravery. He'd seen their world up close, but he was a coder, not a warrior. A creature of logic and code, not of battles and bloodshed. But what if the world above was just a reflection of the world down? What if the skills he used to navigate the world of the internet were the same skills that could help him navigate the world of the humans? He was a master of the code, After all, he'd never needed a warrior's skills to survive the world above. He'd figured out the pattern of the slugs, The best time to catch them, the tastiest ones. He'd discovered the secret paths they followed, The algorithms that dictated their lives. He'd even created a program For his fellow hedgehogs, to help them navigate the world of the internet. But the world above, was not the den, and he'd always been a little afraid of leaving. He'd never been outside before. But he knew "The Lord of the Rings," J.R.R. Tolkien's epic fantasy novel, the "Lord of the world's" "The Hobbit" and "Lord of the Rings" are the story of a " world's" " "The Lord of the Rings" The Shire, the home of the hobbits, is a peaceful and idyllic land. "The world's of "The Shire" a. "The" The story "The Hobbit" and "The Lord of the Rings" are a "The Shire" "The Shire" is a place, and "The Lord of the Rings" is a "The Lord" "The Shire" was a place, and it was a peaceful and idyllic. "The Shire" "The Hobbit" The "of" " of "The " of the world. "The Lord of the Rings" " " " of the world. The hobbit "S The Shire " " The Shire" is a place, the "Lord The "The The Lord of the. "The Shire" a "The Shire" "The "The Hobbit" "The Shire" "The The world The " of " The "The Hobbit" "The This. "The " of the hobbits, and the Shire is a place. "The Hobbit" is a story about the "The the world a. the world is the world" The Shire "The the world" is a series of "The the The "The "The " is a place is a The, the " the world T The Hobbit The Shire" is a place of adventure. " The "The The Shire" of the world" The " "The The Shire The Shire " of "The Lord of the Rings" The. a. the, a "The "The Hobbit" is a "The " the "The Shire" the "The Lord" the "The Shire" is a place "The Hobbit" of the "The is a novel. The "The T. "The Shire" The hobbit "The Shire" was a place to "The Shire "The is a "The is a The The of the . " "The The Shire "The "The of "The " the" of the world . " the hobbits" of the world" The The a place " of the world the " "The of the a world The "The " The Hobbit "The The Hobbit The " is a The Hobbit" The Shire" The^C ``` </p> </details> ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.1.47
{ "login": "Mikhael-Danilov", "id": 536516, "node_id": "MDQ6VXNlcjUzNjUxNg==", "avatar_url": "https://avatars.githubusercontent.com/u/536516?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Mikhael-Danilov", "html_url": "https://github.com/Mikhael-Danilov", "followers_url": "https://api.github.com/users/Mikhael-Danilov/followers", "following_url": "https://api.github.com/users/Mikhael-Danilov/following{/other_user}", "gists_url": "https://api.github.com/users/Mikhael-Danilov/gists{/gist_id}", "starred_url": "https://api.github.com/users/Mikhael-Danilov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mikhael-Danilov/subscriptions", "organizations_url": "https://api.github.com/users/Mikhael-Danilov/orgs", "repos_url": "https://api.github.com/users/Mikhael-Danilov/repos", "events_url": "https://api.github.com/users/Mikhael-Danilov/events{/privacy}", "received_events_url": "https://api.github.com/users/Mikhael-Danilov/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5346/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5346/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/850
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/850/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/850/comments
https://api.github.com/repos/ollama/ollama/issues/850/events
https://github.com/ollama/ollama/issues/850
1,954,331,161
I_kwDOJ0Z1Ps50fLoZ
850
Slow download speed
{ "login": "youssef02", "id": 26381154, "node_id": "MDQ6VXNlcjI2MzgxMTU0", "avatar_url": "https://avatars.githubusercontent.com/u/26381154?v=4", "gravatar_id": "", "url": "https://api.github.com/users/youssef02", "html_url": "https://github.com/youssef02", "followers_url": "https://api.github.com/users/youssef02/followers", "following_url": "https://api.github.com/users/youssef02/following{/other_user}", "gists_url": "https://api.github.com/users/youssef02/gists{/gist_id}", "starred_url": "https://api.github.com/users/youssef02/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/youssef02/subscriptions", "organizations_url": "https://api.github.com/users/youssef02/orgs", "repos_url": "https://api.github.com/users/youssef02/repos", "events_url": "https://api.github.com/users/youssef02/events{/privacy}", "received_events_url": "https://api.github.com/users/youssef02/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
5
2023-10-20T13:47:01
2023-10-26T20:15:42
2023-10-26T20:13:46
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Is there a possible way to do a manual installation? I want to download the model through a fast proxy or something similar; the download speed for Mistral is 160 kbit/s and the model is 4 GB. Is it hosted on a different server, or is it possible to download it using a torrent or something that doesn't limit my download speed? I have WARP to bypass the Türkiye IP ban, but the speed is still causing me headaches. Can someone tell me what my options are here? Thanks in advance.
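For reference, a minimal sketch of one workaround: Ollama's FAQ documents routing downloads through a proxy via the standard `HTTPS_PROXY` variable, set on the server process that performs the pull. The proxy address below is a placeholder.

```bash
# Placeholder proxy endpoint; HTTPS_PROXY must be set on the server doing the pull
HTTPS_PROXY=https://proxy.example.com:3128 ollama serve &
sleep 2
ollama pull mistral
```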
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/850/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/850/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8048
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8048/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8048/comments
https://api.github.com/repos/ollama/ollama/issues/8048/events
https://github.com/ollama/ollama/issues/8048
2,733,305,696
I_kwDOJ0Z1Ps6i6u9g
8,048
Retain existing systemd control file when upgrading ollama versions
{ "login": "greenmotion", "id": 26799168, "node_id": "MDQ6VXNlcjI2Nzk5MTY4", "avatar_url": "https://avatars.githubusercontent.com/u/26799168?v=4", "gravatar_id": "", "url": "https://api.github.com/users/greenmotion", "html_url": "https://github.com/greenmotion", "followers_url": "https://api.github.com/users/greenmotion/followers", "following_url": "https://api.github.com/users/greenmotion/following{/other_user}", "gists_url": "https://api.github.com/users/greenmotion/gists{/gist_id}", "starred_url": "https://api.github.com/users/greenmotion/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/greenmotion/subscriptions", "organizations_url": "https://api.github.com/users/greenmotion/orgs", "repos_url": "https://api.github.com/users/greenmotion/repos", "events_url": "https://api.github.com/users/greenmotion/events{/privacy}", "received_events_url": "https://api.github.com/users/greenmotion/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
2
2024-12-11T15:34:34
2024-12-11T17:21:22
2024-12-11T17:21:22
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
My Ollama process is managed via systemd, and the Ollama systemd file contains several "Environment=" variables. Unfortunately, every time I upgrade Ollama, the installer resets its systemd file and I need to re-enter those custom Environment variables. Would it be possible for the installer to NOT overwrite the systemd control file, or perhaps retain customizations? -GM
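A minimal sketch of a workaround using a standard systemd drop-in, which systemd merges over the packaged unit file and which the installer's unit replacement does not touch (the environment variable shown is just an example):

```bash
# Create a drop-in override; it survives upgrades that rewrite ollama.service
sudo mkdir -p /etc/systemd/system/ollama.service.d
sudo tee /etc/systemd/system/ollama.service.d/override.conf >/dev/null <<'EOF'
[Service]
Environment="OLLAMA_HOST=0.0.0.0"
EOF
sudo systemctl daemon-reload
sudo systemctl restart ollama
```

`sudo systemctl edit ollama` creates the same override file interactively.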
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8048/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8048/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7491
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7491/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7491/comments
https://api.github.com/repos/ollama/ollama/issues/7491/events
https://github.com/ollama/ollama/issues/7491
2,632,971,850
I_kwDOJ0Z1Ps6c7_ZK
7,491
cuda runner fails to build correctly without CUDA_PATH set
{ "login": "auiphc", "id": 124700184, "node_id": "U_kgDOB27GGA", "avatar_url": "https://avatars.githubusercontent.com/u/124700184?v=4", "gravatar_id": "", "url": "https://api.github.com/users/auiphc", "html_url": "https://github.com/auiphc", "followers_url": "https://api.github.com/users/auiphc/followers", "following_url": "https://api.github.com/users/auiphc/following{/other_user}", "gists_url": "https://api.github.com/users/auiphc/gists{/gist_id}", "starred_url": "https://api.github.com/users/auiphc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/auiphc/subscriptions", "organizations_url": "https://api.github.com/users/auiphc/orgs", "repos_url": "https://api.github.com/users/auiphc/repos", "events_url": "https://api.github.com/users/auiphc/events{/privacy}", "received_events_url": "https://api.github.com/users/auiphc/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 5860134234, "node_id": "LA_kwDOJ0Z1Ps8AAAABXUqNWg", "url": "https://api.github.com/repos/ollama/ollama/labels/windows", "name": "windows", "color": "0052CC", "default": false, "description": "" }, { "id": 7700262114, "node_id": "LA_kwDOJ0Z1Ps8AAAAByvis4g", "url": "https://api.github.com/repos/ollama/ollama/labels/build", "name": "build", "color": "006b75", "default": false, "description": "Issues relating to building ollama from source" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
1
2024-11-04T14:28:43
2024-12-10T17:47:20
2024-12-10T17:47:20
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I'm following this https://github.com/ollama/ollama/blob/main/docs/development.md#windows to build ollama on windows but I got a segmentation fault: ``` (base) PS C:\Users\50728\Documents\code\localcai-server\ollama> make -j 5 /usr/bin/make -C llama all make[1]: Entering directory '/c/Users/50728/Documents/code/localcai-server/ollama/llama' rm -rf C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/build/windows/amd64/default C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/build/windows/amd64/cuda_v12 C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/build/windows/amd64/metal C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/build/windows/amd64/cpu C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/build/windows/amd64/cpu_avx C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/build/windows/amd64/cpu_avx2 /usr/bin/make -f make/Makefile.default /usr/bin/make -f make/Makefile.cuda_v12 make[2]: Entering directory '/c/Users/50728/Documents/code/localcai-server/ollama/llama' make[2]: Entering directory '/c/Users/50728/Documents/code/localcai-server/ollama/llama' make[2]: Nothing to be done for 'all'. make[2]: Leaving directory '/c/Users/50728/Documents/code/localcai-server/ollama/llama' /bin/nvcc --shared -lcuda -LC:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/dist/windows-amd64/lib/ollama -lcublas -lcudart -lcublasLt C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/acc.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/arange.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/argsort.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/binbcast.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/clamp.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/concat.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/conv-transpose-1d.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/convert.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/cpy.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/cross-entropy-loss.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/diagmask.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/dmmv.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/getrows.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/im2col.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/mmq.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/mmvq.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/norm.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/opt-step-adamw.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/out-prod.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/pad.cuda_v12.obj 
C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/pool2d.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/quantize.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/rope.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/rwkv-wkv.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/scale.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/softmax.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/sum.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/sumrows.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/tsembd.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/unary.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/upscale.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-iq1_s.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-iq2_s.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-iq2_xs.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-iq3_s.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-iq4_nl.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-iq4_xs.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-q2_k.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-q3_k.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-q4_0.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-q4_1.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-q4_k.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-q5_0.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-q5_1.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-q5_k.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-q6_k.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/mmq-instance-q8_0.cuda_v12.obj 
C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-backend.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-alloc.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-quants.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/sgemm.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-aarch64.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/fattn-tile-f16.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/fattn-tile-f32.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/fattn.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cuda_v12.obj C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cuda_v12.obj -o C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/runners/cuda_v12/ggml_cuda_v12.dll Internal error make[2]: *** [make/gpu.make:88: C:/Users/50728/DOCUME~1/code/LOCALC~1/ollama/llama/build/windows-amd64/runners/cuda_v12/ggml_cuda_v12.dll] Segmentation fault make[2]: Leaving directory '/c/Users/50728/Documents/code/localcai-server/ollama/llama' make[1]: *** [Makefile:41: cuda_v12] Error 2 
make[1]: Leaving directory '/c/Users/50728/Documents/code/localcai-server/ollama/llama' make: *** [Makefile:4: all] Error 2 ``` My environment is: ``` (base) PS C:\Users\50728\Documents\code\localcai-server\ollama> make --version GNU Make 4.4.1 Built for x86_64-pc-msys Copyright (C) 1988-2023 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html> This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. (base) PS C:\Users\50728\Documents\code\localcai-server\ollama> nvcc --version nvcc: NVIDIA (R) Cuda compiler driver Copyright (c) 2005-2024 NVIDIA Corporation Built on Thu_Mar_28_02:30:10_Pacific_Daylight_Time_2024 Cuda compilation tools, release 12.4, V12.4.131 Build cuda_12.4.r12.4/compiler.34097967_0 (base) PS C:\Users\50728\Documents\code\localcai-server\ollama> cl Microsoft (R) C/C++ Optimizing Compiler Version 19.41.34123 for x64 Copyright (C) Microsoft Corporation. All rights reserved. ``` What I have done to solve the previous issues is: 1. link nvcc into msys2 with `ln -s "/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.4/bin/nvcc" /usr/bin/nvcc`, since make fails to find nvcc (`/bin/sh: line 1: /bin/nvcc: No such file or directory`) 2. copy all files from `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\include` to `./llama`, since the build fails to find `cuda_runtime.h` ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version _No response_
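Given the issue title, a hedged sketch of the intended setup instead of symlinking nvcc and copying headers: point `CUDA_PATH` at the toolkit so the build can locate `nvcc` and the CUDA headers itself (toolkit path taken from the nvcc output above):

```bash
# Run in the MSYS2 shell before building
export CUDA_PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.4"
export PATH="$CUDA_PATH/bin:$PATH"
make -j 5
```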
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7491/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7491/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5479
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5479/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5479/comments
https://api.github.com/repos/ollama/ollama/issues/5479/events
https://github.com/ollama/ollama/pull/5479
2,390,296,663
PR_kwDOJ0Z1Ps50aodv
5,479
Add device cmd/api to query the device information
{ "login": "yeahdongcn", "id": 2831050, "node_id": "MDQ6VXNlcjI4MzEwNTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2831050?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yeahdongcn", "html_url": "https://github.com/yeahdongcn", "followers_url": "https://api.github.com/users/yeahdongcn/followers", "following_url": "https://api.github.com/users/yeahdongcn/following{/other_user}", "gists_url": "https://api.github.com/users/yeahdongcn/gists{/gist_id}", "starred_url": "https://api.github.com/users/yeahdongcn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yeahdongcn/subscriptions", "organizations_url": "https://api.github.com/users/yeahdongcn/orgs", "repos_url": "https://api.github.com/users/yeahdongcn/repos", "events_url": "https://api.github.com/users/yeahdongcn/events{/privacy}", "received_events_url": "https://api.github.com/users/yeahdongcn/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
0
2024-07-04T08:43:49
2024-07-04T11:20:23
null
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5479", "html_url": "https://github.com/ollama/ollama/pull/5479", "diff_url": "https://github.com/ollama/ollama/pull/5479.diff", "patch_url": "https://github.com/ollama/ollama/pull/5479.patch", "merged_at": null }
I'd like to use https://github.com/aidatatools/ollama-benchmark to benchmark Ollama on MTGPU (https://github.com/ollama/ollama/pull/5353), and it depends on the GPUtil Python package to check the available VRAM when choosing the correct LLM models. So I plan to add a new `device` cmd/api to query the device information: ```bash # On M1 Mac ➜ ollama device ID NAME LIBRARY TOTAL MEMORY FREE MEMORY 0 Unknown metal 10.7 GiB 10.7 GiB # API ➜ curl localhost:11434/api/device {"devices":[{"id":"0","name":"","library":"metal","total_memory":11453251584,"free_memory":11453251584}]} # On Ubuntu Linux with MTGPU ➜ ollama device ID NAME LIBRARY TOTAL MEMORY FREE MEMORY 0 1ed5:0323 musa 48.0 GiB 48.0 GiB ```
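A hedged sketch of the benchmark-side use this would enable, keying off the JSON shape shown above (`jq` is assumed to be available):

```bash
# Report free VRAM per device; field names follow the JSON response above
curl -s localhost:11434/api/device \
  | jq -r '.devices[] | "\(.library): \(.free_memory / 1073741824 | floor) GiB free"'
```

A benchmark harness could pick a model size from this number instead of depending on GPUtil.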
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5479/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5479/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2733
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2733/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2733/comments
https://api.github.com/repos/ollama/ollama/issues/2733/events
https://github.com/ollama/ollama/issues/2733
2,152,403,316
I_kwDOJ0Z1Ps6ASxF0
2,733
Add FireFunctionV1 model to enable SOTA Function Calling
{ "login": "illuminatedjavascript", "id": 84572335, "node_id": "MDQ6VXNlcjg0NTcyMzM1", "avatar_url": "https://avatars.githubusercontent.com/u/84572335?v=4", "gravatar_id": "", "url": "https://api.github.com/users/illuminatedjavascript", "html_url": "https://github.com/illuminatedjavascript", "followers_url": "https://api.github.com/users/illuminatedjavascript/followers", "following_url": "https://api.github.com/users/illuminatedjavascript/following{/other_user}", "gists_url": "https://api.github.com/users/illuminatedjavascript/gists{/gist_id}", "starred_url": "https://api.github.com/users/illuminatedjavascript/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/illuminatedjavascript/subscriptions", "organizations_url": "https://api.github.com/users/illuminatedjavascript/orgs", "repos_url": "https://api.github.com/users/illuminatedjavascript/repos", "events_url": "https://api.github.com/users/illuminatedjavascript/events{/privacy}", "received_events_url": "https://api.github.com/users/illuminatedjavascript/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-02-24T16:58:30
2024-02-24T17:09:50
2024-02-24T17:09:29
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
A good SOTA function-calling model would be a great addition to accompany the existing embeddings and general chat/instruct models. Not sure if this is the place to request model additions, but I wanted to point out the Fireworks.ai FireFunctionV1 model that enables SOTA function calling. https://huggingface.co/fireworks-ai/firefunction-v1 Since we have a good embeddings model with Nomic, I thought a good function-calling model might be a great addition to Ollama as well. If this isn't the place for it, sorry in advance.
{ "login": "illuminatedjavascript", "id": 84572335, "node_id": "MDQ6VXNlcjg0NTcyMzM1", "avatar_url": "https://avatars.githubusercontent.com/u/84572335?v=4", "gravatar_id": "", "url": "https://api.github.com/users/illuminatedjavascript", "html_url": "https://github.com/illuminatedjavascript", "followers_url": "https://api.github.com/users/illuminatedjavascript/followers", "following_url": "https://api.github.com/users/illuminatedjavascript/following{/other_user}", "gists_url": "https://api.github.com/users/illuminatedjavascript/gists{/gist_id}", "starred_url": "https://api.github.com/users/illuminatedjavascript/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/illuminatedjavascript/subscriptions", "organizations_url": "https://api.github.com/users/illuminatedjavascript/orgs", "repos_url": "https://api.github.com/users/illuminatedjavascript/repos", "events_url": "https://api.github.com/users/illuminatedjavascript/events{/privacy}", "received_events_url": "https://api.github.com/users/illuminatedjavascript/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2733/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2733/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3070
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3070/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3070/comments
https://api.github.com/repos/ollama/ollama/issues/3070/events
https://github.com/ollama/ollama/pull/3070
2,180,461,765
PR_kwDOJ0Z1Ps5pUN3E
3,070
Add docs explaining GPU selection env vars
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-03-11T23:55:08
2024-03-12T19:59:53
2024-03-12T18:36:46
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3070", "html_url": "https://github.com/ollama/ollama/pull/3070", "diff_url": "https://github.com/ollama/ollama/pull/3070.diff", "patch_url": "https://github.com/ollama/ollama/pull/3070.patch", "merged_at": "2024-03-12T18:36:46" }
Fixes #2781 Fixes #2156
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3070/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3070/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2373
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2373/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2373/comments
https://api.github.com/repos/ollama/ollama/issues/2373/events
https://github.com/ollama/ollama/issues/2373
2,121,322,173
I_kwDOJ0Z1Ps5-cM69
2,373
generate-rocm failing for all builds
{ "login": "rootedbox", "id": 3997890, "node_id": "MDQ6VXNlcjM5OTc4OTA=", "avatar_url": "https://avatars.githubusercontent.com/u/3997890?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rootedbox", "html_url": "https://github.com/rootedbox", "followers_url": "https://api.github.com/users/rootedbox/followers", "following_url": "https://api.github.com/users/rootedbox/following{/other_user}", "gists_url": "https://api.github.com/users/rootedbox/gists{/gist_id}", "starred_url": "https://api.github.com/users/rootedbox/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rootedbox/subscriptions", "organizations_url": "https://api.github.com/users/rootedbox/orgs", "repos_url": "https://api.github.com/users/rootedbox/repos", "events_url": "https://api.github.com/users/rootedbox/events{/privacy}", "received_events_url": "https://api.github.com/users/rootedbox/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-02-06T17:21:10
2024-02-06T17:41:04
2024-02-06T17:41:04
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
https://github.com/ollama/ollama/actions/runs/7802488482/job/21280059103
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2373/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2373/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/1002
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1002/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1002/comments
https://api.github.com/repos/ollama/ollama/issues/1002/events
https://github.com/ollama/ollama/issues/1002
1,977,390,851
I_kwDOJ0Z1Ps513JcD
1,002
Error: max retries exceeded
{ "login": "givensuman", "id": 16063606, "node_id": "MDQ6VXNlcjE2MDYzNjA2", "avatar_url": "https://avatars.githubusercontent.com/u/16063606?v=4", "gravatar_id": "", "url": "https://api.github.com/users/givensuman", "html_url": "https://github.com/givensuman", "followers_url": "https://api.github.com/users/givensuman/followers", "following_url": "https://api.github.com/users/givensuman/following{/other_user}", "gists_url": "https://api.github.com/users/givensuman/gists{/gist_id}", "starred_url": "https://api.github.com/users/givensuman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/givensuman/subscriptions", "organizations_url": "https://api.github.com/users/givensuman/orgs", "repos_url": "https://api.github.com/users/givensuman/repos", "events_url": "https://api.github.com/users/givensuman/events{/privacy}", "received_events_url": "https://api.github.com/users/givensuman/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2023-11-04T15:23:22
2023-11-05T02:12:13
2023-11-05T02:12:13
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello, I'm trying to get up and running, but I keep getting this error: ```bash pulling manifest pulling 66002b78c70a... 85% |██████████████████████████████████████████████████████ | (1.7/2.0 GB) [9m32s:0s] Error: max retries exceeded ``` The above was me trying `ollama run orca-mini`, but it happens with other models around the 1.6-1.7GB mark. I am running this in WSL with Ubuntu as the virtual distro. It seems like a simple issue but I don't see any reference to it here, so was hoping to at least document it. Does anyone know what `Error: max retries exceeded` means and how to resolve it?
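For anyone hitting the same thing: pulls generally resume from partial layer downloads, so a blunt retry loop can often ride out intermittent connection resets. A sketch, assuming the CLI exits non-zero on failure:

```bash
# Retry until the pull completes; partially downloaded layers are resumed
until ollama pull orca-mini; do
  echo "pull failed, retrying in 5s..." >&2
  sleep 5
done
```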
{ "login": "givensuman", "id": 16063606, "node_id": "MDQ6VXNlcjE2MDYzNjA2", "avatar_url": "https://avatars.githubusercontent.com/u/16063606?v=4", "gravatar_id": "", "url": "https://api.github.com/users/givensuman", "html_url": "https://github.com/givensuman", "followers_url": "https://api.github.com/users/givensuman/followers", "following_url": "https://api.github.com/users/givensuman/following{/other_user}", "gists_url": "https://api.github.com/users/givensuman/gists{/gist_id}", "starred_url": "https://api.github.com/users/givensuman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/givensuman/subscriptions", "organizations_url": "https://api.github.com/users/givensuman/orgs", "repos_url": "https://api.github.com/users/givensuman/repos", "events_url": "https://api.github.com/users/givensuman/events{/privacy}", "received_events_url": "https://api.github.com/users/givensuman/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1002/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/1002/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/875
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/875/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/875/comments
https://api.github.com/repos/ollama/ollama/issues/875/events
https://github.com/ollama/ollama/issues/875
1,955,746,834
I_kwDOJ0Z1Ps50klQS
875
Embed a UI with Ollama
{ "login": "coolaj86", "id": 122831, "node_id": "MDQ6VXNlcjEyMjgzMQ==", "avatar_url": "https://avatars.githubusercontent.com/u/122831?v=4", "gravatar_id": "", "url": "https://api.github.com/users/coolaj86", "html_url": "https://github.com/coolaj86", "followers_url": "https://api.github.com/users/coolaj86/followers", "following_url": "https://api.github.com/users/coolaj86/following{/other_user}", "gists_url": "https://api.github.com/users/coolaj86/gists{/gist_id}", "starred_url": "https://api.github.com/users/coolaj86/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/coolaj86/subscriptions", "organizations_url": "https://api.github.com/users/coolaj86/orgs", "repos_url": "https://api.github.com/users/coolaj86/repos", "events_url": "https://api.github.com/users/coolaj86/events{/privacy}", "received_events_url": "https://api.github.com/users/coolaj86/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-10-22T03:40:04
2024-01-23T12:54:06
2023-10-24T21:11:09
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Re: #874, https://github.com/ollama-webui/ollama-webui/pull/10 Once this next commit makes it into `ollama-webui` (which seems to be among the best of all the local ChatGPT UIs that I've found so far), it would be trivial to embed that interface directly into `ollama` so that it becomes more accessible to more people. Using some `git tag` magic and a little other automation, this could be seamless and fool-proof to automate for releases.
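A rough sketch of the tag-pinned automation being suggested; the tag, build commands, and destination path are all hypothetical and would need to match the actual ollama-webui build:

```bash
# Hypothetical release step: pin the UI to a tag, build it, bundle the output
UI_TAG="v0.1.0"  # hypothetical ollama-webui tag matched to the ollama release
git clone --branch "$UI_TAG" --depth 1 https://github.com/ollama-webui/ollama-webui.git
(cd ollama-webui && npm ci && npm run build)
cp -r ollama-webui/dist ./server/webui  # hypothetical embed location inside ollama
```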
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/875/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/875/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3646
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3646/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3646/comments
https://api.github.com/repos/ollama/ollama/issues/3646/events
https://github.com/ollama/ollama/issues/3646
2,243,140,354
I_kwDOJ0Z1Ps6Fs5sC
3,646
verifying github.com/google/flatbuffers@v1.12.0: checksum mismatch
{ "login": "juhp", "id": 82731, "node_id": "MDQ6VXNlcjgyNzMx", "avatar_url": "https://avatars.githubusercontent.com/u/82731?v=4", "gravatar_id": "", "url": "https://api.github.com/users/juhp", "html_url": "https://github.com/juhp", "followers_url": "https://api.github.com/users/juhp/followers", "following_url": "https://api.github.com/users/juhp/following{/other_user}", "gists_url": "https://api.github.com/users/juhp/gists{/gist_id}", "starred_url": "https://api.github.com/users/juhp/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/juhp/subscriptions", "organizations_url": "https://api.github.com/users/juhp/orgs", "repos_url": "https://api.github.com/users/juhp/repos", "events_url": "https://api.github.com/users/juhp/events{/privacy}", "received_events_url": "https://api.github.com/users/juhp/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
3
2024-04-15T09:16:11
2024-04-17T07:47:56
2024-04-15T19:17:25
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? A clean checkout and build of ollama fails like google/flatbuffers#6466 ### What did you expect to see? No checksum error The easiest solution is to bump the flatbuffers dependency to v1.12.1, which works around this upstream issue. ### Steps to reproduce ```shellsession [~/go]$ git clone -q https://github.com/ollama/ollama.git [~/go]$ cd ollama [~/go/ollama:main]$ go generate ./... go: downloading github.com/google/uuid v1.0.0 verifying github.com/google/flatbuffers@v1.12.0: checksum mismatch downloaded: h1:N8EguYFm2wwdpoNcpchQY0tPs85vOJkboFb2dPxmixo= go.sum: h1:/PtAHvnBY4Kqnx/xCQ3OIV9uYcSFGScBsWI3Oogeh6w= SECURITY ERROR This download does NOT match an earlier download recorded in go.sum. The bits may have been replaced on the origin server, or an attacker may have intercepted the download attempt. For more information, see 'go help module-auth'. ``` ### Are there any recent changes that introduced the issue? _No response_ ### OS Linux ### Architecture amd64 ### Platform _No response_ ### Ollama version _No response_ ### GPU _No response_ ### GPU info _No response_ ### CPU _No response_ ### Other software _No response_
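A sketch of the suggested workaround from a clean checkout:

```bash
# Bump the dependency past the re-tagged v1.12.0 release, then regenerate
go get github.com/google/flatbuffers@v1.12.1
go mod tidy
go generate ./...
```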
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3646/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3646/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/981
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/981/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/981/comments
https://api.github.com/repos/ollama/ollama/issues/981/events
https://github.com/ollama/ollama/issues/981
1,975,231,759
I_kwDOJ0Z1Ps51u6UP
981
Add message history to the `/generate` API
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
0
2023-11-03T00:11:26
2023-12-04T23:01:07
2023-12-04T23:01:07
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Add a new `messages` field to the `/generate` API that takes an array of past messages in the conversation history. This field can replace `context` (although we will continue to support both for now).
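A sketch of what a request with the proposed field might look like; the model name and message payload are illustrative:

```bash
curl http://localhost:11434/api/generate -d '{
  "model": "llama2",
  "messages": [
    {"role": "user", "content": "hi"},
    {"role": "assistant", "content": "Hello! How can I help?"},
    {"role": "user", "content": "what did I just say?"}
  ]
}'
```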
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/981/reactions", "total_count": 11, "+1": 5, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 6, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/981/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5537
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5537/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5537/comments
https://api.github.com/repos/ollama/ollama/issues/5537/events
https://github.com/ollama/ollama/issues/5537
2,394,404,944
I_kwDOJ0Z1Ps6Ot7hQ
5,537
deepseek code v2 inference downgrade after a few inference
{ "login": "kidoln", "id": 886552, "node_id": "MDQ6VXNlcjg4NjU1Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/886552?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kidoln", "html_url": "https://github.com/kidoln", "followers_url": "https://api.github.com/users/kidoln/followers", "following_url": "https://api.github.com/users/kidoln/following{/other_user}", "gists_url": "https://api.github.com/users/kidoln/gists{/gist_id}", "starred_url": "https://api.github.com/users/kidoln/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kidoln/subscriptions", "organizations_url": "https://api.github.com/users/kidoln/orgs", "repos_url": "https://api.github.com/users/kidoln/repos", "events_url": "https://api.github.com/users/kidoln/events{/privacy}", "received_events_url": "https://api.github.com/users/kidoln/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
0
2024-07-08T01:37:39
2024-07-08T01:44:22
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I am using Ollama with the DeepSeek Coder model to make fill-in-the-middle inference requests, and it was working fine initially. However, after more than 50 consecutive inferences, the model output suddenly became abnormal, producing nonsensical content. The abnormal output looks like this: #generategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerategenerate Across these calls, the request parameters remain consistent. ``` data = { "model": "deepseek-coder-v2:latest", "prompt": prompt, "raw": True, "stream": False, "keep_alive": -1, "seed": 40, "options": { "num_predict": num_p, "top_k": 1, "top_p": 0.0, "temperature": 0.0, "repeat_penalty": 1.1, } } ``` ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.1.48
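A hedged repro sketch of the pattern described; the FIM prompt and `num_predict` value are placeholders, and the payload mirrors the parameters above:

```bash
# Send the same payload repeatedly and watch for the output to degrade
for i in $(seq 1 60); do
  curl -s localhost:11434/api/generate -d '{
    "model": "deepseek-coder-v2:latest",
    "prompt": "<FIM prompt here>",
    "raw": true, "stream": false, "keep_alive": -1, "seed": 40,
    "options": {"num_predict": 64, "top_k": 1, "top_p": 0.0,
                "temperature": 0.0, "repeat_penalty": 1.1}
  }' | jq -r '.response' | head -c 100
  echo " [call $i]"
done
```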
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5537/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5537/timeline
null
null
false