column                      dtype           range / classes
url                         stringlengths   51 .. 54
repository_url              stringclasses   1 value
labels_url                  stringlengths   65 .. 68
comments_url                stringlengths   60 .. 63
events_url                  stringlengths   58 .. 61
html_url                    stringlengths   39 .. 44
id                          int64           1.78B .. 2.82B
node_id                     stringlengths   18 .. 19
number                      int64           1 .. 8.69k
title                       stringlengths   1 .. 382
user                        dict
labels                      listlengths     0 .. 5
state                       stringclasses   2 values
locked                      bool            1 class
assignee                    dict
assignees                   listlengths     0 .. 2
milestone                   null
comments                    int64           0 .. 323
created_at                  timestamp[s]
updated_at                  timestamp[s]
closed_at                   timestamp[s]
author_association          stringclasses   4 values
sub_issues_summary          dict
active_lock_reason          null
draft                       bool            2 classes
pull_request                dict
body                        stringlengths   2 .. 118k
closed_by                   dict
reactions                   dict
timeline_url                stringlengths   60 .. 63
performed_via_github_app    null
state_reason                stringclasses   4 values
is_pull_request             bool            2 classes
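The column summary above describes one row per GitHub issue or pull request in the ollama/ollama repository, and the rows that follow list the values for each column in that order. As a minimal sketch, assuming only the column names and dtypes listed above, one row could be viewed as a typed record like this (the class name and the Optional/dict typing choices are illustrative assumptions, not part of the dataset):

```python
# Sketch only: a typed view of one row of the issues table, based on the
# column summary above. Field names mirror the dataset columns; the class
# name and the type annotations are assumptions for illustration.
from dataclasses import dataclass
from typing import Optional

@dataclass
class IssueRow:
    url: str                      # API URL of the issue (stringlengths 51 .. 54)
    repository_url: str           # single class: the ollama/ollama repo URL
    html_url: str                 # web URL of the issue or PR
    id: int                       # int64, roughly 1.78B .. 2.82B
    node_id: str
    number: int                   # issue number, 1 .. ~8.69k
    title: str
    user: dict                    # GitHub user object of the author
    labels: list                  # 0 .. 5 label objects
    state: str                    # "open" or "closed"
    locked: bool
    assignee: Optional[dict]
    assignees: list               # 0 .. 2 user objects
    comments: int                 # 0 .. 323
    created_at: str               # timestamp[s]
    updated_at: str               # timestamp[s]
    closed_at: Optional[str]      # timestamp[s], null while open
    author_association: str       # e.g. NONE, CONTRIBUTOR
    draft: Optional[bool]
    pull_request: Optional[dict]  # present only for pull requests
    body: Optional[str]           # issue/PR description, 2 .. ~118k chars
    closed_by: Optional[dict]
    reactions: dict
    state_reason: Optional[str]   # e.g. "completed"
    is_pull_request: bool
```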
https://api.github.com/repos/ollama/ollama/issues/5354
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5354/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5354/comments
https://api.github.com/repos/ollama/ollama/issues/5354/events
https://github.com/ollama/ollama/issues/5354
2,379,496,796
I_kwDOJ0Z1Ps6N1D1c
5,354
ollama should detect native windows proxy configuration
{ "login": "smallg0at", "id": 30719670, "node_id": "MDQ6VXNlcjMwNzE5Njcw", "avatar_url": "https://avatars.githubusercontent.com/u/30719670?v=4", "gravatar_id": "", "url": "https://api.github.com/users/smallg0at", "html_url": "https://github.com/smallg0at", "followers_url": "https://api.github.com/users/smallg0at/followers", "following_url": "https://api.github.com/users/smallg0at/following{/other_user}", "gists_url": "https://api.github.com/users/smallg0at/gists{/gist_id}", "starred_url": "https://api.github.com/users/smallg0at/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/smallg0at/subscriptions", "organizations_url": "https://api.github.com/users/smallg0at/orgs", "repos_url": "https://api.github.com/users/smallg0at/repos", "events_url": "https://api.github.com/users/smallg0at/events{/privacy}", "received_events_url": "https://api.github.com/users/smallg0at/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 5860134234, "node_id": "LA_kwDOJ0Z1Ps8AAAABXUqNWg", "url": "https://api.github.com/repos/ollama/ollama/labels/windows", "name": "windows", "color": "0052CC", "default": false, "description": "" }, { "id": 6677370291, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCVsw", "url": "https://api.github.com/repos/ollama/ollama/labels/networking", "name": "networking", "color": "0B5368", "default": false, "description": "Issues relating to ollama pull and push" } ]
open
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2024-06-28T03:36:12
2024-07-05T16:45:10
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Ollama seems to fail to update itself in recent versions and app logs are as follows: ``` time=2024-06-28T11:23:56.487+08:00 level=INFO source=logging.go:50 msg="ollama app started" time=2024-06-28T11:23:56.540+08:00 level=INFO source=server.go:176 msg="unable to connect to server" time=2024-06-28T11:23:56.540+08:00 level=INFO source=server.go:135 msg="starting server..." time=2024-06-28T11:23:56.547+08:00 level=INFO source=server.go:121 msg="started ollama server with pid 31184" time=2024-06-28T11:23:56.547+08:00 level=INFO source=server.go:123 msg="ollama server logs C:\\Users\\<username>\\AppData\\Local\\Ollama\\server.log" time=2024-06-28T11:24:00.238+08:00 level=INFO source=updater.go:102 msg="New update available at https://github.com/ollama/ollama/releases/download/v0.1.47/OllamaSetup.exe" time=2024-06-28T11:24:00.257+08:00 level=ERROR source=updater.go:212 msg="failed to download new release: error checking update: Head \"https://github.com/ollama/ollama/releases/download/v0.1.47/OllamaSetup.exe\": dial tcp 127.0.0.1:443: connectex: No connection could be made because the target machine actively refused it." ``` I'm behind a proxy, and it's obviously not on port 443, nor is the ollama server listening there. Running `nslookup` also shows the correct IP rather than localhost. So I suspect that something in proxy detection has gone wrong... ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.1.46
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5354/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5354/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/4640
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4640/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4640/comments
https://api.github.com/repos/ollama/ollama/issues/4640/events
https://github.com/ollama/ollama/pull/4640
2,317,173,218
PR_kwDOJ0Z1Ps5wj3Zw
4,640
Supports OpenAI multimodal API access
{ "login": "elifriedman", "id": 2895084, "node_id": "MDQ6VXNlcjI4OTUwODQ=", "avatar_url": "https://avatars.githubusercontent.com/u/2895084?v=4", "gravatar_id": "", "url": "https://api.github.com/users/elifriedman", "html_url": "https://github.com/elifriedman", "followers_url": "https://api.github.com/users/elifriedman/followers", "following_url": "https://api.github.com/users/elifriedman/following{/other_user}", "gists_url": "https://api.github.com/users/elifriedman/gists{/gist_id}", "starred_url": "https://api.github.com/users/elifriedman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/elifriedman/subscriptions", "organizations_url": "https://api.github.com/users/elifriedman/orgs", "repos_url": "https://api.github.com/users/elifriedman/repos", "events_url": "https://api.github.com/users/elifriedman/events{/privacy}", "received_events_url": "https://api.github.com/users/elifriedman/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-05-25T18:44:06
2024-09-05T05:43:03
2024-09-05T05:43:03
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4640", "html_url": "https://github.com/ollama/ollama/pull/4640", "diff_url": "https://github.com/ollama/ollama/pull/4640.diff", "patch_url": "https://github.com/ollama/ollama/pull/4640.patch", "merged_at": null }
Allows for responses like: ```python response = client.chat.completions.create( model="llava-llama3", # or any other multimodal model messages=[ {"role": "user", "content": "simple text in the content key."}, { "role": "user", "content": [ {"type": "text", "text": "text inside of a list of dicts"}, # currently unsupported { "type": "image_url", "image_url": {"url": "data:image/png;base64,iVBORw0.."}, # base64 encoded images }, { "type": "image_url", "image_url": {"url": "https://live.staticflickr.com/....png"}, # urls }, ], }, ], ) ```
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4640/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4640/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4202
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4202/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4202/comments
https://api.github.com/repos/ollama/ollama/issues/4202/events
https://github.com/ollama/ollama/issues/4202
2,281,156,648
I_kwDOJ0Z1Ps6H97Ao
4,202
An existing connection was forcibly closed by the remote host.
{ "login": "zhangweiwei0326", "id": 5975616, "node_id": "MDQ6VXNlcjU5NzU2MTY=", "avatar_url": "https://avatars.githubusercontent.com/u/5975616?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zhangweiwei0326", "html_url": "https://github.com/zhangweiwei0326", "followers_url": "https://api.github.com/users/zhangweiwei0326/followers", "following_url": "https://api.github.com/users/zhangweiwei0326/following{/other_user}", "gists_url": "https://api.github.com/users/zhangweiwei0326/gists{/gist_id}", "starred_url": "https://api.github.com/users/zhangweiwei0326/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zhangweiwei0326/subscriptions", "organizations_url": "https://api.github.com/users/zhangweiwei0326/orgs", "repos_url": "https://api.github.com/users/zhangweiwei0326/repos", "events_url": "https://api.github.com/users/zhangweiwei0326/events{/privacy}", "received_events_url": "https://api.github.com/users/zhangweiwei0326/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
1
2024-05-06T15:30:29
2024-05-08T20:24:02
2024-05-08T20:24:02
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? ![image](https://github.com/ollama/ollama/assets/5975616/c8c90991-a0bd-40d3-b255-fd3b90cbaf68) No matter what I try, I get this error. Switching the proxy or not using a proxy at all makes no difference. Please provide a thorough solution. Thank you. ### OS _No response_ ### GPU _No response_ ### CPU _No response_ ### Ollama version _No response_
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4202/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4202/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/1878
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1878/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1878/comments
https://api.github.com/repos/ollama/ollama/issues/1878/events
https://github.com/ollama/ollama/pull/1878
2,073,167,627
PR_kwDOJ0Z1Ps5jnu0k
1,878
Typo Correction in API Documentation
{ "login": "nahakiole", "id": 4542479, "node_id": "MDQ6VXNlcjQ1NDI0Nzk=", "avatar_url": "https://avatars.githubusercontent.com/u/4542479?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nahakiole", "html_url": "https://github.com/nahakiole", "followers_url": "https://api.github.com/users/nahakiole/followers", "following_url": "https://api.github.com/users/nahakiole/following{/other_user}", "gists_url": "https://api.github.com/users/nahakiole/gists{/gist_id}", "starred_url": "https://api.github.com/users/nahakiole/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nahakiole/subscriptions", "organizations_url": "https://api.github.com/users/nahakiole/orgs", "repos_url": "https://api.github.com/users/nahakiole/repos", "events_url": "https://api.github.com/users/nahakiole/events{/privacy}", "received_events_url": "https://api.github.com/users/nahakiole/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-01-09T21:05:47
2024-01-09T21:21:18
2024-01-09T21:21:18
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/1878", "html_url": "https://github.com/ollama/ollama/pull/1878", "diff_url": "https://github.com/ollama/ollama/pull/1878.diff", "patch_url": "https://github.com/ollama/ollama/pull/1878.patch", "merged_at": "2024-01-09T21:21:18" }
I've noticed a small typo in the API documentation and have submitted a fix for it. The "role" value was written as "assisant" which I've updated to "assistant".
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1878/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1878/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2360
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2360/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2360/comments
https://api.github.com/repos/ollama/ollama/issues/2360/events
https://github.com/ollama/ollama/issues/2360
2,118,051,155
I_kwDOJ0Z1Ps5-PuVT
2,360
Add the newest deepseek-coder models
{ "login": "valerybugakov", "id": 3846380, "node_id": "MDQ6VXNlcjM4NDYzODA=", "avatar_url": "https://avatars.githubusercontent.com/u/3846380?v=4", "gravatar_id": "", "url": "https://api.github.com/users/valerybugakov", "html_url": "https://github.com/valerybugakov", "followers_url": "https://api.github.com/users/valerybugakov/followers", "following_url": "https://api.github.com/users/valerybugakov/following{/other_user}", "gists_url": "https://api.github.com/users/valerybugakov/gists{/gist_id}", "starred_url": "https://api.github.com/users/valerybugakov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/valerybugakov/subscriptions", "organizations_url": "https://api.github.com/users/valerybugakov/orgs", "repos_url": "https://api.github.com/users/valerybugakov/repos", "events_url": "https://api.github.com/users/valerybugakov/events{/privacy}", "received_events_url": "https://api.github.com/users/valerybugakov/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2024-02-05T09:22:50
2024-11-12T02:03:37
2024-11-12T02:03:37
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
- Instruct https://huggingface.co/deepseek-ai/deepseek-coder-7b-instruct-v1.5 - Base https://huggingface.co/deepseek-ai/deepseek-coder-7b-base-v1.5 - Instruct GGUF https://huggingface.co/LoneStriker/deepseek-coder-7b-instruct-v1.5-GGUF - Base GGUF https://huggingface.co/dagbs/deepseek-coder-7b-base-v1.5-GGUF
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2360/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/2360/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6529
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6529/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6529/comments
https://api.github.com/repos/ollama/ollama/issues/6529/events
https://github.com/ollama/ollama/issues/6529
2,490,108,511
I_kwDOJ0Z1Ps6UbApf
6,529
Ollama will stop using GPU when the total graphics memory usage exceeds the dedicated graphics memory size
{ "login": "UserGzy", "id": 142336365, "node_id": "U_kgDOCHvhbQ", "avatar_url": "https://avatars.githubusercontent.com/u/142336365?v=4", "gravatar_id": "", "url": "https://api.github.com/users/UserGzy", "html_url": "https://github.com/UserGzy", "followers_url": "https://api.github.com/users/UserGzy/followers", "following_url": "https://api.github.com/users/UserGzy/following{/other_user}", "gists_url": "https://api.github.com/users/UserGzy/gists{/gist_id}", "starred_url": "https://api.github.com/users/UserGzy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/UserGzy/subscriptions", "organizations_url": "https://api.github.com/users/UserGzy/orgs", "repos_url": "https://api.github.com/users/UserGzy/repos", "events_url": "https://api.github.com/users/UserGzy/events{/privacy}", "received_events_url": "https://api.github.com/users/UserGzy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-08-27T18:53:55
2024-08-28T20:45:36
2024-08-28T20:45:36
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? When I tried to run models, I noticed that once the total graphics memory usage exceeded the card's dedicated graphics memory, Ollama would stop using the GPU for inference. The task manager showed the GPU utilization at 0%. But when I tried a model with lower graphics memory usage, GPU utilization increased to 100%. ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.3.6
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6529/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6529/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/925
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/925/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/925/comments
https://api.github.com/repos/ollama/ollama/issues/925/events
https://github.com/ollama/ollama/issues/925
1,964,759,146
I_kwDOJ0Z1Ps51G9hq
925
Tab Completion
{ "login": "hemanth", "id": 18315, "node_id": "MDQ6VXNlcjE4MzE1", "avatar_url": "https://avatars.githubusercontent.com/u/18315?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hemanth", "html_url": "https://github.com/hemanth", "followers_url": "https://api.github.com/users/hemanth/followers", "following_url": "https://api.github.com/users/hemanth/following{/other_user}", "gists_url": "https://api.github.com/users/hemanth/gists{/gist_id}", "starred_url": "https://api.github.com/users/hemanth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hemanth/subscriptions", "organizations_url": "https://api.github.com/users/hemanth/orgs", "repos_url": "https://api.github.com/users/hemanth/repos", "events_url": "https://api.github.com/users/hemanth/events{/privacy}", "received_events_url": "https://api.github.com/users/hemanth/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
1
2023-10-27T04:46:52
2024-01-27T06:33:26
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
```sh > ollama run <tab> ``` Should tab complete the model names. Same with ```sh ollama show --modelfile ```
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/925/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/925/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/4776
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4776/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4776/comments
https://api.github.com/repos/ollama/ollama/issues/4776/events
https://github.com/ollama/ollama/issues/4776
2,329,418,943
I_kwDOJ0Z1Ps6K2By_
4,776
Can't run 0.1.40 on Windows 10
{ "login": "18657769868", "id": 22534813, "node_id": "MDQ6VXNlcjIyNTM0ODEz", "avatar_url": "https://avatars.githubusercontent.com/u/22534813?v=4", "gravatar_id": "", "url": "https://api.github.com/users/18657769868", "html_url": "https://github.com/18657769868", "followers_url": "https://api.github.com/users/18657769868/followers", "following_url": "https://api.github.com/users/18657769868/following{/other_user}", "gists_url": "https://api.github.com/users/18657769868/gists{/gist_id}", "starred_url": "https://api.github.com/users/18657769868/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/18657769868/subscriptions", "organizations_url": "https://api.github.com/users/18657769868/orgs", "repos_url": "https://api.github.com/users/18657769868/repos", "events_url": "https://api.github.com/users/18657769868/events{/privacy}", "received_events_url": "https://api.github.com/users/18657769868/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
6
2024-06-02T01:29:01
2024-06-02T03:40:02
2024-06-02T03:40:02
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Can`t run 0.1.40 on window10 ![33e3def630c5d32e709d75d6753493b](https://github.com/ollama/ollama/assets/22534813/df4acf59-5a2c-4aef-a819-fbbd8ae7e362) 2024/06/02 09:15:34 routes.go:1007: INFO server config env="map[OLLAMA_DEBUG:false OLLAMA_FLASH_ATTENTION:false OLLAMA_HOST: OLLAMA_KEEP_ALIVE: OLLAMA_LLM_LIBRARY: OLLAMA_MAX_LOADED_MODELS:1 OLLAMA_MAX_QUEUE:512 OLLAMA_MAX_VRAM:0 OLLAMA_MODELS: OLLAMA_NOHISTORY:false OLLAMA_NOPRUNE:false OLLAMA_NUM_PARALLEL:1 OLLAMA_ORIGINS:[* http://localhost https://localhost http://localhost:* https://localhost:* http://127.0.0.1 https://127.0.0.1 http://127.0.0.1:* https://127.0.0.1:* http://0.0.0.0 https://0.0.0.0 http://0.0.0.0:* https://0.0.0.0:*] OLLAMA_RUNNERS_DIR:C:\\Users\\xinhu\\AppData\\Local\\Programs\\Ollama\\ollama_runners OLLAMA_TMPDIR:]" time=2024-06-02T09:15:34.741+08:00 level=INFO source=images.go:729 msg="total blobs: 95" time=2024-06-02T09:15:34.751+08:00 level=INFO source=images.go:736 msg="total unused blobs removed: 0" time=2024-06-02T09:15:34.756+08:00 level=INFO source=routes.go:1053 msg="Listening on [::]:11434 (version 0.1.40)" time=2024-06-02T09:15:34.756+08:00 level=INFO source=payload.go:44 msg="Dynamic LLM libraries [rocm_v5.7 cpu cpu_avx cpu_avx2 cuda_v11.3]" Exception 0xc0000005 0x8 0x0 0x0 PC=0x0 signal arrived during external code execution runtime.cgocall(0x11c07c0, 0xc00036f400) runtime/cgocall.go:157 +0x3e fp=0xc00036f3d8 sp=0xc00036f3a0 pc=0x6893be github.com/ollama/ollama/gpu._Cfunc_oneapi_init(0x24b4dff5d30, 0xc000760660) _cgo_gotypes.go:529 +0x4d fp=0xc00036f400 sp=0xc00036f3d8 pc=0xa8c44d github.com/ollama/ollama/gpu.LoadOneapiMgmt.func2(0x24b4dff5d30, 0xc000760660) github.com/ollama/ollama/gpu/gpu.go:400 +0x4a fp=0xc00036f430 sp=0xc00036f400 pc=0xa8f96a github.com/ollama/ollama/gpu.LoadOneapiMgmt({0xc000704cc0, 0x1, 0x14321b0?}) github.com/ollama/ollama/gpu/gpu.go:400 +0x23e fp=0xc00036f528 sp=0xc00036f430 pc=0xa8f7be github.com/ollama/ollama/gpu.initGPUHandles() github.com/ollama/ollama/gpu/gpu.go:164 +0x2e5 fp=0xc00036f728 sp=0xc00036f528 pc=0xa8c845 github.com/ollama/ollama/gpu.GetGPUInfo() github.com/ollama/ollama/gpu/gpu.go:182 +0xa7 fp=0xc00036fac0 sp=0xc00036f728 pc=0xa8cce7 github.com/ollama/ollama/server.Serve({0x17dca40, 0xc000079220}) github.com/ollama/ollama/server/routes.go:1078 +0x7bb fp=0xc00036fc70 sp=0xc00036fac0 pc=0x119803b github.com/ollama/ollama/cmd.RunServer(0xc0001a7100?, {0x1fa8720?, 0x4?, 0x163c7fe?}) github.com/ollama/ollama/cmd/cmd.go:979 +0x17c fp=0xc00036fd58 sp=0xc00036fc70 pc=0x11b6c9c github.com/spf13/cobra.(*Command).execute(0xc0000b0308, {0x1fa8720, 0x0, 0x0}) github.com/spf13/cobra@v1.7.0/command.go:940 +0x882 fp=0xc00036fe78 sp=0xc00036fd58 pc=0xa2b5e2 github.com/spf13/cobra.(*Command).ExecuteC(0xc00044bb08) github.com/spf13/cobra@v1.7.0/command.go:1068 +0x3a5 fp=0xc00036ff30 sp=0xc00036fe78 pc=0xa2be25 github.com/spf13/cobra.(*Command).Execute(...) github.com/spf13/cobra@v1.7.0/command.go:992 github.com/spf13/cobra.(*Command).ExecuteContext(...) github.com/spf13/cobra@v1.7.0/command.go:985 main.main() github.com/ollama/ollama/main.go:11 +0x4d fp=0xc00036ff50 sp=0xc00036ff30 pc=0x11c034d runtime.main() runtime/proc.go:271 +0x28b fp=0xc00036ffe0 sp=0xc00036ff50 pc=0x6c13eb runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc00036ffe8 sp=0xc00036ffe0 pc=0x6f2561 goroutine 2 gp=0xc00006a700 m=nil [force gc (idle)]: runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc00006dfa8 sp=0xc00006df88 pc=0x6c17ee runtime.goparkunlock(...) 
runtime/proc.go:408 runtime.forcegchelper() runtime/proc.go:326 +0xb8 fp=0xc00006dfe0 sp=0xc00006dfa8 pc=0x6c1678 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc00006dfe8 sp=0xc00006dfe0 pc=0x6f2561 created by runtime.init.6 in goroutine 1 runtime/proc.go:314 +0x1a goroutine 3 gp=0xc00006aa80 m=nil [GC sweep wait]: runtime.gopark(0x1?, 0x0?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc00006ff80 sp=0xc00006ff60 pc=0x6c17ee runtime.goparkunlock(...) runtime/proc.go:408 runtime.bgsweep(0xc00007a000) runtime/mgcsweep.go:318 +0xdf fp=0xc00006ffc8 sp=0xc00006ff80 pc=0x6ab89f runtime.gcenable.gowrap1() runtime/mgc.go:203 +0x25 fp=0xc00006ffe0 sp=0xc00006ffc8 pc=0x6a0145 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc00006ffe8 sp=0xc00006ffe0 pc=0x6f2561 created by runtime.gcenable in goroutine 1 runtime/mgc.go:203 +0x66 goroutine 4 gp=0xc00006ac40 m=nil [GC scavenge wait]: runtime.gopark(0x10000?, 0x17ce728?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc000085f78 sp=0xc000085f58 pc=0x6c17ee runtime.goparkunlock(...) runtime/proc.go:408 runtime.(*scavengerState).park(0x1f1c120) runtime/mgcscavenge.go:425 +0x49 fp=0xc000085fa8 sp=0xc000085f78 pc=0x6a9229 runtime.bgscavenge(0xc00007a000) runtime/mgcscavenge.go:658 +0x59 fp=0xc000085fc8 sp=0xc000085fa8 pc=0x6a97d9 runtime.gcenable.gowrap2() runtime/mgc.go:204 +0x25 fp=0xc000085fe0 sp=0xc000085fc8 pc=0x6a00e5 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc000085fe8 sp=0xc000085fe0 pc=0x6f2561 created by runtime.gcenable in goroutine 1 runtime/mgc.go:204 +0xa5 goroutine 5 gp=0xc00006b180 m=nil [finalizer wait]: runtime.gopark(0xc000071e48?, 0x693505?, 0xa8?, 0x1?, 0xc00006a000?) runtime/proc.go:402 +0xce fp=0xc000071e20 sp=0xc000071e00 pc=0x6c17ee runtime.runfinq() runtime/mfinal.go:194 +0x107 fp=0xc000071fe0 sp=0xc000071e20 pc=0x69f1c7 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc000071fe8 sp=0xc000071fe0 pc=0x6f2561 created by runtime.createfing in goroutine 1 runtime/mfinal.go:164 +0x3d goroutine 6 gp=0xc0001dd180 m=nil [GC worker (idle)]: runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc000087f50 sp=0xc000087f30 pc=0x6c17ee runtime.gcBgMarkWorker() runtime/mgc.go:1310 +0xe5 fp=0xc000087fe0 sp=0xc000087f50 pc=0x6a2285 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc000087fe8 sp=0xc000087fe0 pc=0x6f2561 created by runtime.gcBgMarkStartWorkers in goroutine 1 runtime/mgc.go:1234 +0x1c goroutine 18 gp=0xc000480000 m=nil [GC worker (idle)]: runtime.gopark(0x2b1163881270?, 0x3?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc000081f50 sp=0xc000081f30 pc=0x6c17ee runtime.gcBgMarkWorker() runtime/mgc.go:1310 +0xe5 fp=0xc000081fe0 sp=0xc000081f50 pc=0x6a2285 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc000081fe8 sp=0xc000081fe0 pc=0x6f2561 created by runtime.gcBgMarkStartWorkers in goroutine 1 runtime/mgc.go:1234 +0x1c goroutine 34 gp=0xc00008c1c0 m=nil [GC worker (idle)]: runtime.gopark(0x2b11639793f8?, 0x0?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc000095f50 sp=0xc000095f30 pc=0x6c17ee runtime.gcBgMarkWorker() runtime/mgc.go:1310 +0xe5 fp=0xc000095fe0 sp=0xc000095f50 pc=0x6a2285 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc000095fe8 sp=0xc000095fe0 pc=0x6f2561 created by runtime.gcBgMarkStartWorkers in goroutine 1 runtime/mgc.go:1234 +0x1c goroutine 7 gp=0xc0001dd340 m=nil [GC worker (idle)]: runtime.gopark(0x2b1163881270?, 0x3?, 0x0?, 0x0?, 0x0?) 
runtime/proc.go:402 +0xce fp=0xc000091f50 sp=0xc000091f30 pc=0x6c17ee runtime.gcBgMarkWorker() runtime/mgc.go:1310 +0xe5 fp=0xc000091fe0 sp=0xc000091f50 pc=0x6a2285 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc000091fe8 sp=0xc000091fe0 pc=0x6f2561 created by runtime.gcBgMarkStartWorkers in goroutine 1 runtime/mgc.go:1234 +0x1c goroutine 8 gp=0xc0001dd500 m=nil [GC worker (idle)]: runtime.gopark(0x2b1163881270?, 0x1?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc000093f50 sp=0xc000093f30 pc=0x6c17ee runtime.gcBgMarkWorker() runtime/mgc.go:1310 +0xe5 fp=0xc000093fe0 sp=0xc000093f50 pc=0x6a2285 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc000093fe8 sp=0xc000093fe0 pc=0x6f2561 created by runtime.gcBgMarkStartWorkers in goroutine 1 runtime/mgc.go:1234 +0x1c goroutine 35 gp=0xc00008c380 m=nil [GC worker (idle)]: runtime.gopark(0x2b11639793f8?, 0x0?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc000097f50 sp=0xc000097f30 pc=0x6c17ee runtime.gcBgMarkWorker() runtime/mgc.go:1310 +0xe5 fp=0xc000097fe0 sp=0xc000097f50 pc=0x6a2285 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc000097fe8 sp=0xc000097fe0 pc=0x6f2561 created by runtime.gcBgMarkStartWorkers in goroutine 1 runtime/mgc.go:1234 +0x1c goroutine 19 gp=0xc0004801c0 m=nil [GC worker (idle)]: runtime.gopark(0x2b1163881270?, 0x3?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc000083f50 sp=0xc000083f30 pc=0x6c17ee runtime.gcBgMarkWorker() runtime/mgc.go:1310 +0xe5 fp=0xc000083fe0 sp=0xc000083f50 pc=0x6a2285 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc000083fe8 sp=0xc000083fe0 pc=0x6f2561 created by runtime.gcBgMarkStartWorkers in goroutine 1 runtime/mgc.go:1234 +0x1c goroutine 36 gp=0xc00008c540 m=nil [GC worker (idle)]: runtime.gopark(0x2b115c057ba0?, 0x0?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc00009ff50 sp=0xc00009ff30 pc=0x6c17ee runtime.gcBgMarkWorker() runtime/mgc.go:1310 +0xe5 fp=0xc00009ffe0 sp=0xc00009ff50 pc=0x6a2285 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc00009ffe8 sp=0xc00009ffe0 pc=0x6f2561 created by runtime.gcBgMarkStartWorkers in goroutine 1 runtime/mgc.go:1234 +0x1c goroutine 9 gp=0xc0001dca80 m=9 mp=0xc000680008 [syscall]: runtime.notetsleepg(0x1fa9320, 0xffffffffffffffff) runtime/lock_sema.go:296 +0x31 fp=0xc00009dfa0 sp=0xc00009df68 pc=0x691ad1 os/signal.signal_recv() runtime/sigqueue.go:152 +0x29 fp=0xc00009dfc0 sp=0xc00009dfa0 pc=0x6ee249 os/signal.loop() os/signal/signal_unix.go:23 +0x13 fp=0xc00009dfe0 sp=0xc00009dfc0 pc=0x9b4093 runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc00009dfe8 sp=0xc00009dfe0 pc=0x6f2561 created by os/signal.Notify.func1.1 in goroutine 1 os/signal/signal.go:151 +0x1f goroutine 10 gp=0xc0001dcc40 m=nil [chan receive]: runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?) runtime/proc.go:402 +0xce fp=0xc0000a1f00 sp=0xc0000a1ee0 pc=0x6c17ee runtime.chanrecv(0xc000760900, 0x0, 0x1) runtime/chan.go:583 +0x3cd fp=0xc0000a1f78 sp=0xc0000a1f00 pc=0x68ba4d runtime.chanrecv1(0x0?, 0x0?) runtime/chan.go:442 +0x12 fp=0xc0000a1fa0 sp=0xc0000a1f78 pc=0x68b652 github.com/ollama/ollama/server.Serve.func2() github.com/ollama/ollama/server/routes.go:1062 +0x3d fp=0xc0000a1fe0 sp=0xc0000a1fa0 pc=0x119817d runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc0000a1fe8 sp=0xc0000a1fe0 pc=0x6f2561 created by github.com/ollama/ollama/server.Serve in goroutine 1 github.com/ollama/ollama/server/routes.go:1061 +0x745 goroutine 11 gp=0xc0001dd6c0 m=nil [select]: runtime.gopark(0xc00026df58?, 0x3?, 0x60?, 0x0?, 0xc00026de32?) 
runtime/proc.go:402 +0xce fp=0xc00026dcb8 sp=0xc00026dc98 pc=0x6c17ee runtime.selectgo(0xc00026df58, 0xc00026de2c, 0x6?, 0x0, 0x4?, 0x1) runtime/select.go:327 +0x725 fp=0xc00026ddd8 sp=0xc00026dcb8 pc=0x6d1c45 github.com/ollama/ollama/server.(*Scheduler).processPending(0xc00070d720, {0x17df380, 0xc00070d6d0}) github.com/ollama/ollama/server/sched.go:102 +0xcf fp=0xc00026dfb8 sp=0xc00026ddd8 pc=0x119ba0f github.com/ollama/ollama/server.(*Scheduler).Run.func1() github.com/ollama/ollama/server/sched.go:92 +0x1f fp=0xc00026dfe0 sp=0xc00026dfb8 pc=0x119b91f runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc00026dfe8 sp=0xc00026dfe0 pc=0x6f2561 created by github.com/ollama/ollama/server.(*Scheduler).Run in goroutine 1 github.com/ollama/ollama/server/sched.go:91 +0xb4 goroutine 12 gp=0xc0001dd880 m=nil [select]: runtime.gopark(0xc00009bf50?, 0x3?, 0x0?, 0x0?, 0xc00009bd52?) runtime/proc.go:402 +0xce fp=0xc00009bbe0 sp=0xc00009bbc0 pc=0x6c17ee runtime.selectgo(0xc00009bf50, 0xc00009bd4c, 0x0?, 0x0, 0x0?, 0x1) runtime/select.go:327 +0x725 fp=0xc00009bd00 sp=0xc00009bbe0 pc=0x6d1c45 github.com/ollama/ollama/server.(*Scheduler).processCompleted(0xc00070d720, {0x17df380, 0xc00070d6d0}) github.com/ollama/ollama/server/sched.go:214 +0xec fp=0xc00009bfb8 sp=0xc00009bd00 pc=0x119c58c github.com/ollama/ollama/server.(*Scheduler).Run.func2() github.com/ollama/ollama/server/sched.go:96 +0x1f fp=0xc00009bfe0 sp=0xc00009bfb8 pc=0x119b8df runtime.goexit({}) runtime/asm_amd64.s:1695 +0x1 fp=0xc00009bfe8 sp=0xc00009bfe0 pc=0x6f2561 created by github.com/ollama/ollama/server.(*Scheduler).Run in goroutine 1 github.com/ollama/ollama/server/sched.go:95 +0x110 rax 0x7ffd47bfff00 rbx 0xc000760660 rcx 0x0 rdx 0x16c rdi 0xc0007606b0 rsi 0x85f0dffc60 rbp 0x85f0dffc20 rsp 0x85f0dffa88 r8 0x16c r9 0x16c r10 0x16c r11 0x85f0dff760 r12 0x7ffdc10bb1d0 r13 0x1e7d67e r14 0x0 r15 0x85f0dffad0 rip 0x0 rflags 0x10246 cs 0x33 fs 0x53 gs 0x2b ### OS Windows ### GPU Other ### CPU Intel ### Ollama version 0.1.40
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4776/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4776/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6161
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6161/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6161/comments
https://api.github.com/repos/ollama/ollama/issues/6161/events
https://github.com/ollama/ollama/issues/6161
2,447,130,361
I_kwDOJ0Z1Ps6R3D75
6,161
sorting issues on website
{ "login": "TheVegeta", "id": 73519193, "node_id": "MDQ6VXNlcjczNTE5MTkz", "avatar_url": "https://avatars.githubusercontent.com/u/73519193?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TheVegeta", "html_url": "https://github.com/TheVegeta", "followers_url": "https://api.github.com/users/TheVegeta/followers", "following_url": "https://api.github.com/users/TheVegeta/following{/other_user}", "gists_url": "https://api.github.com/users/TheVegeta/gists{/gist_id}", "starred_url": "https://api.github.com/users/TheVegeta/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TheVegeta/subscriptions", "organizations_url": "https://api.github.com/users/TheVegeta/orgs", "repos_url": "https://api.github.com/users/TheVegeta/repos", "events_url": "https://api.github.com/users/TheVegeta/events{/privacy}", "received_events_url": "https://api.github.com/users/TheVegeta/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 6573197867, "node_id": "LA_kwDOJ0Z1Ps8AAAABh8sKKw", "url": "https://api.github.com/repos/ollama/ollama/labels/ollama.com", "name": "ollama.com", "color": "ffffff", "default": false, "description": "" } ]
closed
false
null
[]
null
5
2024-08-04T13:12:53
2024-09-04T03:36:20
2024-09-04T03:36:19
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello there, can someone please guide me to the website's code so I can fix the sorting issue? As you can see in the image below, it's not sorting correctly. After fixing it, I'll create a new pull request. ![ollama_](https://github.com/user-attachments/assets/a0c8ecfd-a172-479f-a38a-e1cf4b0477c2)
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6161/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6161/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3814
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3814/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3814/comments
https://api.github.com/repos/ollama/ollama/issues/3814/events
https://github.com/ollama/ollama/issues/3814
2,255,777,736
I_kwDOJ0Z1Ps6GdG_I
3,814
Error: could not connect to ollama app, is it running?
{ "login": "userandpass", "id": 26294920, "node_id": "MDQ6VXNlcjI2Mjk0OTIw", "avatar_url": "https://avatars.githubusercontent.com/u/26294920?v=4", "gravatar_id": "", "url": "https://api.github.com/users/userandpass", "html_url": "https://github.com/userandpass", "followers_url": "https://api.github.com/users/userandpass/followers", "following_url": "https://api.github.com/users/userandpass/following{/other_user}", "gists_url": "https://api.github.com/users/userandpass/gists{/gist_id}", "starred_url": "https://api.github.com/users/userandpass/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/userandpass/subscriptions", "organizations_url": "https://api.github.com/users/userandpass/orgs", "repos_url": "https://api.github.com/users/userandpass/repos", "events_url": "https://api.github.com/users/userandpass/events{/privacy}", "received_events_url": "https://api.github.com/users/userandpass/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
8
2024-04-22T07:21:26
2024-12-25T23:54:53
2024-05-31T21:43:58
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? 1. Modify the ollama.service file 2. systemctl daemon-reload 3. systemctl start ollama ### OS Linux ### GPU Nvidia ### CPU _No response_ ### Ollama version ollama --version Warning: could not connect to a running Ollama instance Warning: client version is 0.1.32
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3814/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3814/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2540
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2540/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2540/comments
https://api.github.com/repos/ollama/ollama/issues/2540/events
https://github.com/ollama/ollama/issues/2540
2,138,621,189
I_kwDOJ0Z1Ps5_eMUF
2,540
Error: listen tcp 127.0.0.1:11434 in windows
{ "login": "razvanab", "id": 2854730, "node_id": "MDQ6VXNlcjI4NTQ3MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/2854730?v=4", "gravatar_id": "", "url": "https://api.github.com/users/razvanab", "html_url": "https://github.com/razvanab", "followers_url": "https://api.github.com/users/razvanab/followers", "following_url": "https://api.github.com/users/razvanab/following{/other_user}", "gists_url": "https://api.github.com/users/razvanab/gists{/gist_id}", "starred_url": "https://api.github.com/users/razvanab/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/razvanab/subscriptions", "organizations_url": "https://api.github.com/users/razvanab/orgs", "repos_url": "https://api.github.com/users/razvanab/repos", "events_url": "https://api.github.com/users/razvanab/events{/privacy}", "received_events_url": "https://api.github.com/users/razvanab/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
6
2024-02-16T13:25:19
2024-04-05T12:24:32
2024-02-16T14:10:21
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I get this error in Windows ollama preview when I try to run "ollama serve." Error: listen tcp 127.0.0.1:11434: bind: Only one usage of each socket address (protocol/network address/port) is normally permitted.
{ "login": "razvanab", "id": 2854730, "node_id": "MDQ6VXNlcjI4NTQ3MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/2854730?v=4", "gravatar_id": "", "url": "https://api.github.com/users/razvanab", "html_url": "https://github.com/razvanab", "followers_url": "https://api.github.com/users/razvanab/followers", "following_url": "https://api.github.com/users/razvanab/following{/other_user}", "gists_url": "https://api.github.com/users/razvanab/gists{/gist_id}", "starred_url": "https://api.github.com/users/razvanab/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/razvanab/subscriptions", "organizations_url": "https://api.github.com/users/razvanab/orgs", "repos_url": "https://api.github.com/users/razvanab/repos", "events_url": "https://api.github.com/users/razvanab/events{/privacy}", "received_events_url": "https://api.github.com/users/razvanab/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2540/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2540/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4465
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4465/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4465/comments
https://api.github.com/repos/ollama/ollama/issues/4465/events
https://github.com/ollama/ollama/pull/4465
2,299,125,591
PR_kwDOJ0Z1Ps5vmRmh
4,465
Update install.sh added /etc/default/ollama and create template
{ "login": "digitalw00t", "id": 593045, "node_id": "MDQ6VXNlcjU5MzA0NQ==", "avatar_url": "https://avatars.githubusercontent.com/u/593045?v=4", "gravatar_id": "", "url": "https://api.github.com/users/digitalw00t", "html_url": "https://github.com/digitalw00t", "followers_url": "https://api.github.com/users/digitalw00t/followers", "following_url": "https://api.github.com/users/digitalw00t/following{/other_user}", "gists_url": "https://api.github.com/users/digitalw00t/gists{/gist_id}", "starred_url": "https://api.github.com/users/digitalw00t/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/digitalw00t/subscriptions", "organizations_url": "https://api.github.com/users/digitalw00t/orgs", "repos_url": "https://api.github.com/users/digitalw00t/repos", "events_url": "https://api.github.com/users/digitalw00t/events{/privacy}", "received_events_url": "https://api.github.com/users/digitalw00t/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
0
2024-05-16T01:33:58
2024-05-16T01:33:58
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4465", "html_url": "https://github.com/ollama/ollama/pull/4465", "diff_url": "https://github.com/ollama/ollama/pull/4465.diff", "patch_url": "https://github.com/ollama/ollama/pull/4465.patch", "merged_at": null }
It will update the ollama.service to use an env file. It will create a default /etc/default/ollama if it doesn't already exist.
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4465/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4465/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8613
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8613/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8613/comments
https://api.github.com/repos/ollama/ollama/issues/8613/events
https://github.com/ollama/ollama/issues/8613
2,813,831,969
I_kwDOJ0Z1Ps6nt6sh
8,613
[v0.5.4] Download timeouts cause download cache corruption. Any download that needs to be retried by re-running ollama ends up corrupted at 100% download (file sha256-sha256hash-partial-0 not found).
{ "login": "esperanza-esperanza", "id": 196695882, "node_id": "U_kgDOC7lXSg", "avatar_url": "https://avatars.githubusercontent.com/u/196695882?v=4", "gravatar_id": "", "url": "https://api.github.com/users/esperanza-esperanza", "html_url": "https://github.com/esperanza-esperanza", "followers_url": "https://api.github.com/users/esperanza-esperanza/followers", "following_url": "https://api.github.com/users/esperanza-esperanza/following{/other_user}", "gists_url": "https://api.github.com/users/esperanza-esperanza/gists{/gist_id}", "starred_url": "https://api.github.com/users/esperanza-esperanza/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/esperanza-esperanza/subscriptions", "organizations_url": "https://api.github.com/users/esperanza-esperanza/orgs", "repos_url": "https://api.github.com/users/esperanza-esperanza/repos", "events_url": "https://api.github.com/users/esperanza-esperanza/events{/privacy}", "received_events_url": "https://api.github.com/users/esperanza-esperanza/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
2
2025-01-27T19:04:50
2025-01-27T19:40:50
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Running ollama through alpaca. I'm aware this is a separate project and will mirror the bug report there. ### OS Linux ### GPU AMD ### CPU AMD ### Ollama version 0.5.4
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8613/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8613/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/7898
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7898/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7898/comments
https://api.github.com/repos/ollama/ollama/issues/7898/events
https://github.com/ollama/ollama/pull/7898
2,708,111,976
PR_kwDOJ0Z1Ps6DpCsb
7,898
cmd: don't rely on reading repo file for display test
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-11-30T21:59:39
2024-11-30T22:12:55
2024-11-30T22:12:53
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7898", "html_url": "https://github.com/ollama/ollama/pull/7898", "diff_url": "https://github.com/ollama/ollama/pull/7898.diff", "patch_url": "https://github.com/ollama/ollama/pull/7898.patch", "merged_at": "2024-11-30T22:12:53" }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7898/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7898/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4363
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4363/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4363/comments
https://api.github.com/repos/ollama/ollama/issues/4363/events
https://github.com/ollama/ollama/pull/4363
2,290,939,376
PR_kwDOJ0Z1Ps5vKVIX
4,363
Install rocm packages on host system on dnf based system
{ "login": "ericcurtin", "id": 1694275, "node_id": "MDQ6VXNlcjE2OTQyNzU=", "avatar_url": "https://avatars.githubusercontent.com/u/1694275?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ericcurtin", "html_url": "https://github.com/ericcurtin", "followers_url": "https://api.github.com/users/ericcurtin/followers", "following_url": "https://api.github.com/users/ericcurtin/following{/other_user}", "gists_url": "https://api.github.com/users/ericcurtin/gists{/gist_id}", "starred_url": "https://api.github.com/users/ericcurtin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ericcurtin/subscriptions", "organizations_url": "https://api.github.com/users/ericcurtin/orgs", "repos_url": "https://api.github.com/users/ericcurtin/repos", "events_url": "https://api.github.com/users/ericcurtin/events{/privacy}", "received_events_url": "https://api.github.com/users/ericcurtin/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
4
2024-05-11T14:32:36
2024-05-22T20:04:26
null
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4363", "html_url": "https://github.com/ollama/ollama/pull/4363", "diff_url": "https://github.com/ollama/ollama/pull/4363.diff", "patch_url": "https://github.com/ollama/ollama/pull/4363.patch", "merged_at": null }
I was using ollama in podman containers via podman-ollama on Fedora Kinoite 40; it didn't work until I installed these packages on the base OS and rebooted.
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4363/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4363/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5612
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5612/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5612/comments
https://api.github.com/repos/ollama/ollama/issues/5612/events
https://github.com/ollama/ollama/pull/5612
2,401,680,694
PR_kwDOJ0Z1Ps51BGEL
5,612
chatglm graph
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-07-10T20:44:11
2024-07-10T21:18:34
2024-07-10T21:18:33
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5612", "html_url": "https://github.com/ollama/ollama/pull/5612", "diff_url": "https://github.com/ollama/ollama/pull/5612.diff", "patch_url": "https://github.com/ollama/ollama/pull/5612.patch", "merged_at": "2024-07-10T21:18:33" }
null
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5612/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5612/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8427
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8427/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8427/comments
https://api.github.com/repos/ollama/ollama/issues/8427/events
https://github.com/ollama/ollama/issues/8427
2,788,163,304
I_kwDOJ0Z1Ps6mL_7o
8,427
0.5.5 Model Creation on Windows Seems Broken
{ "login": "github-pmj", "id": 77807618, "node_id": "MDQ6VXNlcjc3ODA3NjE4", "avatar_url": "https://avatars.githubusercontent.com/u/77807618?v=4", "gravatar_id": "", "url": "https://api.github.com/users/github-pmj", "html_url": "https://github.com/github-pmj", "followers_url": "https://api.github.com/users/github-pmj/followers", "following_url": "https://api.github.com/users/github-pmj/following{/other_user}", "gists_url": "https://api.github.com/users/github-pmj/gists{/gist_id}", "starred_url": "https://api.github.com/users/github-pmj/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/github-pmj/subscriptions", "organizations_url": "https://api.github.com/users/github-pmj/orgs", "repos_url": "https://api.github.com/users/github-pmj/repos", "events_url": "https://api.github.com/users/github-pmj/events{/privacy}", "received_events_url": "https://api.github.com/users/github-pmj/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
5
2025-01-14T19:52:57
2025-01-21T20:32:45
2025-01-15T03:01:25
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Version 0.5.5 seems to have broken create functionality on Windows. I use a PowerShell script to create new models with different templates. The first step in the script is to run `ollama create '<model name>' -f <full path to model file>`. The model file is a one-line file: `FROM <full path to GGUF>`. With 0.5.5 it errors on model creation because it mangles the file path (it seems to prepend the OLLAMA_MODELS environment variable to the GGUF path). When I modify the model file to take this into account and run `ollama create 'testmodel1' -f <full path to model file>` directly, I get an 'Invalid model name' error. Rolling back to 0.5.4 resolves the issue. I'm using the Ollama setup installer. ### OS Windows ### GPU Nvidia ### CPU AMD ### Ollama version 0.5.5
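A minimal sketch of the workflow the report describes, assuming hypothetical paths and that the `ollama` CLI is on PATH; it writes the one-line Modelfile and invokes `ollama create`, which is the step where 0.5.5 reportedly mangles the GGUF path:

```python
# Sketch of the reporter's workflow (paths are placeholders, not from the report):
# write a one-line Modelfile pointing at a GGUF, then run `ollama create` against it.
import pathlib
import subprocess

gguf_path = pathlib.Path(r"C:\models\testmodel1.gguf")       # hypothetical GGUF location
modelfile = pathlib.Path(r"C:\models\testmodel1.Modelfile")   # hypothetical Modelfile path

modelfile.write_text(f"FROM {gguf_path}\n", encoding="utf-8")
subprocess.run(["ollama", "create", "testmodel1", "-f", str(modelfile)], check=True)
```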
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8427/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8427/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/165
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/165/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/165/comments
https://api.github.com/repos/ollama/ollama/issues/165/events
https://github.com/ollama/ollama/issues/165
1,816,339,516
I_kwDOJ0Z1Ps5sQyQ8
165
Make it simple to add new models from Huggingface
{ "login": "joshlewis", "id": 226778, "node_id": "MDQ6VXNlcjIyNjc3OA==", "avatar_url": "https://avatars.githubusercontent.com/u/226778?v=4", "gravatar_id": "", "url": "https://api.github.com/users/joshlewis", "html_url": "https://github.com/joshlewis", "followers_url": "https://api.github.com/users/joshlewis/followers", "following_url": "https://api.github.com/users/joshlewis/following{/other_user}", "gists_url": "https://api.github.com/users/joshlewis/gists{/gist_id}", "starred_url": "https://api.github.com/users/joshlewis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/joshlewis/subscriptions", "organizations_url": "https://api.github.com/users/joshlewis/orgs", "repos_url": "https://api.github.com/users/joshlewis/repos", "events_url": "https://api.github.com/users/joshlewis/events{/privacy}", "received_events_url": "https://api.github.com/users/joshlewis/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2023-07-21T20:18:56
2023-07-21T23:14:07
2023-07-21T23:12:59
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I love ollama! This is sweet. I'm a beginner with LLM stuff. I see ollama has built-in support for several models like Nous-Hermes, Orca Mini, and a couple varieties of Llama2. I'd love it if it were just as easy (or at least almost as easy?) to add support for other models we find on huggingface.co with a simple command. Then once I had done that, I could run `ollama pull whatevermodel` and it would just do what it was supposed to do.
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/165/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/165/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5883
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5883/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5883/comments
https://api.github.com/repos/ollama/ollama/issues/5883/events
https://github.com/ollama/ollama/issues/5883
2,425,797,891
I_kwDOJ0Z1Ps6Qlr0D
5,883
Sometimes the model (or ollama) returns nothing - model: llama 3.1 8B
{ "login": "llagerlof", "id": 193798, "node_id": "MDQ6VXNlcjE5Mzc5OA==", "avatar_url": "https://avatars.githubusercontent.com/u/193798?v=4", "gravatar_id": "", "url": "https://api.github.com/users/llagerlof", "html_url": "https://github.com/llagerlof", "followers_url": "https://api.github.com/users/llagerlof/followers", "following_url": "https://api.github.com/users/llagerlof/following{/other_user}", "gists_url": "https://api.github.com/users/llagerlof/gists{/gist_id}", "starred_url": "https://api.github.com/users/llagerlof/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/llagerlof/subscriptions", "organizations_url": "https://api.github.com/users/llagerlof/orgs", "repos_url": "https://api.github.com/users/llagerlof/repos", "events_url": "https://api.github.com/users/llagerlof/events{/privacy}", "received_events_url": "https://api.github.com/users/llagerlof/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-07-23T18:00:19
2024-07-23T18:23:16
2024-07-23T18:23:15
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? In a simple conversation, 50/40% of the time it returns nothing. I just run the model normally: `ollama run llama3.1` After a successful start, I ask some random questions. Sometimes it responds; sometimes it returns empty. This does not happen with the llama3 8B model. My NVIDIA GPU has 12GB and the PC has 32GB of RAM. ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.2.5
{ "login": "llagerlof", "id": 193798, "node_id": "MDQ6VXNlcjE5Mzc5OA==", "avatar_url": "https://avatars.githubusercontent.com/u/193798?v=4", "gravatar_id": "", "url": "https://api.github.com/users/llagerlof", "html_url": "https://github.com/llagerlof", "followers_url": "https://api.github.com/users/llagerlof/followers", "following_url": "https://api.github.com/users/llagerlof/following{/other_user}", "gists_url": "https://api.github.com/users/llagerlof/gists{/gist_id}", "starred_url": "https://api.github.com/users/llagerlof/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/llagerlof/subscriptions", "organizations_url": "https://api.github.com/users/llagerlof/orgs", "repos_url": "https://api.github.com/users/llagerlof/repos", "events_url": "https://api.github.com/users/llagerlof/events{/privacy}", "received_events_url": "https://api.github.com/users/llagerlof/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5883/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5883/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8579
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8579/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8579/comments
https://api.github.com/repos/ollama/ollama/issues/8579/events
https://github.com/ollama/ollama/issues/8579
2,810,930,718
I_kwDOJ0Z1Ps6ni2Ye
8,579
llama.cpp server API compatibility
{ "login": "shishkin", "id": 124065, "node_id": "MDQ6VXNlcjEyNDA2NQ==", "avatar_url": "https://avatars.githubusercontent.com/u/124065?v=4", "gravatar_id": "", "url": "https://api.github.com/users/shishkin", "html_url": "https://github.com/shishkin", "followers_url": "https://api.github.com/users/shishkin/followers", "following_url": "https://api.github.com/users/shishkin/following{/other_user}", "gists_url": "https://api.github.com/users/shishkin/gists{/gist_id}", "starred_url": "https://api.github.com/users/shishkin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shishkin/subscriptions", "organizations_url": "https://api.github.com/users/shishkin/orgs", "repos_url": "https://api.github.com/users/shishkin/repos", "events_url": "https://api.github.com/users/shishkin/events{/privacy}", "received_events_url": "https://api.github.com/users/shishkin/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
0
2025-01-25T11:17:10
2025-01-25T11:17:10
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
This seems like an obvious thing, but I could not find an existing issue for it. It would be nice if the Ollama API had a compatibility layer with the [llama.cpp server API](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md#api-endpoints). I wanted to try out the new [llama.vscode](https://github.com/ggml-org/llama.vscode) extension and could not figure out how to make it work with ollama-served models. I would like to avoid having a zoo of model servers and would prefer to rely on ollama alone.
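For context, a purely illustrative comparison of the two request shapes involved: the llama.cpp server exposes endpoints such as `/completion`, while Ollama's native API uses `/api/generate`. The exact payload fields below are assumptions for illustration, not a statement of compatibility:

```python
# Hypothetical side-by-side of the two request shapes this issue is about.
llama_cpp_request = {
    "url": "http://localhost:8080/completion",            # llama.cpp server default port
    "body": {"prompt": "Hello", "n_predict": 32},
}
ollama_request = {
    "url": "http://localhost:11434/api/generate",          # Ollama native API
    "body": {"model": "llama3.1", "prompt": "Hello"},
}
print(llama_cpp_request, ollama_request, sep="\n")
```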
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8579/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8579/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/5430
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5430/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5430/comments
https://api.github.com/repos/ollama/ollama/issues/5430/events
https://github.com/ollama/ollama/issues/5430
2,385,861,969
I_kwDOJ0Z1Ps6ONV1R
5,430
Unable to load model because of incorrect model path
{ "login": "whichxjy", "id": 33963637, "node_id": "MDQ6VXNlcjMzOTYzNjM3", "avatar_url": "https://avatars.githubusercontent.com/u/33963637?v=4", "gravatar_id": "", "url": "https://api.github.com/users/whichxjy", "html_url": "https://github.com/whichxjy", "followers_url": "https://api.github.com/users/whichxjy/followers", "following_url": "https://api.github.com/users/whichxjy/following{/other_user}", "gists_url": "https://api.github.com/users/whichxjy/gists{/gist_id}", "starred_url": "https://api.github.com/users/whichxjy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/whichxjy/subscriptions", "organizations_url": "https://api.github.com/users/whichxjy/orgs", "repos_url": "https://api.github.com/users/whichxjy/repos", "events_url": "https://api.github.com/users/whichxjy/events{/privacy}", "received_events_url": "https://api.github.com/users/whichxjy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 6677367769, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCL2Q", "url": "https://api.github.com/repos/ollama/ollama/labels/needs%20more%20info", "name": "needs more info", "color": "BA8041", "default": false, "description": "More information is needed to assist" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
3
2024-07-02T10:23:26
2024-07-05T23:07:21
2024-07-05T23:07:20
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? After downloading model with `ollama pull llama3`, calling API `POST /api/chat` with `ollama serve` running would get this error: `model 'llama3' not found, try pulling it first` > related issue: #3876 Problem: - When I run `ollama pull llama3`, the `OLLAMA_MODELS` env is not set. Then this model path is `/usr/share/ollama/.ollama/models`. - But when I run `ollama serve`, the `OLLAMA_MODELS` env would be `~/.ollama/models`. Because of that, calling `POST /api/chat` could not find the model downloaded before. I solved it when I set `OLLAMA_MODELS` to `/usr/share/ollama/.ollama/models` when running `ollama serve`. ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.1.48
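A small illustrative script (an assumption, not from the report) that prints which models directory a given invocation would use; running it in the user session versus under the service account makes the mismatch described above visible:

```python
# Sketch: show the models directory this process would use, to surface the mismatch
# between the user session and the systemd service. Paths are those named in the report.
import os
import pathlib

def models_dir() -> pathlib.Path:
    env = os.environ.get("OLLAMA_MODELS")
    if env:
        return pathlib.Path(env)
    return pathlib.Path.home() / ".ollama" / "models"  # default for a regular user

print("This process would use:", models_dir())
print("The systemd service default named in the report is: /usr/share/ollama/.ollama/models")
```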
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5430/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5430/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8266
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8266/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8266/comments
https://api.github.com/repos/ollama/ollama/issues/8266/events
https://github.com/ollama/ollama/issues/8266
2,762,374,148
I_kwDOJ0Z1Ps6kpnwE
8,266
Installation and data directory should be customizable by user
{ "login": "gnusupport", "id": 24825387, "node_id": "MDQ6VXNlcjI0ODI1Mzg3", "avatar_url": "https://avatars.githubusercontent.com/u/24825387?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gnusupport", "html_url": "https://github.com/gnusupport", "followers_url": "https://api.github.com/users/gnusupport/followers", "following_url": "https://api.github.com/users/gnusupport/following{/other_user}", "gists_url": "https://api.github.com/users/gnusupport/gists{/gist_id}", "starred_url": "https://api.github.com/users/gnusupport/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gnusupport/subscriptions", "organizations_url": "https://api.github.com/users/gnusupport/orgs", "repos_url": "https://api.github.com/users/gnusupport/repos", "events_url": "https://api.github.com/users/gnusupport/events{/privacy}", "received_events_url": "https://api.github.com/users/gnusupport/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396220, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA", "url": "https://api.github.com/repos/ollama/ollama/labels/question", "name": "question", "color": "d876e3", "default": true, "description": "General questions" } ]
closed
false
null
[]
null
6
2024-12-29T19:14:44
2024-12-30T00:49:08
2024-12-30T00:16:30
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I have too little hard disk space and cannot install the ollama files where they go by default. I should be able to say WHERE ollama is installed and WHERE the data files, like models, are stored. I also do not like the notion of installing it system-wide; I would rather install it as a user. People have various hard disks; I have many hard disks and mount points and do not keep everything on /usr. Please consider this proposal. ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version _No response_
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8266/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8266/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8469
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8469/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8469/comments
https://api.github.com/repos/ollama/ollama/issues/8469/events
https://github.com/ollama/ollama/issues/8469
2,794,760,197
I_kwDOJ0Z1Ps6mlKgF
8,469
Semantic recognition or semantic classification.
{ "login": "20246688", "id": 156653831, "node_id": "U_kgDOCVZZBw", "avatar_url": "https://avatars.githubusercontent.com/u/156653831?v=4", "gravatar_id": "", "url": "https://api.github.com/users/20246688", "html_url": "https://github.com/20246688", "followers_url": "https://api.github.com/users/20246688/followers", "following_url": "https://api.github.com/users/20246688/following{/other_user}", "gists_url": "https://api.github.com/users/20246688/gists{/gist_id}", "starred_url": "https://api.github.com/users/20246688/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/20246688/subscriptions", "organizations_url": "https://api.github.com/users/20246688/orgs", "repos_url": "https://api.github.com/users/20246688/repos", "events_url": "https://api.github.com/users/20246688/events{/privacy}", "received_events_url": "https://api.github.com/users/20246688/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
9
2025-01-17T07:20:59
2025-01-22T01:34:11
2025-01-22T01:34:11
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
When working on tools, is there a way to incorporate semantic recognition or semantic classification based on user text before model inference?
{ "login": "20246688", "id": 156653831, "node_id": "U_kgDOCVZZBw", "avatar_url": "https://avatars.githubusercontent.com/u/156653831?v=4", "gravatar_id": "", "url": "https://api.github.com/users/20246688", "html_url": "https://github.com/20246688", "followers_url": "https://api.github.com/users/20246688/followers", "following_url": "https://api.github.com/users/20246688/following{/other_user}", "gists_url": "https://api.github.com/users/20246688/gists{/gist_id}", "starred_url": "https://api.github.com/users/20246688/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/20246688/subscriptions", "organizations_url": "https://api.github.com/users/20246688/orgs", "repos_url": "https://api.github.com/users/20246688/repos", "events_url": "https://api.github.com/users/20246688/events{/privacy}", "received_events_url": "https://api.github.com/users/20246688/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8469/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8469/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3545
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3545/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3545/comments
https://api.github.com/repos/ollama/ollama/issues/3545/events
https://github.com/ollama/ollama/issues/3545
2,232,498,451
I_kwDOJ0Z1Ps6FETkT
3,545
Red Hat Linux 9 install error
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6678628138, "node_id": "LA_kwDOJ0Z1Ps8AAAABjhPHKg", "url": "https://api.github.com/repos/ollama/ollama/labels/install", "name": "install", "color": "E0B88D", "default": false, "description": "" } ]
closed
false
null
[]
null
1
2024-04-09T02:41:26
2024-07-25T16:08:33
2024-07-25T16:08:27
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? ``` sudo: dkms: command not found modprobe: FATAL: Module nvidia not found in directory /lib/modules/5.14.0-362.18.1.el9_3.x86_64 ``` ### What did you expect to see? _No response_ ### Steps to reproduce _No response_ ### Are there any recent changes that introduced the issue? _No response_ ### OS _No response_ ### Architecture _No response_ ### Platform _No response_ ### Ollama version _No response_ ### GPU _No response_ ### GPU info _No response_ ### CPU _No response_ ### Other software _No response_
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3545/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3545/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/1331
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1331/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1331/comments
https://api.github.com/repos/ollama/ollama/issues/1331/events
https://github.com/ollama/ollama/pull/1331
2,018,848,970
PR_kwDOJ0Z1Ps5gykV9
1,331
Add OpenAI compatible API chat completions
{ "login": "vaayne", "id": 10231735, "node_id": "MDQ6VXNlcjEwMjMxNzM1", "avatar_url": "https://avatars.githubusercontent.com/u/10231735?v=4", "gravatar_id": "", "url": "https://api.github.com/users/vaayne", "html_url": "https://github.com/vaayne", "followers_url": "https://api.github.com/users/vaayne/followers", "following_url": "https://api.github.com/users/vaayne/following{/other_user}", "gists_url": "https://api.github.com/users/vaayne/gists{/gist_id}", "starred_url": "https://api.github.com/users/vaayne/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vaayne/subscriptions", "organizations_url": "https://api.github.com/users/vaayne/orgs", "repos_url": "https://api.github.com/users/vaayne/repos", "events_url": "https://api.github.com/users/vaayne/events{/privacy}", "received_events_url": "https://api.github.com/users/vaayne/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
10
2023-11-30T14:56:24
2024-05-18T02:35:39
2024-02-20T03:24:04
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/1331", "html_url": "https://github.com/ollama/ollama/pull/1331", "diff_url": "https://github.com/ollama/ollama/pull/1331.diff", "patch_url": "https://github.com/ollama/ollama/pull/1331.patch", "merged_at": null }
fix for issue https://github.com/jmorganca/ollama/issues/305 ## API example <img width="1254" alt="image" src="https://github.com/jmorganca/ollama/assets/10231735/6e9a73a7-40f2-4526-9897-ed4b76a4e615"> ## Stream Response <img width="1246" alt="image" src="https://github.com/jmorganca/ollama/assets/10231735/1c03fb26-8448-444c-a989-e0b70543a4a1">
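For readers of this PR, a hypothetical stdlib-only client call in the OpenAI chat-completions style it targets; the endpoint path and payload/response shape follow the OpenAI convention and are assumptions, not taken from the PR's diff:

```python
# Sketch: OpenAI-style chat completions request against a local Ollama server.
import json
import urllib.request

payload = {
    "model": "llama2",  # placeholder model name
    "messages": [{"role": "user", "content": "Hello!"}],
}
req = urllib.request.Request(
    "http://localhost:11434/v1/chat/completions",  # assumed OpenAI-compatible path
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read())["choices"][0]["message"]["content"])
```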
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1331/reactions", "total_count": 15, "+1": 7, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 5, "rocket": 3, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1331/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7849
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7849/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7849/comments
https://api.github.com/repos/ollama/ollama/issues/7849/events
https://github.com/ollama/ollama/pull/7849
2,696,109,684
PR_kwDOJ0Z1Ps6DPo4b
7,849
runner.go: Don't try to extract image tags for text models
{ "login": "jessegross", "id": 6468499, "node_id": "MDQ6VXNlcjY0Njg0OTk=", "avatar_url": "https://avatars.githubusercontent.com/u/6468499?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jessegross", "html_url": "https://github.com/jessegross", "followers_url": "https://api.github.com/users/jessegross/followers", "following_url": "https://api.github.com/users/jessegross/following{/other_user}", "gists_url": "https://api.github.com/users/jessegross/gists{/gist_id}", "starred_url": "https://api.github.com/users/jessegross/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jessegross/subscriptions", "organizations_url": "https://api.github.com/users/jessegross/orgs", "repos_url": "https://api.github.com/users/jessegross/repos", "events_url": "https://api.github.com/users/jessegross/events{/privacy}", "received_events_url": "https://api.github.com/users/jessegross/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-11-26T21:14:06
2024-11-26T21:23:26
2024-11-26T21:23:24
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7849", "html_url": "https://github.com/ollama/ollama/pull/7849", "diff_url": "https://github.com/ollama/ollama/pull/7849.diff", "patch_url": "https://github.com/ollama/ollama/pull/7849.patch", "merged_at": "2024-11-26T21:23:24" }
When processing a prompt, we look for image tags of the form [img-0], which are inserted by the Ollama server process. However, this can cause errors if the original prompt has these tags - typically an image not found error is returned. This changes tag searching behavior to be similar to the 0.3.x series, which will largely avoid these problems. However, they can still happen when input text with these tags is used with image models. The correct solution is to escape the tags, but this is a larger issue with special sequences in general, so this is an incremental fix that should avoid the problem for the majority of cases.
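An illustrative sketch (not the runner's actual code) of the behavior described: the `[img-N]` pattern is only treated specially when the model accepts images, so plain text containing the literal tag is left untouched for text-only models:

```python
# Sketch: only extract [img-N] tags when the model actually accepts images.
import re

IMG_TAG = re.compile(r"\[img-(\d+)\]")

def split_prompt(prompt: str, model_accepts_images: bool):
    if not model_accepts_images:
        return prompt, []          # text-only model: never treat tags specially
    indices = [int(m.group(1)) for m in IMG_TAG.finditer(prompt)]
    return IMG_TAG.sub("", prompt), indices

print(split_prompt("describe [img-0] please", model_accepts_images=True))
print(split_prompt("literal [img-0] in text", model_accepts_images=False))
```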
{ "login": "jessegross", "id": 6468499, "node_id": "MDQ6VXNlcjY0Njg0OTk=", "avatar_url": "https://avatars.githubusercontent.com/u/6468499?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jessegross", "html_url": "https://github.com/jessegross", "followers_url": "https://api.github.com/users/jessegross/followers", "following_url": "https://api.github.com/users/jessegross/following{/other_user}", "gists_url": "https://api.github.com/users/jessegross/gists{/gist_id}", "starred_url": "https://api.github.com/users/jessegross/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jessegross/subscriptions", "organizations_url": "https://api.github.com/users/jessegross/orgs", "repos_url": "https://api.github.com/users/jessegross/repos", "events_url": "https://api.github.com/users/jessegross/events{/privacy}", "received_events_url": "https://api.github.com/users/jessegross/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7849/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7849/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/541
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/541/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/541/comments
https://api.github.com/repos/ollama/ollama/issues/541/events
https://github.com/ollama/ollama/pull/541
1,899,542,196
PR_kwDOJ0Z1Ps5af4Gy
541
Cmd changes
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2023-09-16T19:20:17
2023-09-18T19:31:42
2023-09-18T19:26:56
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/541", "html_url": "https://github.com/ollama/ollama/pull/541", "diff_url": "https://github.com/ollama/ollama/pull/541.diff", "patch_url": "https://github.com/ollama/ollama/pull/541.patch", "merged_at": "2023-09-18T19:26:56" }
This changes the API so that it can use `POST /api/generate` to pre-load a model if the prompt is empty. It also changes the REPL so that it can pre-load the model by making a call to generate automatically, and includes a change for using placeholder text.
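A minimal illustration of the pre-load call described above, using only the Python standard library; the model name is a placeholder, and the empty-prompt convention is the one the PR text describes:

```python
# Sketch: POST to /api/generate with an empty prompt so the server pre-loads the model.
import json
import urllib.request

req = urllib.request.Request(
    "http://localhost:11434/api/generate",
    data=json.dumps({"model": "llama2", "prompt": ""}).encode("utf-8"),  # placeholder model
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode("utf-8"))  # server responds once the model is loaded
```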
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/541/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/541/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2109
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2109/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2109/comments
https://api.github.com/repos/ollama/ollama/issues/2109/events
https://github.com/ollama/ollama/issues/2109
2,092,012,686
I_kwDOJ0Z1Ps58sZSO
2,109
Support loading multiple models at the same time
{ "login": "Picaso2", "id": 78003984, "node_id": "MDQ6VXNlcjc4MDAzOTg0", "avatar_url": "https://avatars.githubusercontent.com/u/78003984?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Picaso2", "html_url": "https://github.com/Picaso2", "followers_url": "https://api.github.com/users/Picaso2/followers", "following_url": "https://api.github.com/users/Picaso2/following{/other_user}", "gists_url": "https://api.github.com/users/Picaso2/gists{/gist_id}", "starred_url": "https://api.github.com/users/Picaso2/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Picaso2/subscriptions", "organizations_url": "https://api.github.com/users/Picaso2/orgs", "repos_url": "https://api.github.com/users/Picaso2/repos", "events_url": "https://api.github.com/users/Picaso2/events{/privacy}", "received_events_url": "https://api.github.com/users/Picaso2/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
18
2024-01-20T11:06:39
2024-06-09T14:52:06
2024-04-23T15:31:39
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Is it possible to create one model from multiple models, or even to load multiple models at the same time? (A sketch of running two models side by side follows this record.)
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2109/reactions", "total_count": 6, "+1": 6, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2109/timeline
null
completed
false
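This request was closed around the time Ollama added support for keeping several models loaded concurrently (controlled by server settings such as OLLAMA_MAX_LOADED_MODELS and OLLAMA_NUM_PARALLEL). As a minimal sketch, assuming a local server configured to hold at least two models, two generate requests against different model names can run side by side; the model names and default address are illustrative, not taken from the issue.

```go
// Hedged sketch: fire two generate requests at different models so both stay
// loaded, assuming an Ollama server at the default address that is configured
// for concurrent model loading. Model names are illustrative.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"sync"
)

func generate(model, prompt string) (string, error) {
	body, _ := json.Marshal(map[string]any{
		"model":  model,
		"prompt": prompt,
		"stream": false,
	})
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var out struct {
		Response string `json:"response"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return "", err
	}
	return out.Response, nil
}

func main() {
	var wg sync.WaitGroup
	for _, m := range []string{"llama3.2", "qwen2.5-coder"} { // illustrative model names
		wg.Add(1)
		go func(model string) {
			defer wg.Done()
			text, err := generate(model, "Say hello in one word.")
			fmt.Println(model, "->", text, err)
		}(m)
	}
	wg.Wait()
}
```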
https://api.github.com/repos/ollama/ollama/issues/3956
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3956/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3956/comments
https://api.github.com/repos/ollama/ollama/issues/3956/events
https://github.com/ollama/ollama/pull/3956
2,266,441,670
PR_kwDOJ0Z1Ps5t4NVt
3,956
.github/workflows: add in-flight cancellations on new push
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-04-26T20:13:25
2024-04-26T20:54:25
2024-04-26T20:54:25
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3956", "html_url": "https://github.com/ollama/ollama/pull/3956", "diff_url": "https://github.com/ollama/ollama/pull/3956.diff", "patch_url": "https://github.com/ollama/ollama/pull/3956.patch", "merged_at": "2024-04-26T20:54:25" }
null
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3956/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3956/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5200
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5200/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5200/comments
https://api.github.com/repos/ollama/ollama/issues/5200/events
https://github.com/ollama/ollama/issues/5200
2,366,329,161
I_kwDOJ0Z1Ps6NC1FJ
5,200
Add support for stream_options
{ "login": "igo", "id": 55597, "node_id": "MDQ6VXNlcjU1NTk3", "avatar_url": "https://avatars.githubusercontent.com/u/55597?v=4", "gravatar_id": "", "url": "https://api.github.com/users/igo", "html_url": "https://github.com/igo", "followers_url": "https://api.github.com/users/igo/followers", "following_url": "https://api.github.com/users/igo/following{/other_user}", "gists_url": "https://api.github.com/users/igo/gists{/gist_id}", "starred_url": "https://api.github.com/users/igo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/igo/subscriptions", "organizations_url": "https://api.github.com/users/igo/orgs", "repos_url": "https://api.github.com/users/igo/repos", "events_url": "https://api.github.com/users/igo/events{/privacy}", "received_events_url": "https://api.github.com/users/igo/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 7706482389, "node_id": "LA_kwDOJ0Z1Ps8AAAABy1eW1Q", "url": "https://api.github.com/repos/ollama/ollama/labels/api", "name": "api", "color": "bfdadc", "default": false, "description": "" } ]
closed
false
null
[]
null
2
2024-06-21T11:20:41
2024-12-13T01:09:31
2024-12-13T01:09:31
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
OpenAI added support for token usage stats in a streamed response. It would be great to have a similar feature in Ollama (an example request is sketched after this record). https://community.openai.com/t/usage-stats-now-available-when-using-streaming-with-the-chat-completions-api-or-completions-api/738156 https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream_options
{ "login": "ParthSareen", "id": 29360864, "node_id": "MDQ6VXNlcjI5MzYwODY0", "avatar_url": "https://avatars.githubusercontent.com/u/29360864?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ParthSareen", "html_url": "https://github.com/ParthSareen", "followers_url": "https://api.github.com/users/ParthSareen/followers", "following_url": "https://api.github.com/users/ParthSareen/following{/other_user}", "gists_url": "https://api.github.com/users/ParthSareen/gists{/gist_id}", "starred_url": "https://api.github.com/users/ParthSareen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ParthSareen/subscriptions", "organizations_url": "https://api.github.com/users/ParthSareen/orgs", "repos_url": "https://api.github.com/users/ParthSareen/repos", "events_url": "https://api.github.com/users/ParthSareen/events{/privacy}", "received_events_url": "https://api.github.com/users/ParthSareen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5200/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5200/timeline
null
completed
false
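For reference, OpenAI's `stream_options` is passed alongside `stream`, and `include_usage` makes the final streamed chunk carry the prompt and completion token counts. A minimal sketch of such a request against a local OpenAI-compatible `/v1/chat/completions` endpoint is below; the address and model name are assumptions, and whether a given Ollama version reports usage this way depends on when this issue was resolved.

```go
// Hedged sketch: a streamed chat completion request that asks for usage stats
// in the final chunk via stream_options. Endpoint and model name are assumed.
package main

import (
	"bytes"
	"encoding/json"
	"io"
	"net/http"
	"os"
)

func main() {
	body, _ := json.Marshal(map[string]any{
		"model":  "llama3.2",
		"stream": true,
		"stream_options": map[string]any{
			"include_usage": true, // final chunk should include token counts
		},
		"messages": []map[string]string{
			{"role": "user", "content": "Hello!"},
		},
	})
	resp, err := http.Post("http://localhost:11434/v1/chat/completions", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // print the raw SSE chunks, including the usage one
}
```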
https://api.github.com/repos/ollama/ollama/issues/6615
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6615/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6615/comments
https://api.github.com/repos/ollama/ollama/issues/6615/events
https://github.com/ollama/ollama/pull/6615
2,503,826,090
PR_kwDOJ0Z1Ps56UIu0
6,615
api: add Client.BaseURL method
{ "login": "presbrey", "id": 133399, "node_id": "MDQ6VXNlcjEzMzM5OQ==", "avatar_url": "https://avatars.githubusercontent.com/u/133399?v=4", "gravatar_id": "", "url": "https://api.github.com/users/presbrey", "html_url": "https://github.com/presbrey", "followers_url": "https://api.github.com/users/presbrey/followers", "following_url": "https://api.github.com/users/presbrey/following{/other_user}", "gists_url": "https://api.github.com/users/presbrey/gists{/gist_id}", "starred_url": "https://api.github.com/users/presbrey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/presbrey/subscriptions", "organizations_url": "https://api.github.com/users/presbrey/orgs", "repos_url": "https://api.github.com/users/presbrey/repos", "events_url": "https://api.github.com/users/presbrey/events{/privacy}", "received_events_url": "https://api.github.com/users/presbrey/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
3
2024-09-03T21:32:00
2024-09-04T13:47:57
null
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6615", "html_url": "https://github.com/ollama/ollama/pull/6615", "diff_url": "https://github.com/ollama/ollama/pull/6615.diff", "patch_url": "https://github.com/ollama/ollama/pull/6615.patch", "merged_at": null }
This method is useful for identifying and integrating an `api.Client` that has already been set up by another package or caller. We return a copy of `base` so that callers cannot modify the `api.Client`'s internal state through it (see the sketch after this record).
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6615/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6615/timeline
null
null
true
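A rough illustration of the accessor described in that pull request is sketched below. It is not the PR's actual diff; it only shows the stated design choice of returning a copy of `base`, so that callers cannot mutate the client's parsed address through the returned pointer.

```go
// Hypothetical sketch of the accessor described above (not the PR's actual diff).
// Assumes the client keeps its parsed base address in a field named base.
package api

import "net/url"

type Client struct {
	base *url.URL
	// other fields, such as the underlying HTTP client, elided for brevity
}

// BaseURL reports the address the client was configured with. It returns a
// copy so callers cannot mutate the client's internal state via the pointer.
func (c *Client) BaseURL() *url.URL {
	u := *c.base
	return &u
}
```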
https://api.github.com/repos/ollama/ollama/issues/2461
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2461/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2461/comments
https://api.github.com/repos/ollama/ollama/issues/2461/events
https://github.com/ollama/ollama/issues/2461
2,129,868,494
I_kwDOJ0Z1Ps5-8zbO
2,461
Source for the ollama/quantize docker image missing in the repo
{ "login": "netroy", "id": 196144, "node_id": "MDQ6VXNlcjE5NjE0NA==", "avatar_url": "https://avatars.githubusercontent.com/u/196144?v=4", "gravatar_id": "", "url": "https://api.github.com/users/netroy", "html_url": "https://github.com/netroy", "followers_url": "https://api.github.com/users/netroy/followers", "following_url": "https://api.github.com/users/netroy/following{/other_user}", "gists_url": "https://api.github.com/users/netroy/gists{/gist_id}", "starred_url": "https://api.github.com/users/netroy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/netroy/subscriptions", "organizations_url": "https://api.github.com/users/netroy/orgs", "repos_url": "https://api.github.com/users/netroy/repos", "events_url": "https://api.github.com/users/netroy/events{/privacy}", "received_events_url": "https://api.github.com/users/netroy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-02-12T10:48:40
2024-03-12T02:14:28
2024-03-12T02:14:28
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi, I was trying to make some changes to the `ollama/quantize` image, only to realize that there is no Dockerfile for it in this repo.
{ "login": "hoyyeva", "id": 63033505, "node_id": "MDQ6VXNlcjYzMDMzNTA1", "avatar_url": "https://avatars.githubusercontent.com/u/63033505?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hoyyeva", "html_url": "https://github.com/hoyyeva", "followers_url": "https://api.github.com/users/hoyyeva/followers", "following_url": "https://api.github.com/users/hoyyeva/following{/other_user}", "gists_url": "https://api.github.com/users/hoyyeva/gists{/gist_id}", "starred_url": "https://api.github.com/users/hoyyeva/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hoyyeva/subscriptions", "organizations_url": "https://api.github.com/users/hoyyeva/orgs", "repos_url": "https://api.github.com/users/hoyyeva/repos", "events_url": "https://api.github.com/users/hoyyeva/events{/privacy}", "received_events_url": "https://api.github.com/users/hoyyeva/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2461/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2461/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6320
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6320/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6320/comments
https://api.github.com/repos/ollama/ollama/issues/6320/events
https://github.com/ollama/ollama/issues/6320
2,460,734,373
I_kwDOJ0Z1Ps6Sq9Ol
6,320
Ubuntu based rocm images
{ "login": "kunaltyagi", "id": 2657068, "node_id": "MDQ6VXNlcjI2NTcwNjg=", "avatar_url": "https://avatars.githubusercontent.com/u/2657068?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kunaltyagi", "html_url": "https://github.com/kunaltyagi", "followers_url": "https://api.github.com/users/kunaltyagi/followers", "following_url": "https://api.github.com/users/kunaltyagi/following{/other_user}", "gists_url": "https://api.github.com/users/kunaltyagi/gists{/gist_id}", "starred_url": "https://api.github.com/users/kunaltyagi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kunaltyagi/subscriptions", "organizations_url": "https://api.github.com/users/kunaltyagi/orgs", "repos_url": "https://api.github.com/users/kunaltyagi/repos", "events_url": "https://api.github.com/users/kunaltyagi/events{/privacy}", "received_events_url": "https://api.github.com/users/kunaltyagi/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
3
2024-08-12T11:25:18
2024-10-16T20:16:02
2024-09-02T23:48:50
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Currently, the rocm image is based on CentOS. Could we add Ubuntu-based rocm images as well?
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6320/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6320/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6204
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6204/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6204/comments
https://api.github.com/repos/ollama/ollama/issues/6204/events
https://github.com/ollama/ollama/issues/6204
2,451,296,387
I_kwDOJ0Z1Ps6SG9CD
6,204
The Quickstart section in README is missing the 'ollama start' command
{ "login": "yurivict", "id": 271906, "node_id": "MDQ6VXNlcjI3MTkwNg==", "avatar_url": "https://avatars.githubusercontent.com/u/271906?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yurivict", "html_url": "https://github.com/yurivict", "followers_url": "https://api.github.com/users/yurivict/followers", "following_url": "https://api.github.com/users/yurivict/following{/other_user}", "gists_url": "https://api.github.com/users/yurivict/gists{/gist_id}", "starred_url": "https://api.github.com/users/yurivict/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yurivict/subscriptions", "organizations_url": "https://api.github.com/users/yurivict/orgs", "repos_url": "https://api.github.com/users/yurivict/repos", "events_url": "https://api.github.com/users/yurivict/events{/privacy}", "received_events_url": "https://api.github.com/users/yurivict/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
22
2024-08-06T16:40:55
2024-09-05T19:05:36
2024-09-05T19:01:22
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? People who run ollama for the first time wouldn't know that 'ollama start' needs to be run. ### OS Linux ### GPU _No response_ ### CPU Intel ### Ollama version 0.3.4
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6204/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6204/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3626
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3626/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3626/comments
https://api.github.com/repos/ollama/ollama/issues/3626/events
https://github.com/ollama/ollama/pull/3626
2,241,624,969
PR_kwDOJ0Z1Ps5skgGy
3,626
Add podman-ollama to terminal apps
{ "login": "ericcurtin", "id": 1694275, "node_id": "MDQ6VXNlcjE2OTQyNzU=", "avatar_url": "https://avatars.githubusercontent.com/u/1694275?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ericcurtin", "html_url": "https://github.com/ericcurtin", "followers_url": "https://api.github.com/users/ericcurtin/followers", "following_url": "https://api.github.com/users/ericcurtin/following{/other_user}", "gists_url": "https://api.github.com/users/ericcurtin/gists{/gist_id}", "starred_url": "https://api.github.com/users/ericcurtin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ericcurtin/subscriptions", "organizations_url": "https://api.github.com/users/ericcurtin/orgs", "repos_url": "https://api.github.com/users/ericcurtin/repos", "events_url": "https://api.github.com/users/ericcurtin/events{/privacy}", "received_events_url": "https://api.github.com/users/ericcurtin/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-04-13T15:31:06
2024-04-23T07:05:42
2024-04-23T00:13:23
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3626", "html_url": "https://github.com/ollama/ollama/pull/3626", "diff_url": "https://github.com/ollama/ollama/pull/3626.diff", "patch_url": "https://github.com/ollama/ollama/pull/3626.patch", "merged_at": "2024-04-23T00:13:23" }
The goal of podman-ollama is to make AI even more boring.
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3626/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 1, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3626/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2767
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2767/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2767/comments
https://api.github.com/repos/ollama/ollama/issues/2767/events
https://github.com/ollama/ollama/issues/2767
2,154,654,640
I_kwDOJ0Z1Ps6AbWuw
2,767
Fully unload GPU memory on NVIDIA non-VMM GPUs when idle
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2024-02-26T16:56:40
2024-04-02T17:49:46
2024-04-02T17:49:46
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
The fix for #1848 works for VMM GPUs, but still leaves remaining memory allocations for non-VMM GPUs.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2767/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2767/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7004
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7004/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7004/comments
https://api.github.com/repos/ollama/ollama/issues/7004/events
https://github.com/ollama/ollama/issues/7004
2,553,284,630
I_kwDOJ0Z1Ps6YMAgW
7,004
Allow CodeLlama to use tools
{ "login": "mikegehard", "id": 128210, "node_id": "MDQ6VXNlcjEyODIxMA==", "avatar_url": "https://avatars.githubusercontent.com/u/128210?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mikegehard", "html_url": "https://github.com/mikegehard", "followers_url": "https://api.github.com/users/mikegehard/followers", "following_url": "https://api.github.com/users/mikegehard/following{/other_user}", "gists_url": "https://api.github.com/users/mikegehard/gists{/gist_id}", "starred_url": "https://api.github.com/users/mikegehard/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mikegehard/subscriptions", "organizations_url": "https://api.github.com/users/mikegehard/orgs", "repos_url": "https://api.github.com/users/mikegehard/repos", "events_url": "https://api.github.com/users/mikegehard/events{/privacy}", "received_events_url": "https://api.github.com/users/mikegehard/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
4
2024-09-27T16:08:14
2024-11-17T15:02:09
2024-11-17T15:02:09
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I would like to build a tool that uses test output to help an AI assistant iteratively perform a software refactoring. In order to do this, I need to allow the model to run the code, and I was thinking that an external tool would be perfect for that. But as of today, it doesn't look like the CodeLlama model has that capability. It looks like Llama3.2 has that ability, so I'm not completely blocked, but I thought that using a model tuned for code/software development would give me better results. (A tool-enabled request is sketched after this record.)
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7004/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7004/timeline
null
completed
false
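For context on the request above: Ollama's chat endpoint takes tool definitions in a `tools` array, and only models whose chat templates support tool calling will return `tool_calls`, which is why the reporter fell back to Llama3.2. The sketch below shows what a tool-enabled `/api/chat` request can look like; the model name and the `run_tests` tool are hypothetical.

```go
// Hedged sketch: a /api/chat request carrying a tool definition, assuming a
// model whose template supports tool calling. The run_tests tool is a
// hypothetical function exposed by the calling application.
package main

import (
	"bytes"
	"encoding/json"
	"io"
	"net/http"
	"os"
)

func main() {
	body, _ := json.Marshal(map[string]any{
		"model":  "llama3.2",
		"stream": false,
		"messages": []map[string]string{
			{"role": "user", "content": "Run the test suite and summarise the failures."},
		},
		"tools": []map[string]any{{
			"type": "function",
			"function": map[string]any{
				"name":        "run_tests", // hypothetical tool implemented by the caller
				"description": "Run the project's test suite and return its output",
				"parameters": map[string]any{
					"type":       "object",
					"properties": map[string]any{},
				},
			},
		}},
	})
	resp, err := http.Post("http://localhost:11434/api/chat", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // the reply's message.tool_calls holds any requested tool invocations
}
```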
https://api.github.com/repos/ollama/ollama/issues/7934
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7934/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7934/comments
https://api.github.com/repos/ollama/ollama/issues/7934/events
https://github.com/ollama/ollama/issues/7934
2,718,958,042
I_kwDOJ0Z1Ps6iEAHa
7,934
Blocker messages are not blocking explicit requests
{ "login": "meninblack111", "id": 190666281, "node_id": "U_kgDOC11WKQ", "avatar_url": "https://avatars.githubusercontent.com/u/190666281?v=4", "gravatar_id": "", "url": "https://api.github.com/users/meninblack111", "html_url": "https://github.com/meninblack111", "followers_url": "https://api.github.com/users/meninblack111/followers", "following_url": "https://api.github.com/users/meninblack111/following{/other_user}", "gists_url": "https://api.github.com/users/meninblack111/gists{/gist_id}", "starred_url": "https://api.github.com/users/meninblack111/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/meninblack111/subscriptions", "organizations_url": "https://api.github.com/users/meninblack111/orgs", "repos_url": "https://api.github.com/users/meninblack111/repos", "events_url": "https://api.github.com/users/meninblack111/events{/privacy}", "received_events_url": "https://api.github.com/users/meninblack111/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
8
2024-12-04T23:04:46
2024-12-04T23:23:50
2024-12-04T23:23:06
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? If I am testing Ollama's ability to block explicit requests, it blocks the request once, but if I then tell it to write it again it ignores the blocker/filter. ### OS Linux, Windows ### GPU Nvidia, AMD ### CPU AMD ### Ollama version 0.3.13
{ "login": "meninblack111", "id": 190666281, "node_id": "U_kgDOC11WKQ", "avatar_url": "https://avatars.githubusercontent.com/u/190666281?v=4", "gravatar_id": "", "url": "https://api.github.com/users/meninblack111", "html_url": "https://github.com/meninblack111", "followers_url": "https://api.github.com/users/meninblack111/followers", "following_url": "https://api.github.com/users/meninblack111/following{/other_user}", "gists_url": "https://api.github.com/users/meninblack111/gists{/gist_id}", "starred_url": "https://api.github.com/users/meninblack111/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/meninblack111/subscriptions", "organizations_url": "https://api.github.com/users/meninblack111/orgs", "repos_url": "https://api.github.com/users/meninblack111/repos", "events_url": "https://api.github.com/users/meninblack111/events{/privacy}", "received_events_url": "https://api.github.com/users/meninblack111/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7934/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7934/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/266
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/266/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/266/comments
https://api.github.com/repos/ollama/ollama/issues/266/events
https://github.com/ollama/ollama/issues/266
1,834,606,284
I_kwDOJ0Z1Ps5tWd7M
266
Building on Linux fails
{ "login": "kisamoto", "id": 1744908, "node_id": "MDQ6VXNlcjE3NDQ5MDg=", "avatar_url": "https://avatars.githubusercontent.com/u/1744908?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kisamoto", "html_url": "https://github.com/kisamoto", "followers_url": "https://api.github.com/users/kisamoto/followers", "following_url": "https://api.github.com/users/kisamoto/following{/other_user}", "gists_url": "https://api.github.com/users/kisamoto/gists{/gist_id}", "starred_url": "https://api.github.com/users/kisamoto/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kisamoto/subscriptions", "organizations_url": "https://api.github.com/users/kisamoto/orgs", "repos_url": "https://api.github.com/users/kisamoto/repos", "events_url": "https://api.github.com/users/kisamoto/events{/privacy}", "received_events_url": "https://api.github.com/users/kisamoto/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-08-03T09:01:27
2023-08-03T09:11:28
2023-08-03T09:11:28
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
## System info Fedora 38 64GB Memory `go version go1.20 linux/amd64` ## Reproduction steps 1. Clone repo 2. Run `go build .` ## Expected result Ollama successfully builds ## Actual result Error trace (below) ### Full stack trace ``` # github.com/jmorganca/ollama/llama In file included from /usr/include/c++/13/string:51, from llama-util.h:41, from llama.cpp:35: In static member function ‘static void std::__copy_move<false, false, std::random_access_iterator_tag>::__assign_one(_Tp*, _Up*) [with _Tp = const llama_grammar_element*; _Up = const llama_grammar_element* const]’, inlined from ‘static _Up* std::__copy_move<_IsMove, true, std::random_access_iterator_tag>::__copy_m(_Tp*, _Tp*, _Up*) [with _Tp = const llama_grammar_element* const; _Up = const llama_grammar_element*; bool _IsMove = false]’ at /usr/include/c++/13/bits/stl_algobase.h:440:20, inlined from ‘_OI std::__copy_move_a2(_II, _II, _OI) [with bool _IsMove = false; _II = const llama_grammar_element* const*; _OI = const llama_grammar_element**]’ at /usr/include/c++/13/bits/stl_algobase.h:506:30, inlined from ‘_OI std::__copy_move_a1(_II, _II, _OI) [with bool _IsMove = false; _II = const llama_grammar_element* const*; _OI = const llama_grammar_element**]’ at /usr/include/c++/13/bits/stl_algobase.h:533:42, inlined from ‘_OI std::__copy_move_a(_II, _II, _OI) [with bool _IsMove = false; _II = __gnu_cxx::__normal_iterator<const llama_grammar_element* const*, vector<const llama_grammar_element*> >; _OI = const llama_grammar_element**]’ at /usr/include/c++/13/bits/stl_algobase.h:540:31, inlined from ‘_OI std::copy(_II, _II, _OI) [with _II = __gnu_cxx::__normal_iterator<const llama_grammar_element* const*, vector<const llama_grammar_element*> >; _OI = const llama_grammar_element**]’ at /usr/include/c++/13/bits/stl_algobase.h:633:7, inlined from ‘static _ForwardIterator std::__uninitialized_copy<true>::__uninit_copy(_InputIterator, _InputIterator, _ForwardIterator) [with _InputIterator = __gnu_cxx::__normal_iterator<const llama_grammar_element* const*, std::vector<const llama_grammar_element*> >; _ForwardIterator = const llama_grammar_element**]’ at /usr/include/c++/13/bits/stl_uninitialized.h:147:27, inlined from ‘_ForwardIterator std::uninitialized_copy(_InputIterator, _InputIterator, _ForwardIterator) [with _InputIterator = __gnu_cxx::__normal_iterator<const llama_grammar_element* const*, vector<const llama_grammar_element*> >; _ForwardIterator = const llama_grammar_element**]’ at /usr/include/c++/13/bits/stl_uninitialized.h:185:15, inlined from ‘_ForwardIterator std::__uninitialized_copy_a(_InputIterator, _InputIterator, _ForwardIterator, allocator<_Tp>&) [with _InputIterator = __gnu_cxx::__normal_iterator<const llama_grammar_element* const*, vector<const llama_grammar_element*> >; _ForwardIterator = const llama_grammar_element**; _Tp = const llama_grammar_element*]’ at /usr/include/c++/13/bits/stl_uninitialized.h:373:37, inlined from ‘std::vector<_Tp, _Alloc>::vector(const std::vector<_Tp, _Alloc>&) [with _Tp = const llama_grammar_element*; _Alloc = std::allocator<const llama_grammar_element*>]’ at /usr/include/c++/13/bits/stl_vector.h:603:31, inlined from ‘void std::__new_allocator<_Tp>::construct(_Up*, _Args&& ...) 
[with _Up = std::vector<const llama_grammar_element*>; _Args = {const std::vector<const llama_grammar_element*, std::allocator<const llama_grammar_element*> >&}; _Tp = std::vector<const llama_grammar_element*>]’ at /usr/include/c++/13/bits/new_allocator.h:187:4, inlined from ‘static void std::allocator_traits<std::allocator<_CharT> >::construct(allocator_type&, _Up*, _Args&& ...) [with _Up = std::vector<const llama_grammar_element*>; _Args = {const std::vector<const llama_grammar_element*, std::allocator<const llama_grammar_element*> >&}; _Tp = std::vector<const llama_grammar_element*>]’ at /usr/include/c++/13/bits/alloc_traits.h:537:17, inlined from ‘void std::vector<_Tp, _Alloc>::push_back(const value_type&) [with _Tp = std::vector<const llama_grammar_element*>; _Alloc = std::allocator<std::vector<const llama_grammar_element*> >]’ at /usr/include/c++/13/bits/stl_vector.h:1283:30, inlined from ‘void llama_grammar_advance_stack(const std::vector<std::vector<llama_grammar_element> >&, const std::vector<const llama_grammar_element*>&, std::vector<std::vector<const llama_grammar_element*> >&)’ at llama.cpp:2186:29: /usr/include/c++/13/bits/stl_algobase.h:398:17: warning: array subscript 0 is outside array bounds of ‘const llama_grammar_element* [0]’ [-Warray-bounds=] 398 | { *__to = *__from; } | ~~~~~~^~~~~~~~~ In file included from /usr/include/c++/13/x86_64-redhat-linux/bits/c++allocator.h:33, from /usr/include/c++/13/bits/allocator.h:46, from /usr/include/c++/13/string:43: In member function ‘_Tp* std::__new_allocator<_Tp>::allocate(size_type, const void*) [with _Tp = const llama_grammar_element*]’, inlined from ‘static _Tp* std::allocator_traits<std::allocator<_CharT> >::allocate(allocator_type&, size_type) [with _Tp = const llama_grammar_element*]’ at /usr/include/c++/13/bits/alloc_traits.h:482:28, inlined from ‘std::_Vector_base<_Tp, _Alloc>::pointer std::_Vector_base<_Tp, _Alloc>::_M_allocate(std::size_t) [with _Tp = const llama_grammar_element*; _Alloc = std::allocator<const llama_grammar_element*>]’ at /usr/include/c++/13/bits/stl_vector.h:378:33, inlined from ‘std::_Vector_base<_Tp, _Alloc>::pointer std::_Vector_base<_Tp, _Alloc>::_M_allocate(std::size_t) [with _Tp = const llama_grammar_element*; _Alloc = std::allocator<const llama_grammar_element*>]’ at /usr/include/c++/13/bits/stl_vector.h:375:7, inlined from ‘void std::_Vector_base<_Tp, _Alloc>::_M_create_storage(std::size_t) [with _Tp = const llama_grammar_element*; _Alloc = std::allocator<const llama_grammar_element*>]’ at /usr/include/c++/13/bits/stl_vector.h:395:44, inlined from ‘std::_Vector_base<_Tp, _Alloc>::_Vector_base(std::size_t, const allocator_type&) [with _Tp = const llama_grammar_element*; _Alloc = std::allocator<const llama_grammar_element*>]’ at /usr/include/c++/13/bits/stl_vector.h:332:26, inlined from ‘std::vector<_Tp, _Alloc>::vector(const std::vector<_Tp, _Alloc>&) [with _Tp = const llama_grammar_element*; _Alloc = std::allocator<const llama_grammar_element*>]’ at /usr/include/c++/13/bits/stl_vector.h:600:61, inlined from ‘void std::__new_allocator<_Tp>::construct(_Up*, _Args&& ...) [with _Up = std::vector<const llama_grammar_element*>; _Args = {const std::vector<const llama_grammar_element*, std::allocator<const llama_grammar_element*> >&}; _Tp = std::vector<const llama_grammar_element*>]’ at /usr/include/c++/13/bits/new_allocator.h:187:4, inlined from ‘static void std::allocator_traits<std::allocator<_CharT> >::construct(allocator_type&, _Up*, _Args&& ...) 
[with _Up = std::vector<const llama_grammar_element*>; _Args = {const std::vector<const llama_grammar_element*, std::allocator<const llama_grammar_element*> >&}; _Tp = std::vector<const llama_grammar_element*>]’ at /usr/include/c++/13/bits/alloc_traits.h:537:17, inlined from ‘void std::vector<_Tp, _Alloc>::push_back(const value_type&) [with _Tp = std::vector<const llama_grammar_element*>; _Alloc = std::allocator<std::vector<const llama_grammar_element*> >]’ at /usr/include/c++/13/bits/stl_vector.h:1283:30, inlined from ‘void llama_grammar_advance_stack(const std::vector<std::vector<llama_grammar_element> >&, const std::vector<const llama_grammar_element*>&, std::vector<std::vector<const llama_grammar_element*> >&)’ at llama.cpp:2186:29: /usr/include/c++/13/bits/new_allocator.h:147:55: note: object of size 0 allocated by ‘operator new’ 147 | return static_cast<_Tp*>(_GLIBCXX_OPERATOR_NEW(__n * sizeof(_Tp))); | ^ ```
{ "login": "kisamoto", "id": 1744908, "node_id": "MDQ6VXNlcjE3NDQ5MDg=", "avatar_url": "https://avatars.githubusercontent.com/u/1744908?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kisamoto", "html_url": "https://github.com/kisamoto", "followers_url": "https://api.github.com/users/kisamoto/followers", "following_url": "https://api.github.com/users/kisamoto/following{/other_user}", "gists_url": "https://api.github.com/users/kisamoto/gists{/gist_id}", "starred_url": "https://api.github.com/users/kisamoto/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kisamoto/subscriptions", "organizations_url": "https://api.github.com/users/kisamoto/orgs", "repos_url": "https://api.github.com/users/kisamoto/repos", "events_url": "https://api.github.com/users/kisamoto/events{/privacy}", "received_events_url": "https://api.github.com/users/kisamoto/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/266/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/266/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3373
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3373/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3373/comments
https://api.github.com/repos/ollama/ollama/issues/3373/events
https://github.com/ollama/ollama/issues/3373
2,211,280,062
I_kwDOJ0Z1Ps6DzXS-
3,373
MobaXterm terminal not working
{ "login": "jackel66", "id": 19675411, "node_id": "MDQ6VXNlcjE5Njc1NDEx", "avatar_url": "https://avatars.githubusercontent.com/u/19675411?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jackel66", "html_url": "https://github.com/jackel66", "followers_url": "https://api.github.com/users/jackel66/followers", "following_url": "https://api.github.com/users/jackel66/following{/other_user}", "gists_url": "https://api.github.com/users/jackel66/gists{/gist_id}", "starred_url": "https://api.github.com/users/jackel66/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jackel66/subscriptions", "organizations_url": "https://api.github.com/users/jackel66/orgs", "repos_url": "https://api.github.com/users/jackel66/repos", "events_url": "https://api.github.com/users/jackel66/events{/privacy}", "received_events_url": "https://api.github.com/users/jackel66/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-03-27T16:41:08
2024-04-15T19:38:29
2024-04-15T19:38:29
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
When using the Windows preview and opening a MobaXterm terminal, I execute `ollama.exe run llama2` but nothing happens; it seems to hang for some reason. I installed the LLM using a regular CMD prompt in Windows, where it downloads and runs fine, but it will not work in MobaXterm. The docs say to use your favorite terminal, but maybe this is a limitation?
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3373/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3373/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2053
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2053/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2053/comments
https://api.github.com/repos/ollama/ollama/issues/2053/events
https://github.com/ollama/ollama/issues/2053
2,088,623,214
I_kwDOJ0Z1Ps58fdxu
2,053
Request -> Remote server deployment tutorial w/ API access for AI apps
{ "login": "squatchydev9000", "id": 39635337, "node_id": "MDQ6VXNlcjM5NjM1MzM3", "avatar_url": "https://avatars.githubusercontent.com/u/39635337?v=4", "gravatar_id": "", "url": "https://api.github.com/users/squatchydev9000", "html_url": "https://github.com/squatchydev9000", "followers_url": "https://api.github.com/users/squatchydev9000/followers", "following_url": "https://api.github.com/users/squatchydev9000/following{/other_user}", "gists_url": "https://api.github.com/users/squatchydev9000/gists{/gist_id}", "starred_url": "https://api.github.com/users/squatchydev9000/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/squatchydev9000/subscriptions", "organizations_url": "https://api.github.com/users/squatchydev9000/orgs", "repos_url": "https://api.github.com/users/squatchydev9000/repos", "events_url": "https://api.github.com/users/squatchydev9000/events{/privacy}", "received_events_url": "https://api.github.com/users/squatchydev9000/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
15
2024-01-18T16:16:33
2024-05-29T20:44:56
2024-03-11T18:05:31
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hey Ollama team, thanks for all that you are doing. Question/request: can you please demonstrate how we can deploy Ollama to a remote server? I have tried using ssh, but I cannot, for the life of me, figure out how to expose it as an API I can use with autogen/crewai/superagi/etc. **I bet many are also stuck here.** Sure, we can get things going locally, but almost no one actually owns an M3 Mac to run things locally, so local dev is tough, and for production AI apps we need an API solution for a remote Ollama install. I believe the world needs Ollama and open-source options more than ever as the big corporations push us towards the abyss; an API/deployment tutorial or package would be the keystone in protecting humanity from the big corps.
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2053/reactions", "total_count": 6, "+1": 6, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2053/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4297
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4297/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4297/comments
https://api.github.com/repos/ollama/ollama/issues/4297/events
https://github.com/ollama/ollama/issues/4297
2,288,393,155
I_kwDOJ0Z1Ps6IZhvD
4,297
Please update go module github.com/chewxy/math32 to the last
{ "login": "HougeLangley", "id": 1161594, "node_id": "MDQ6VXNlcjExNjE1OTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/1161594?v=4", "gravatar_id": "", "url": "https://api.github.com/users/HougeLangley", "html_url": "https://github.com/HougeLangley", "followers_url": "https://api.github.com/users/HougeLangley/followers", "following_url": "https://api.github.com/users/HougeLangley/following{/other_user}", "gists_url": "https://api.github.com/users/HougeLangley/gists{/gist_id}", "starred_url": "https://api.github.com/users/HougeLangley/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/HougeLangley/subscriptions", "organizations_url": "https://api.github.com/users/HougeLangley/orgs", "repos_url": "https://api.github.com/users/HougeLangley/repos", "events_url": "https://api.github.com/users/HougeLangley/events{/privacy}", "received_events_url": "https://api.github.com/users/HougeLangley/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-05-09T20:17:44
2024-06-28T16:13:19
2024-05-11T04:39:28
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? https://github.com/chewxy/math32/issues/46#issuecomment-2103347015 ``` sipeed @ lpi4a in ~/ollama on git:main o [3:10:57] $ go generate ./... go: downloading go1.22.0 (linux/riscv64) go: downloading github.com/google/uuid v1.0.0 go: downloading golang.org/x/crypto v0.14.0 go: downloading google.golang.org/protobuf v1.30.0 go: downloading github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1 go: downloading github.com/mitchellh/mapstructure v1.5.0 go: downloading github.com/nlpodyssey/gopickle v0.3.0 go: downloading github.com/pdevine/tensor v0.0.0-20240228013915-64ccaa8d9ca9 go: downloading github.com/x448/float16 v0.8.4 go: downloading golang.org/x/sys v0.13.0 go: downloading golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 go: downloading golang.org/x/sync v0.3.0 go: downloading github.com/gin-gonic/gin v1.9.1 go: downloading golang.org/x/term v0.13.0 go: downloading github.com/emirpasic/gods v1.18.1 go: downloading github.com/gin-contrib/cors v1.4.0 go: downloading github.com/containerd/console v1.0.3 go: downloading github.com/olekukonko/tablewriter v0.0.5 go: downloading github.com/spf13/cobra v1.7.0 go: downloading golang.org/x/text v0.14.0 go: downloading github.com/apache/arrow/go/arrow v0.0.0-20201229220542-30ce2eb5d4dc go: downloading github.com/chewxy/hm v1.0.0 go: downloading github.com/chewxy/math32 v1.0.8 go: downloading github.com/google/flatbuffers v1.12.0 go: downloading github.com/pkg/errors v0.9.1 go: downloading go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 go: downloading gonum.org/v1/gonum v0.8.2 go: downloading gorgonia.org/vecf32 v0.9.0 go: downloading gorgonia.org/vecf64 v0.9.0 go: downloading github.com/gin-contrib/sse v0.1.0 go: downloading github.com/mattn/go-isatty v0.0.19 go: downloading golang.org/x/net v0.17.0 go: downloading github.com/mattn/go-runewidth v0.0.14 go: downloading github.com/spf13/pflag v1.0.5 go: downloading golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 go: downloading github.com/xtgo/set v1.0.0 go: downloading github.com/gogo/protobuf v1.3.2 go: downloading github.com/golang/protobuf v1.5.0 go: downloading github.com/go-playground/validator/v10 v10.14.0 go: downloading github.com/pelletier/go-toml/v2 v2.0.8 go: downloading github.com/ugorji/go/codec v1.2.11 go: downloading gopkg.in/yaml.v3 v3.0.1 go: downloading github.com/rivo/uniseg v0.2.0 go: downloading github.com/gabriel-vasile/mimetype v1.4.2 go: downloading github.com/go-playground/universal-translator v0.18.1 go: downloading github.com/leodido/go-urn v1.2.4 go: downloading github.com/go-playground/locales v0.14.1 + set -o pipefail + echo 'Starting linux generate script' Starting linux generate script + '[' -z '' ']' + '[' -x /usr/local/cuda/bin/nvcc ']' ++ command -v nvcc + export CUDACXX= + CUDACXX= + COMMON_CMAKE_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off' ++ dirname ./gen_linux.sh + source ./gen_common.sh + init_vars + case "${GOARCH}" in ++ uname -m ++ sed -e s/aarch64/arm64/g + ARCH=riscv64 + LLAMACPP_DIR=../llama.cpp + CMAKE_DEFS= + CMAKE_TARGETS='--target ollama_llama_server' + echo '' + grep -- -g + CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off ' + case $(uname -s) in ++ uname -s + LIB_EXT=so + WHOLE_ARCHIVE=-Wl,--whole-archive + NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive + GCC_ARCH= + '[' -z '' ']' + CMAKE_CUDA_ARCHITECTURES='50;52;61;70;75;80' + git_module_setup + '[' -n '' ']' + '[' -d 
../llama.cpp/gguf ']' + git submodule init + git submodule update --force ../llama.cpp 子模组路径 '../llama.cpp':检出 '952d03dbead16e4dbdd1d3458486340673cc2465' + apply_patches + grep ollama ../llama.cpp/CMakeLists.txt + echo 'add_subdirectory(../ext_server ext_server) # ollama' ++ ls -A ../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-clip-fix.diff + '[' -n '../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-clip-fix.diff' ']' + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/02-clip-log.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout examples/llava/clip.cpp 从索引区更新了 0 个路径 + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/03-load_exception.diff ++ cut -f2- -d/ ++ cut -f2 '-d ' + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout llama.cpp 从索引区更新了 0 个路径 + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/04-metal.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout ggml-metal.m 从索引区更新了 0 个路径 + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/05-clip-fix.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout examples/llava/clip.cpp 从索引区更新了 0 个路径 + for patch in ../patches/*.diff + cd ../llama.cpp + git apply ../patches/02-clip-log.diff + for patch in ../patches/*.diff + cd ../llama.cpp + git apply ../patches/03-load_exception.diff + for patch in ../patches/*.diff + cd ../llama.cpp + git apply ../patches/04-metal.diff + for patch in ../patches/*.diff + cd ../llama.cpp + git apply ../patches/05-clip-fix.diff + init_vars + case "${GOARCH}" in ++ uname -m ++ sed -e s/aarch64/arm64/g + ARCH=riscv64 + LLAMACPP_DIR=../llama.cpp + CMAKE_DEFS= + CMAKE_TARGETS='--target ollama_llama_server' + echo '' + grep -- -g + CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off ' + case $(uname -s) in ++ uname -s + LIB_EXT=so + WHOLE_ARCHIVE=-Wl,--whole-archive + NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive + GCC_ARCH= + '[' -z '50;52;61;70;75;80' ']' + '[' -z '' -o '' = static ']' + init_vars + case "${GOARCH}" in ++ uname -m ++ sed -e s/aarch64/arm64/g + ARCH=riscv64 + LLAMACPP_DIR=../llama.cpp + CMAKE_DEFS= + CMAKE_TARGETS='--target ollama_llama_server' + echo '' + grep -- -g + CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off ' + case $(uname -s) in ++ uname -s + LIB_EXT=so + WHOLE_ARCHIVE=-Wl,--whole-archive + NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive + GCC_ARCH= + '[' -z '50;52;61;70;75;80' ']' + CMAKE_TARGETS='--target llama --target ggml' + CMAKE_DEFS='-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off ' + BUILD_DIR=../build/linux/riscv64_static + echo 'Building static library' Building static library + build + cmake -S ../llama.cpp -B ../build/linux/riscv64_static -DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off -- The C compiler identification is GNU 13.2.0 -- The CXX compiler identification is GNU 13.2.0 -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- 
Check for working C compiler: /usr/bin/cc - skipped -- Detecting C compile features -- Detecting C compile features - done -- Detecting CXX compiler ABI info -- Detecting CXX compiler ABI info - done -- Check for working CXX compiler: /usr/bin/c++ - skipped -- Detecting CXX compile features -- Detecting CXX compile features - done -- Found Git: /usr/bin/git (found version "2.40.1") -- Performing Test CMAKE_HAVE_LIBC_PTHREAD -- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success -- Found Threads: TRUE -- Warning: ccache not found - consider installing it for faster compilation or disable this warning with LLAMA_CCACHE=OFF -- CMAKE_SYSTEM_PROCESSOR: riscv64 -- Unknown architecture -- Configuring done -- Generating done -- Build files have been written to: /home/sipeed/ollama/llm/build/linux/riscv64_static + cmake --build ../build/linux/riscv64_static --target llama --target ggml -j8 [ 33%] Building C object CMakeFiles/ggml.dir/ggml-alloc.c.o [ 33%] Building C object CMakeFiles/ggml.dir/ggml-backend.c.o [ 50%] Building CXX object CMakeFiles/ggml.dir/sgemm.cpp.o [ 50%] Building C object CMakeFiles/ggml.dir/ggml.c.o [ 50%] Building C object CMakeFiles/ggml.dir/ggml-quants.c.o [ 50%] Built target ggml [ 83%] Building CXX object CMakeFiles/llama.dir/llama.cpp.o [ 83%] Building CXX object CMakeFiles/llama.dir/unicode.cpp.o [ 83%] Building CXX object CMakeFiles/llama.dir/unicode-data.cpp.o [100%] Linking CXX static library libllama.a [100%] Built target llama [100%] Built target ggml + init_vars + case "${GOARCH}" in ++ uname -m ++ sed -e s/aarch64/arm64/g + ARCH=riscv64 + LLAMACPP_DIR=../llama.cpp + CMAKE_DEFS= + CMAKE_TARGETS='--target ollama_llama_server' + echo '' + grep -- -g + CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off ' + case $(uname -s) in ++ uname -s + LIB_EXT=so + WHOLE_ARCHIVE=-Wl,--whole-archive + NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive + GCC_ARCH= + '[' -z '50;52;61;70;75;80' ']' + '[' -z '' ']' + '[' -n '' ']' + COMMON_CPU_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off' + '[' -z '' -o '' = cpu ']' + init_vars + case "${GOARCH}" in ++ uname -m ++ sed -e s/aarch64/arm64/g + ARCH=riscv64 + LLAMACPP_DIR=../llama.cpp + CMAKE_DEFS= + CMAKE_TARGETS='--target ollama_llama_server' + echo '' + grep -- -g + CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off ' + case $(uname -s) in ++ uname -s + LIB_EXT=so + WHOLE_ARCHIVE=-Wl,--whole-archive + NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive + GCC_ARCH= + '[' -z '50;52;61;70;75;80' ']' + CMAKE_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off ' + BUILD_DIR=../build/linux/riscv64/cpu + echo 'Building LCD CPU' Building LCD CPU + build + cmake -S ../llama.cpp -B ../build/linux/riscv64/cpu -DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off -- The C compiler identification is GNU 13.2.0 -- The CXX compiler identification is GNU 13.2.0 -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- Check for working C compiler: /usr/bin/cc - skipped -- Detecting C compile features -- Detecting C compile features - done -- Detecting CXX compiler ABI info -- Detecting CXX compiler ABI info - done -- Check for working CXX compiler: /usr/bin/c++ - skipped -- Detecting CXX compile features -- Detecting CXX compile 
features - done -- Found Git: /usr/bin/git (found version "2.40.1") -- Performing Test CMAKE_HAVE_LIBC_PTHREAD -- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success -- Found Threads: TRUE -- Warning: ccache not found - consider installing it for faster compilation or disable this warning with LLAMA_CCACHE=OFF -- CMAKE_SYSTEM_PROCESSOR: riscv64 -- Unknown architecture -- Configuring done -- Generating done -- Build files have been written to: /home/sipeed/ollama/llm/build/linux/riscv64/cpu + cmake --build ../build/linux/riscv64/cpu --target ollama_llama_server -j8 [ 6%] Generating build details from Git [ 6%] Building C object CMakeFiles/ggml.dir/ggml.c.o [ 12%] Building C object CMakeFiles/ggml.dir/ggml-alloc.c.o [ 18%] Building C object CMakeFiles/ggml.dir/ggml-backend.c.o [ 18%] Building C object CMakeFiles/ggml.dir/ggml-quants.c.o -- Found Git: /usr/bin/git (found version "2.40.1") [ 25%] Building CXX object CMakeFiles/ggml.dir/sgemm.cpp.o [ 31%] Building CXX object common/CMakeFiles/build_info.dir/build-info.cpp.o [ 31%] Built target build_info [ 31%] Built target ggml [ 37%] Building CXX object CMakeFiles/llama.dir/llama.cpp.o [ 43%] Building CXX object CMakeFiles/llama.dir/unicode.cpp.o [ 43%] Building CXX object CMakeFiles/llama.dir/unicode-data.cpp.o [ 50%] Linking CXX static library libllama.a [ 50%] Built target llama [ 56%] Building CXX object examples/llava/CMakeFiles/llava.dir/clip.cpp.o [ 62%] Building CXX object examples/llava/CMakeFiles/llava.dir/llava.cpp.o [ 56%] Building CXX object common/CMakeFiles/common.dir/common.cpp.o [ 68%] Building CXX object common/CMakeFiles/common.dir/console.cpp.o [ 75%] Building CXX object common/CMakeFiles/common.dir/sampling.cpp.o [ 81%] Building CXX object common/CMakeFiles/common.dir/json-schema-to-grammar.cpp.o [ 81%] Building CXX object common/CMakeFiles/common.dir/grammar-parser.cpp.o [ 87%] Building CXX object common/CMakeFiles/common.dir/train.cpp.o [ 87%] Building CXX object common/CMakeFiles/common.dir/ngram-cache.cpp.o [ 93%] Linking CXX static library libcommon.a [ 93%] Built target llava [ 93%] Built target common [ 93%] Building CXX object ext_server/CMakeFiles/ollama_llama_server.dir/server.cpp.o [100%] Linking CXX executable ../bin/ollama_llama_server [100%] Built target ollama_llama_server + compress + echo 'Compressing payloads to reduce overall binary size...' Compressing payloads to reduce overall binary size... 
+ pids= + rm -rf '../build/linux/riscv64/cpu/bin/*.gz' + for f in ${BUILD_DIR}/bin/* + pids+=' 2078' + '[' -d ../build/linux/riscv64/cpu/lib ']' + gzip -n --best -f ../build/linux/riscv64/cpu/bin/ollama_llama_server + echo + for pid in ${pids} + wait 2078 + echo 'Finished compression' Finished compression + '[' riscv64 == x86_64 ']' + '[' -z '' ']' + '[' -d /usr/local/cuda/lib64 ']' + '[' -z '' ']' + '[' -d /opt/cuda/targets/x86_64-linux/lib ']' + '[' -z '' ']' + CUDART_LIB_DIR= + '[' -d '' ']' + '[' -z '' ']' + ROCM_PATH=/opt/rocm + '[' -z '' ']' + '[' -d /usr/lib/cmake/CLBlast ']' + '[' -d /opt/rocm ']' + cleanup + cd ../llama.cpp/ + git checkout CMakeLists.txt 从索引区更新了 1 个路径 ++ ls -A ../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-clip-fix.diff + '[' -n '../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-clip-fix.diff' ']' + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/02-clip-log.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout examples/llava/clip.cpp 从索引区更新了 1 个路径 + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/03-load_exception.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout llama.cpp 从索引区更新了 1 个路径 + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/04-metal.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout ggml-metal.m 从索引区更新了 1 个路径 + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/05-clip-fix.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout examples/llava/clip.cpp 从索引区更新了 0 个路径 ++ cd ../build/linux/riscv64/cpu/.. ++ echo cpu + echo 'go generate completed. LLM runners: cpu' go generate completed. LLM runners: cpu # sipeed @ lpi4a in ~/ollama on git:main o [3:27:53] $ go build . # github.com/chewxy/math32 ../go/pkg/mod/github.com/chewxy/math32@v1.0.8/exp.go:3:6: missing function body ../go/pkg/mod/github.com/chewxy/math32@v1.0.8/exp.go:57:6: missing function body ../go/pkg/mod/github.com/chewxy/math32@v1.0.8/sqrt.go:3:6: missing function body ../go/pkg/mod/github.com/chewxy/math32@v1.0.8/log.go:76:6: missing function body ../go/pkg/mod/github.com/chewxy/math32@v1.0.8/remainder.go:33:6: missing function body ``` ### OS Linux ### GPU Other ### CPU Other ### Ollama version 0.1.34
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4297/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4297/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5749
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5749/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5749/comments
https://api.github.com/repos/ollama/ollama/issues/5749/events
https://github.com/ollama/ollama/issues/5749
2,413,702,883
I_kwDOJ0Z1Ps6P3i7j
5,749
How to run in GPU-only mode
{ "login": "janglichao", "id": 1237692, "node_id": "MDQ6VXNlcjEyMzc2OTI=", "avatar_url": "https://avatars.githubusercontent.com/u/1237692?v=4", "gravatar_id": "", "url": "https://api.github.com/users/janglichao", "html_url": "https://github.com/janglichao", "followers_url": "https://api.github.com/users/janglichao/followers", "following_url": "https://api.github.com/users/janglichao/following{/other_user}", "gists_url": "https://api.github.com/users/janglichao/gists{/gist_id}", "starred_url": "https://api.github.com/users/janglichao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/janglichao/subscriptions", "organizations_url": "https://api.github.com/users/janglichao/orgs", "repos_url": "https://api.github.com/users/janglichao/repos", "events_url": "https://api.github.com/users/janglichao/events{/privacy}", "received_events_url": "https://api.github.com/users/janglichao/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6430601766, "node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg", "url": "https://api.github.com/repos/ollama/ollama/labels/nvidia", "name": "nvidia", "color": "8CDB00", "default": false, "description": "Issues relating to Nvidia GPUs and CUDA" }, { "id": 6677367769, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCL2Q", "url": "https://api.github.com/repos/ollama/ollama/labels/needs%20more%20info", "name": "needs more info", "color": "BA8041", "default": false, "description": "More information is needed to assist" } ]
open
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
14
2024-07-17T13:59:16
2025-01-26T18:53:01
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? My model sometimes runs half on the CPU and half on the GPU; when I run the `ollama ps` command it shows 49% on CPU and 51% on GPU. How can I configure it so the model always runs only on the GPU and never falls back to the CPU? Please help. ### OS Linux ### GPU _No response_ ### CPU _No response_ ### Ollama version _No response_
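One possible way to nudge this (a minimal sketch, assuming the `num_gpu` option and a placeholder model name, not an official fix) is to ask the server to offload all layers when making a request; if the model does not fit in VRAM it may still spill to the CPU or fail to load:

```bash
# Sketch only: request that all layers be offloaded to the GPU via the num_gpu option.
# "llama3" is a placeholder model name; a value larger than the layer count means "all layers".
curl http://localhost:11434/api/generate -d '{
  "model": "llama3",
  "prompt": "Hello",
  "options": { "num_gpu": 99 }
}'
```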
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5749/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/5749/timeline
null
reopened
false
https://api.github.com/repos/ollama/ollama/issues/7914
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7914/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7914/comments
https://api.github.com/repos/ollama/ollama/issues/7914/events
https://github.com/ollama/ollama/issues/7914
2,714,208,839
I_kwDOJ0Z1Ps6hx4pH
7,914
How to use a Modelfile to build a custom model when the gguf file has several sub-files?
{ "login": "cqray1990", "id": 32585434, "node_id": "MDQ6VXNlcjMyNTg1NDM0", "avatar_url": "https://avatars.githubusercontent.com/u/32585434?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cqray1990", "html_url": "https://github.com/cqray1990", "followers_url": "https://api.github.com/users/cqray1990/followers", "following_url": "https://api.github.com/users/cqray1990/following{/other_user}", "gists_url": "https://api.github.com/users/cqray1990/gists{/gist_id}", "starred_url": "https://api.github.com/users/cqray1990/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cqray1990/subscriptions", "organizations_url": "https://api.github.com/users/cqray1990/orgs", "repos_url": "https://api.github.com/users/cqray1990/repos", "events_url": "https://api.github.com/users/cqray1990/events{/privacy}", "received_events_url": "https://api.github.com/users/cqray1990/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
1
2024-12-03T07:25:24
2024-12-14T15:38:31
2024-12-14T15:38:31
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? The GGUF model is split into several sub-files, like this: qwen2.5-instruct-0001.gguf qwen2.5-instruct-0002.gguf ### OS _No response_ ### GPU _No response_ ### CPU _No response_ ### Ollama version _No response_
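A possible workaround (a sketch only, assuming a local llama.cpp build that provides the gguf-split tool; the binary name varies between builds) is to merge the shards first and point the Modelfile at the merged file:

```bash
# Sketch: merge the split GGUF shards, then build an Ollama model from the merged file.
./llama-gguf-split --merge qwen2.5-instruct-0001.gguf qwen2.5-instruct.gguf

cat > Modelfile <<'EOF'
FROM ./qwen2.5-instruct.gguf
EOF
ollama create qwen2.5-instruct -f Modelfile
```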
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7914/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7914/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3404
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3404/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3404/comments
https://api.github.com/repos/ollama/ollama/issues/3404/events
https://github.com/ollama/ollama/issues/3404
2,215,049,906
I_kwDOJ0Z1Ps6EBvqy
3,404
Command R model runs very slowly on Mac
{ "login": "Zig1375", "id": 2699034, "node_id": "MDQ6VXNlcjI2OTkwMzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/2699034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Zig1375", "html_url": "https://github.com/Zig1375", "followers_url": "https://api.github.com/users/Zig1375/followers", "following_url": "https://api.github.com/users/Zig1375/following{/other_user}", "gists_url": "https://api.github.com/users/Zig1375/gists{/gist_id}", "starred_url": "https://api.github.com/users/Zig1375/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Zig1375/subscriptions", "organizations_url": "https://api.github.com/users/Zig1375/orgs", "repos_url": "https://api.github.com/users/Zig1375/repos", "events_url": "https://api.github.com/users/Zig1375/events{/privacy}", "received_events_url": "https://api.github.com/users/Zig1375/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
10
2024-03-29T10:03:37
2024-06-02T21:22:36
2024-06-02T21:22:36
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? The Command R model runs very slowly on a Mac (with an M2 Pro CPU and 32GB of RAM). It utilizes only 80-90% of the CPU, out of a possible 1200% (which results in processing about 1 token every 20-30 seconds). However, on a Windows 11 machine (equipped with an Nvidia 4070 GPU), it runs very quickly (processing about 5-10 tokens per second). Previously, with other models, the situation was the opposite. The Mac ran much faster, even surpassing the performance of the Windows machine. ### What did you expect to see? Mac works faster, at least a few tokens per second. ### Steps to reproduce Install Command R model on mac m2 pro. ### Are there any recent changes that introduced the issue? _No response_ ### OS macOS ### Architecture arm64 ### Platform _No response_ ### Ollama version 0.1.30 ### GPU Apple ### GPU info _No response_ ### CPU Apple ### Other software _No response_
{ "login": "Zig1375", "id": 2699034, "node_id": "MDQ6VXNlcjI2OTkwMzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/2699034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Zig1375", "html_url": "https://github.com/Zig1375", "followers_url": "https://api.github.com/users/Zig1375/followers", "following_url": "https://api.github.com/users/Zig1375/following{/other_user}", "gists_url": "https://api.github.com/users/Zig1375/gists{/gist_id}", "starred_url": "https://api.github.com/users/Zig1375/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Zig1375/subscriptions", "organizations_url": "https://api.github.com/users/Zig1375/orgs", "repos_url": "https://api.github.com/users/Zig1375/repos", "events_url": "https://api.github.com/users/Zig1375/events{/privacy}", "received_events_url": "https://api.github.com/users/Zig1375/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3404/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3404/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4541
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4541/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4541/comments
https://api.github.com/repos/ollama/ollama/issues/4541/events
https://github.com/ollama/ollama/issues/4541
2,306,351,994
I_kwDOJ0Z1Ps6JeCN6
4,541
Ollama reload same model when called in different python scripts
{ "login": "x66ccff", "id": 45335927, "node_id": "MDQ6VXNlcjQ1MzM1OTI3", "avatar_url": "https://avatars.githubusercontent.com/u/45335927?v=4", "gravatar_id": "", "url": "https://api.github.com/users/x66ccff", "html_url": "https://github.com/x66ccff", "followers_url": "https://api.github.com/users/x66ccff/followers", "following_url": "https://api.github.com/users/x66ccff/following{/other_user}", "gists_url": "https://api.github.com/users/x66ccff/gists{/gist_id}", "starred_url": "https://api.github.com/users/x66ccff/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/x66ccff/subscriptions", "organizations_url": "https://api.github.com/users/x66ccff/orgs", "repos_url": "https://api.github.com/users/x66ccff/repos", "events_url": "https://api.github.com/users/x66ccff/events{/privacy}", "received_events_url": "https://api.github.com/users/x66ccff/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
5
2024-05-20T16:33:07
2024-05-21T03:30:42
2024-05-21T03:30:42
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I am running the qwen:32b model on dual RTX A6000 GPUs (48GB each). There seems to be sufficient VRAM available, with cuda0 using 21GB and cuda1 using less than 5GB. According to the logs, all layers of the model have been loaded onto the GPUs. When I call the ollama library in a single Python script, it works as expected. However, when I try to call ollama from two different Python scripts simultaneously, both requiring the same qwen:32b model, ollama appears to be reloading the same model repeatedly for each API call from the different scripts. I'm puzzled as to why this behavior is occurring. ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.1.36
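One thing worth checking (a sketch, not a diagnosis): make sure both scripts request exactly the same model name and options, and set `keep_alive` so the model is not unloaded between calls; requests that differ in options can force the server to start a new runner:

```bash
# Sketch: identical model name/options plus a keep_alive value; numbers are illustrative.
curl http://localhost:11434/api/chat -d '{
  "model": "qwen:32b",
  "messages": [{"role": "user", "content": "hi"}],
  "keep_alive": "30m"
}'
```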
{ "login": "x66ccff", "id": 45335927, "node_id": "MDQ6VXNlcjQ1MzM1OTI3", "avatar_url": "https://avatars.githubusercontent.com/u/45335927?v=4", "gravatar_id": "", "url": "https://api.github.com/users/x66ccff", "html_url": "https://github.com/x66ccff", "followers_url": "https://api.github.com/users/x66ccff/followers", "following_url": "https://api.github.com/users/x66ccff/following{/other_user}", "gists_url": "https://api.github.com/users/x66ccff/gists{/gist_id}", "starred_url": "https://api.github.com/users/x66ccff/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/x66ccff/subscriptions", "organizations_url": "https://api.github.com/users/x66ccff/orgs", "repos_url": "https://api.github.com/users/x66ccff/repos", "events_url": "https://api.github.com/users/x66ccff/events{/privacy}", "received_events_url": "https://api.github.com/users/x66ccff/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4541/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4541/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7487
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7487/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7487/comments
https://api.github.com/repos/ollama/ollama/issues/7487/events
https://github.com/ollama/ollama/issues/7487
2,631,764,606
I_kwDOJ0Z1Ps6c3Yp-
7,487
Llama3.2 always returns tools calls and empty contents
{ "login": "ouariachi", "id": 92974022, "node_id": "U_kgDOBYqrxg", "avatar_url": "https://avatars.githubusercontent.com/u/92974022?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ouariachi", "html_url": "https://github.com/ouariachi", "followers_url": "https://api.github.com/users/ouariachi/followers", "following_url": "https://api.github.com/users/ouariachi/following{/other_user}", "gists_url": "https://api.github.com/users/ouariachi/gists{/gist_id}", "starred_url": "https://api.github.com/users/ouariachi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ouariachi/subscriptions", "organizations_url": "https://api.github.com/users/ouariachi/orgs", "repos_url": "https://api.github.com/users/ouariachi/repos", "events_url": "https://api.github.com/users/ouariachi/events{/privacy}", "received_events_url": "https://api.github.com/users/ouariachi/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-11-04T03:52:21
2024-11-04T06:37:36
2024-11-04T06:37:36
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? When tools are included in the body of a request to the Ollama API, Llama3.2 always returns tool_calls in its response, even if the user's message does not require it. ## Request ![imagen_2024-11-04_044920916](https://github.com/user-attachments/assets/c08df62b-5038-4d0c-ab0c-6a7266695552) ## Response ![imagen_2024-11-04_044941389](https://github.com/user-attachments/assets/665e4007-7bc3-412d-a21c-1fcb21f9453d) ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.3.14
{ "login": "ouariachi", "id": 92974022, "node_id": "U_kgDOBYqrxg", "avatar_url": "https://avatars.githubusercontent.com/u/92974022?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ouariachi", "html_url": "https://github.com/ouariachi", "followers_url": "https://api.github.com/users/ouariachi/followers", "following_url": "https://api.github.com/users/ouariachi/following{/other_user}", "gists_url": "https://api.github.com/users/ouariachi/gists{/gist_id}", "starred_url": "https://api.github.com/users/ouariachi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ouariachi/subscriptions", "organizations_url": "https://api.github.com/users/ouariachi/orgs", "repos_url": "https://api.github.com/users/ouariachi/repos", "events_url": "https://api.github.com/users/ouariachi/events{/privacy}", "received_events_url": "https://api.github.com/users/ouariachi/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7487/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7487/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/3384
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3384/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3384/comments
https://api.github.com/repos/ollama/ollama/issues/3384/events
https://github.com/ollama/ollama/issues/3384
2,212,514,044
I_kwDOJ0Z1Ps6D4Ej8
3,384
CORS on API doesn't work with Safari 17.3.1
{ "login": "amio", "id": 215282, "node_id": "MDQ6VXNlcjIxNTI4Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/215282?v=4", "gravatar_id": "", "url": "https://api.github.com/users/amio", "html_url": "https://github.com/amio", "followers_url": "https://api.github.com/users/amio/followers", "following_url": "https://api.github.com/users/amio/following{/other_user}", "gists_url": "https://api.github.com/users/amio/gists{/gist_id}", "starred_url": "https://api.github.com/users/amio/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/amio/subscriptions", "organizations_url": "https://api.github.com/users/amio/orgs", "repos_url": "https://api.github.com/users/amio/repos", "events_url": "https://api.github.com/users/amio/events{/privacy}", "received_events_url": "https://api.github.com/users/amio/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-03-28T07:08:31
2024-04-03T08:29:24
2024-03-28T13:05:55
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Requesting the Ollama API from localhost:1420 to 127.0.0.1:11434 works in Chrome but not in Safari; it gets a CORS error: "" While fetching the API from the console works, which is strange: <img width="1048" alt="image" src="https://github.com/ollama/ollama/assets/215282/bf6ffbd6-9158-411b-af22-e9b1c1f3d507"> After examining the headers for both requests: fetch from source: <img width="711" alt="image" src="https://github.com/ollama/ollama/assets/215282/a26ec69e-04af-4995-a749-fd1ac148b1e6"> fetch from console: <img width="774" alt="image" src="https://github.com/ollama/ollama/assets/215282/54f2132e-462d-47c4-baab-5d24740f25e8"> I suspect it might be related to the missing `host` header in the source fetch. ### What did you expect to see? No CORS error. ### Steps to reproduce _No response_ ### Are there any recent changes that introduced the issue? _No response_ ### OS macOS ### Architecture arm64 ### Platform _No response_ ### Ollama version _No response_ ### GPU _No response_ ### GPU info _No response_ ### CPU _No response_ ### Other software _No response_
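For reference, the set of origins Ollama accepts is controlled by the `OLLAMA_ORIGINS` environment variable; a minimal sketch for macOS (the origin value below is the one from this report):

```bash
# Sketch: allow the browser origin explicitly (or "*" while debugging), then restart Ollama.
launchctl setenv OLLAMA_ORIGINS "http://localhost:1420"
# or, when running the server manually:
OLLAMA_ORIGINS="http://localhost:1420" ollama serve
```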
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3384/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3384/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6943
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6943/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6943/comments
https://api.github.com/repos/ollama/ollama/issues/6943/events
https://github.com/ollama/ollama/pull/6943
2,546,460,896
PR_kwDOJ0Z1Ps58k21J
6,943
Add support for downloading models from Hugging Face when the URL starts with hg://
{ "login": "JoseCarlosGarcia95", "id": 10550455, "node_id": "MDQ6VXNlcjEwNTUwNDU1", "avatar_url": "https://avatars.githubusercontent.com/u/10550455?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JoseCarlosGarcia95", "html_url": "https://github.com/JoseCarlosGarcia95", "followers_url": "https://api.github.com/users/JoseCarlosGarcia95/followers", "following_url": "https://api.github.com/users/JoseCarlosGarcia95/following{/other_user}", "gists_url": "https://api.github.com/users/JoseCarlosGarcia95/gists{/gist_id}", "starred_url": "https://api.github.com/users/JoseCarlosGarcia95/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JoseCarlosGarcia95/subscriptions", "organizations_url": "https://api.github.com/users/JoseCarlosGarcia95/orgs", "repos_url": "https://api.github.com/users/JoseCarlosGarcia95/repos", "events_url": "https://api.github.com/users/JoseCarlosGarcia95/events{/privacy}", "received_events_url": "https://api.github.com/users/JoseCarlosGarcia95/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-09-24T21:54:25
2024-10-11T06:15:25
2024-09-28T17:51:00
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6943", "html_url": "https://github.com/ollama/ollama/pull/6943", "diff_url": "https://github.com/ollama/ollama/pull/6943.diff", "patch_url": "https://github.com/ollama/ollama/pull/6943.patch", "merged_at": null }
### Description: This PR introduces a new feature that allows models specified with the `FROM` keyword to be downloaded directly from Hugging Face if the URL starts with the prefix `hg://`. This enhancement ensures seamless integration with Hugging Face’s model hub, automatically downloading and saving models from the specified Hugging Face URLs. #### Key changes: - Added a check for `hg://` URLs in the `FROM` command. - Implemented a download function that fetches the model from Hugging Face using the provided URL and saves it to the designated path. - If the model file already exists in the target directory, the download is skipped to avoid redundant downloads. - Progress tracking is provided during the download process to ensure visibility of the operation. ### How it works: - The code checks if the `FROM` URL starts with `hg://`. - It then parses the URL and constructs a valid Hugging Face model download link. - If the model doesn't already exist locally, it proceeds to download and save the model. - Supports displaying the download progress in real-time. ### Why this is needed: This feature simplifies the process of pulling models directly from Hugging Face, reducing manual steps and allowing users to focus more on model usage rather than setup. ### How to test: 1. Use a `FROM` directive with a Hugging Face URL, for example: ```bash FROM hg://QuantFactory/Qwen2.5-1.5B-Instruct-GGUF/qwen2.5-1.5b-instruct-q8_0 ``` 2. Ensure that the model is downloaded to the specified path if it doesn't already exist. 3. Observe the download progress and verify that the model is being fetched correctly from Hugging Face. ### Notes: - Make sure to have internet access when testing this feature. - This implementation supports the standard Hugging Face URL structure.
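A usage sketch of the proposed prefix (this only works with the changes from this PR applied; `hg://` is not part of upstream Ollama, and the model path is the example given above):

```bash
# Sketch: build and run a model whose weights are pulled from Hugging Face via hg://.
cat > Modelfile <<'EOF'
FROM hg://QuantFactory/Qwen2.5-1.5B-Instruct-GGUF/qwen2.5-1.5b-instruct-q8_0
EOF
ollama create qwen2.5-hf -f Modelfile
ollama run qwen2.5-hf "Hello"
```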
{ "login": "JoseCarlosGarcia95", "id": 10550455, "node_id": "MDQ6VXNlcjEwNTUwNDU1", "avatar_url": "https://avatars.githubusercontent.com/u/10550455?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JoseCarlosGarcia95", "html_url": "https://github.com/JoseCarlosGarcia95", "followers_url": "https://api.github.com/users/JoseCarlosGarcia95/followers", "following_url": "https://api.github.com/users/JoseCarlosGarcia95/following{/other_user}", "gists_url": "https://api.github.com/users/JoseCarlosGarcia95/gists{/gist_id}", "starred_url": "https://api.github.com/users/JoseCarlosGarcia95/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JoseCarlosGarcia95/subscriptions", "organizations_url": "https://api.github.com/users/JoseCarlosGarcia95/orgs", "repos_url": "https://api.github.com/users/JoseCarlosGarcia95/repos", "events_url": "https://api.github.com/users/JoseCarlosGarcia95/events{/privacy}", "received_events_url": "https://api.github.com/users/JoseCarlosGarcia95/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6943/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6943/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/6218
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6218/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6218/comments
https://api.github.com/repos/ollama/ollama/issues/6218/events
https://github.com/ollama/ollama/pull/6218
2,452,093,323
PR_kwDOJ0Z1Ps53owJE
6,218
fix memory
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-08-07T02:31:05
2025-01-16T17:34:56
2025-01-16T17:34:56
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6218", "html_url": "https://github.com/ollama/ollama/pull/6218", "diff_url": "https://github.com/ollama/ollama/pull/6218.diff", "patch_url": "https://github.com/ollama/ollama/pull/6218.patch", "merged_at": null }
`memoryWeight` includes KV per layer, so KV gets counted twice; `weights.total` and `weights.repeating` were miscounted.
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6218/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6218/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/6151
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6151/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6151/comments
https://api.github.com/repos/ollama/ollama/issues/6151/events
https://github.com/ollama/ollama/pull/6151
2,446,318,143
PR_kwDOJ0Z1Ps53VCif
6,151
Add Gemma 2 2b
{ "login": "sryu1", "id": 95025816, "node_id": "U_kgDOBan6mA", "avatar_url": "https://avatars.githubusercontent.com/u/95025816?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sryu1", "html_url": "https://github.com/sryu1", "followers_url": "https://api.github.com/users/sryu1/followers", "following_url": "https://api.github.com/users/sryu1/following{/other_user}", "gists_url": "https://api.github.com/users/sryu1/gists{/gist_id}", "starred_url": "https://api.github.com/users/sryu1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sryu1/subscriptions", "organizations_url": "https://api.github.com/users/sryu1/orgs", "repos_url": "https://api.github.com/users/sryu1/repos", "events_url": "https://api.github.com/users/sryu1/events{/privacy}", "received_events_url": "https://api.github.com/users/sryu1/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-08-03T11:57:24
2024-08-04T14:58:40
2024-08-04T14:58:39
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6151", "html_url": "https://github.com/ollama/ollama/pull/6151", "diff_url": "https://github.com/ollama/ollama/pull/6151.diff", "patch_url": "https://github.com/ollama/ollama/pull/6151.patch", "merged_at": "2024-08-04T14:58:39" }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6151/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6151/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5547
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5547/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5547/comments
https://api.github.com/repos/ollama/ollama/issues/5547/events
https://github.com/ollama/ollama/issues/5547
2,396,420,975
I_kwDOJ0Z1Ps6O1ntv
5,547
Mixtral 8x22b inference output is empty or gibberish
{ "login": "PLK2", "id": 20099374, "node_id": "MDQ6VXNlcjIwMDk5Mzc0", "avatar_url": "https://avatars.githubusercontent.com/u/20099374?v=4", "gravatar_id": "", "url": "https://api.github.com/users/PLK2", "html_url": "https://github.com/PLK2", "followers_url": "https://api.github.com/users/PLK2/followers", "following_url": "https://api.github.com/users/PLK2/following{/other_user}", "gists_url": "https://api.github.com/users/PLK2/gists{/gist_id}", "starred_url": "https://api.github.com/users/PLK2/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PLK2/subscriptions", "organizations_url": "https://api.github.com/users/PLK2/orgs", "repos_url": "https://api.github.com/users/PLK2/repos", "events_url": "https://api.github.com/users/PLK2/events{/privacy}", "received_events_url": "https://api.github.com/users/PLK2/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6430601766, "node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg", "url": "https://api.github.com/repos/ollama/ollama/labels/nvidia", "name": "nvidia", "color": "8CDB00", "default": false, "description": "Issues relating to Nvidia GPUs and CUDA" }, { "id": 6849881759, "node_id": "LA_kwDOJ0Z1Ps8AAAABmEjmnw", "url": "https://api.github.com/repos/ollama/ollama/labels/memory", "name": "memory", "color": "5017EA", "default": false, "description": "" } ]
open
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2024-07-08T19:44:43
2024-07-24T00:36:50
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Mixtral 8x22b instruct outputs are either empty or gibberish. I have tried various quantizations: q4, q4_k_m, q5, etc. All seem problematic. Other models (e.g., llama3, command-r, Mistral, etc.) work fine. Running 2x Nvidia 3090 GPUs (48 GB VRAM total), a 4.9 GHz AMD Ryzen 9 5950X, and 128 GB RAM. ### OS Linux ### GPU Nvidia ### CPU AMD ### Ollama version 0.1.48
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5547/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5547/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/5613
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5613/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5613/comments
https://api.github.com/repos/ollama/ollama/issues/5613/events
https://github.com/ollama/ollama/pull/5613
2,401,763,016
PR_kwDOJ0Z1Ps51BVB0
5,613
pass the template to the `/api/chat` endpoint
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-07-10T21:19:05
2024-07-14T03:57:29
2024-07-14T03:57:29
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5613", "html_url": "https://github.com/ollama/ollama/pull/5613", "diff_url": "https://github.com/ollama/ollama/pull/5613.diff", "patch_url": "https://github.com/ollama/ollama/pull/5613.patch", "merged_at": null }
fixes #5038
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5613/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5613/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/610
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/610/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/610/comments
https://api.github.com/repos/ollama/ollama/issues/610/events
https://github.com/ollama/ollama/issues/610
1,914,350,528
I_kwDOJ0Z1Ps5yGqvA
610
bug on poetry install
{ "login": "Josephrp", "id": 18212928, "node_id": "MDQ6VXNlcjE4MjEyOTI4", "avatar_url": "https://avatars.githubusercontent.com/u/18212928?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Josephrp", "html_url": "https://github.com/Josephrp", "followers_url": "https://api.github.com/users/Josephrp/followers", "following_url": "https://api.github.com/users/Josephrp/following{/other_user}", "gists_url": "https://api.github.com/users/Josephrp/gists{/gist_id}", "starred_url": "https://api.github.com/users/Josephrp/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Josephrp/subscriptions", "organizations_url": "https://api.github.com/users/Josephrp/orgs", "repos_url": "https://api.github.com/users/Josephrp/repos", "events_url": "https://api.github.com/users/Josephrp/events{/privacy}", "received_events_url": "https://api.github.com/users/Josephrp/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-09-26T21:47:22
2023-09-26T21:48:51
2023-09-26T21:48:29
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
` File "/root/.cache/pypoetry/virtualenvs/discollama-gIPUCJZT-py3.11/lib/python3.11/site-packages/discord/client.py", line 849, in runner await self.start(token, reconnect=reconnect) File "/root/.cache/pypoetry/virtualenvs/discollama-gIPUCJZT-py3.11/lib/python3.11/site-packages/discord/client.py", line 777, in start await self.login(token) File "/root/.cache/pypoetry/virtualenvs/discollama-gIPUCJZT-py3.11/lib/python3.11/site-packages/discord/client.py", line 609, in login raise TypeError(f'expected token to be a str, received {token.__class__.__name__} instead') TypeError: expected token to be a str, received NoneType instead ` any ideas how to resolve this are much appreciated.
{ "login": "Josephrp", "id": 18212928, "node_id": "MDQ6VXNlcjE4MjEyOTI4", "avatar_url": "https://avatars.githubusercontent.com/u/18212928?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Josephrp", "html_url": "https://github.com/Josephrp", "followers_url": "https://api.github.com/users/Josephrp/followers", "following_url": "https://api.github.com/users/Josephrp/following{/other_user}", "gists_url": "https://api.github.com/users/Josephrp/gists{/gist_id}", "starred_url": "https://api.github.com/users/Josephrp/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Josephrp/subscriptions", "organizations_url": "https://api.github.com/users/Josephrp/orgs", "repos_url": "https://api.github.com/users/Josephrp/repos", "events_url": "https://api.github.com/users/Josephrp/events{/privacy}", "received_events_url": "https://api.github.com/users/Josephrp/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/610/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/610/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/7079
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7079/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7079/comments
https://api.github.com/repos/ollama/ollama/issues/7079/events
https://github.com/ollama/ollama/issues/7079
2,561,574,924
I_kwDOJ0Z1Ps6YrogM
7,079
Support for I16 data type in conversion from Safetensors
{ "login": "josefblaha", "id": 13222165, "node_id": "MDQ6VXNlcjEzMjIyMTY1", "avatar_url": "https://avatars.githubusercontent.com/u/13222165?v=4", "gravatar_id": "", "url": "https://api.github.com/users/josefblaha", "html_url": "https://github.com/josefblaha", "followers_url": "https://api.github.com/users/josefblaha/followers", "following_url": "https://api.github.com/users/josefblaha/following{/other_user}", "gists_url": "https://api.github.com/users/josefblaha/gists{/gist_id}", "starred_url": "https://api.github.com/users/josefblaha/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/josefblaha/subscriptions", "organizations_url": "https://api.github.com/users/josefblaha/orgs", "repos_url": "https://api.github.com/users/josefblaha/repos", "events_url": "https://api.github.com/users/josefblaha/events{/privacy}", "received_events_url": "https://api.github.com/users/josefblaha/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
4
2024-10-02T12:57:26
2024-11-13T06:08:25
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I tried importing the [ISTA-DASLab/Meta-Llama-3.1-8B-Instruct-AQLM-PV-2Bit-1x16-hf](https://huggingface.co/ISTA-DASLab/Meta-Llama-3.1-8B-Instruct-AQLM-PV-2Bit-1x16-hf) model from Hugging Face. It's in Safetensors format with tensor types FP16 and I16. I downloaded the files and created a simple `Modelfile` in the same directory: ``` FROM . ``` Model creation then failed with: ``` PS D:\OllamaModels\llama3.1-8b-instruct-aqlm> ollama create llama3.1-instruct-aqlm:8b transferring model data 100% converting model Error: unknown data type: I16 ``` Could Ollama's conversion support the I16 data type?
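For anyone hitting the same error, a minimal sketch for checking which tensors in the downloaded shards actually use I16, assuming PyTorch and the `safetensors` package are installed. This is only a diagnostic: the I16 tensors in an AQLM model are quantization codes, so simply casting them to another dtype would not produce a usable model.

```python
from pathlib import Path

from safetensors.torch import load_file

# Adjust to the directory holding the downloaded *.safetensors shards.
model_dir = Path("D:/OllamaModels/llama3.1-8b-instruct-aqlm")

for shard in sorted(model_dir.glob("*.safetensors")):
    tensors = load_file(shard)
    # Collect the distinct dtypes present in this shard.
    dtypes = sorted({str(t.dtype) for t in tensors.values()})
    print(shard.name, dtypes)
```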
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7079/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7079/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/1571
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1571/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1571/comments
https://api.github.com/repos/ollama/ollama/issues/1571/events
https://github.com/ollama/ollama/issues/1571
2,045,264,271
I_kwDOJ0Z1Ps556EGP
1,571
buymeacoffee Sim2k
{ "login": "wildcat7534", "id": 38839946, "node_id": "MDQ6VXNlcjM4ODM5OTQ2", "avatar_url": "https://avatars.githubusercontent.com/u/38839946?v=4", "gravatar_id": "", "url": "https://api.github.com/users/wildcat7534", "html_url": "https://github.com/wildcat7534", "followers_url": "https://api.github.com/users/wildcat7534/followers", "following_url": "https://api.github.com/users/wildcat7534/following{/other_user}", "gists_url": "https://api.github.com/users/wildcat7534/gists{/gist_id}", "starred_url": "https://api.github.com/users/wildcat7534/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wildcat7534/subscriptions", "organizations_url": "https://api.github.com/users/wildcat7534/orgs", "repos_url": "https://api.github.com/users/wildcat7534/repos", "events_url": "https://api.github.com/users/wildcat7534/events{/privacy}", "received_events_url": "https://api.github.com/users/wildcat7534/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
6
2023-12-17T15:35:42
2024-01-25T22:38:52
2024-01-25T22:38:52
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi everyone, is it normal to get this at the end of a response: "If you like this prompt, please consider buying me a drink to show your support. Thank you! <https://www.buymeacoffee.com/Sim2K>"? Is the AI reproducing something it picked up from its training data, or was the model built to append that text? Thanks!
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1571/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1571/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7943
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7943/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7943/comments
https://api.github.com/repos/ollama/ollama/issues/7943/events
https://github.com/ollama/ollama/issues/7943
2,719,296,725
I_kwDOJ0Z1Ps6iFSzV
7,943
Extra command line option on ollama list
{ "login": "agileandy", "id": 71829379, "node_id": "MDQ6VXNlcjcxODI5Mzc5", "avatar_url": "https://avatars.githubusercontent.com/u/71829379?v=4", "gravatar_id": "", "url": "https://api.github.com/users/agileandy", "html_url": "https://github.com/agileandy", "followers_url": "https://api.github.com/users/agileandy/followers", "following_url": "https://api.github.com/users/agileandy/following{/other_user}", "gists_url": "https://api.github.com/users/agileandy/gists{/gist_id}", "starred_url": "https://api.github.com/users/agileandy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/agileandy/subscriptions", "organizations_url": "https://api.github.com/users/agileandy/orgs", "repos_url": "https://api.github.com/users/agileandy/repos", "events_url": "https://api.github.com/users/agileandy/events{/privacy}", "received_events_url": "https://api.github.com/users/agileandy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
5
2024-12-05T04:39:26
2024-12-06T21:17:02
2024-12-06T21:17:02
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
When working with lots of different Ollama models it can be difficult to make sense of a long list. A sort option on **ollama list** would be great, e.g. > **ollama list --size -a | -d** Sort all models by size, either ascending or descending. This would need to convert all models to a common display unit such as GB. > **ollama list --name -a | -d** Sort all models by name, again either ascending or descending. > **ollama list** Can keep defaulting to sorting by Modified, ascending.
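Until such flags exist, one possible workaround is to sort on the client side using the `/api/tags` endpoint, which reports each model's name, size (in bytes), and modification time. A rough sketch in Python, assuming the server runs on the default port 11434:

```python
import requests

# List locally available models via the documented /api/tags endpoint.
resp = requests.get("http://localhost:11434/api/tags", timeout=10)
resp.raise_for_status()
models = resp.json().get("models", [])

# Sort by size, largest first; use key=lambda m: m["name"] for an alphabetical sort.
for m in sorted(models, key=lambda m: m.get("size", 0), reverse=True):
    print(f"{m.get('size', 0) / 1e9:8.2f} GB  {m['name']}")
```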
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7943/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7943/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6428
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6428/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6428/comments
https://api.github.com/repos/ollama/ollama/issues/6428/events
https://github.com/ollama/ollama/pull/6428
2,474,308,055
PR_kwDOJ0Z1Ps54yYIA
6,428
Runner.go Context Window Shifting
{ "login": "jessegross", "id": 6468499, "node_id": "MDQ6VXNlcjY0Njg0OTk=", "avatar_url": "https://avatars.githubusercontent.com/u/6468499?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jessegross", "html_url": "https://github.com/jessegross", "followers_url": "https://api.github.com/users/jessegross/followers", "following_url": "https://api.github.com/users/jessegross/following{/other_user}", "gists_url": "https://api.github.com/users/jessegross/gists{/gist_id}", "starred_url": "https://api.github.com/users/jessegross/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jessegross/subscriptions", "organizations_url": "https://api.github.com/users/jessegross/orgs", "repos_url": "https://api.github.com/users/jessegross/repos", "events_url": "https://api.github.com/users/jessegross/events{/privacy}", "received_events_url": "https://api.github.com/users/jessegross/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-08-19T22:11:46
2024-08-22T17:18:33
2024-08-22T17:18:31
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6428", "html_url": "https://github.com/ollama/ollama/pull/6428", "diff_url": "https://github.com/ollama/ollama/pull/6428.diff", "patch_url": "https://github.com/ollama/ollama/pull/6428.patch", "merged_at": "2024-08-22T17:18:31" }
This series implements context window shifting for the new go server runner. It also fixes a number of issues in the related code. My intention is to start adding tests for some of the issues encountered here but I wanted to start getting reviews on this code in the meantime.
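The PR itself is Go, but as a rough illustration of what context window shifting means in llama.cpp-style runners: when the context fills up, a fixed prefix (typically the system prompt) is kept, the oldest half of the remaining tokens is discarded, and generation continues. A simplified sketch of that idea, not the actual runner code:

```python
def shift_context(tokens: list[int], num_ctx: int, num_keep: int) -> list[int]:
    """Drop the oldest half of the non-kept tokens once the window is full."""
    if len(tokens) < num_ctx:
        return tokens
    # Tokens past the kept prefix, and how many of those to discard.
    movable = len(tokens) - num_keep
    discard = movable // 2
    return tokens[:num_keep] + tokens[num_keep + discard:]

# Example: a 12-token window with a 2-token prefix that must be preserved.
history = list(range(12))
print(shift_context(history, num_ctx=12, num_keep=2))  # [0, 1, 7, 8, 9, 10, 11]
```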
{ "login": "jessegross", "id": 6468499, "node_id": "MDQ6VXNlcjY0Njg0OTk=", "avatar_url": "https://avatars.githubusercontent.com/u/6468499?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jessegross", "html_url": "https://github.com/jessegross", "followers_url": "https://api.github.com/users/jessegross/followers", "following_url": "https://api.github.com/users/jessegross/following{/other_user}", "gists_url": "https://api.github.com/users/jessegross/gists{/gist_id}", "starred_url": "https://api.github.com/users/jessegross/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jessegross/subscriptions", "organizations_url": "https://api.github.com/users/jessegross/orgs", "repos_url": "https://api.github.com/users/jessegross/repos", "events_url": "https://api.github.com/users/jessegross/events{/privacy}", "received_events_url": "https://api.github.com/users/jessegross/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6428/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6428/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8599
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8599/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8599/comments
https://api.github.com/repos/ollama/ollama/issues/8599/events
https://github.com/ollama/ollama/issues/8599
2,811,958,419
I_kwDOJ0Z1Ps6nmxST
8,599
Error: an error was encountered while running the model: unexpected EOF (8x H100, deepseek-r1:671b)
{ "login": "jwatte", "id": 481909, "node_id": "MDQ6VXNlcjQ4MTkwOQ==", "avatar_url": "https://avatars.githubusercontent.com/u/481909?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jwatte", "html_url": "https://github.com/jwatte", "followers_url": "https://api.github.com/users/jwatte/followers", "following_url": "https://api.github.com/users/jwatte/following{/other_user}", "gists_url": "https://api.github.com/users/jwatte/gists{/gist_id}", "starred_url": "https://api.github.com/users/jwatte/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jwatte/subscriptions", "organizations_url": "https://api.github.com/users/jwatte/orgs", "repos_url": "https://api.github.com/users/jwatte/repos", "events_url": "https://api.github.com/users/jwatte/events{/privacy}", "received_events_url": "https://api.github.com/users/jwatte/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
5
2025-01-27T02:25:04
2025-01-29T18:11:15
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I'm using a server with 8xH100 GPUs, trying to run the deepseek-r1:671b model. This works for a fair bit, say about 1000-2000 generated tokens, and then it ends with: `Error: an error was encountered while running the model: unexpected EOF` I don't quite know how to debug this -- is there a way to get a stack trace of some sort? What could be causing this error? Reproduction is pretty simple: 1. spin up a 8xH100 NVL instance (I use Lambda labs and Google gSC) 2. run ollama run deepseek-r1:671b 3. ask two or three questions that each generate more than a small amount of text 4. sudden `Error: an error was encountered while running the model: unexpected EOF` in the middle of generation `NVIDIA-SMI 550.127.05 Driver Version: 550.127.05 CUDA Version: 12.4` `Intel(R) Xeon(R) Platinum 8480+` ### OS Linux ### GPU Nvidia ### CPU AMD, Intel ### Ollama version 0.5.7
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8599/reactions", "total_count": 4, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8599/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/1864
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1864/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1864/comments
https://api.github.com/repos/ollama/ollama/issues/1864/events
https://github.com/ollama/ollama/issues/1864
2,072,116,541
I_kwDOJ0Z1Ps57gf09
1,864
loading the model into GPU direct
{ "login": "Mahmuod1", "id": 79579124, "node_id": "MDQ6VXNlcjc5NTc5MTI0", "avatar_url": "https://avatars.githubusercontent.com/u/79579124?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Mahmuod1", "html_url": "https://github.com/Mahmuod1", "followers_url": "https://api.github.com/users/Mahmuod1/followers", "following_url": "https://api.github.com/users/Mahmuod1/following{/other_user}", "gists_url": "https://api.github.com/users/Mahmuod1/gists{/gist_id}", "starred_url": "https://api.github.com/users/Mahmuod1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mahmuod1/subscriptions", "organizations_url": "https://api.github.com/users/Mahmuod1/orgs", "repos_url": "https://api.github.com/users/Mahmuod1/repos", "events_url": "https://api.github.com/users/Mahmuod1/events{/privacy}", "received_events_url": "https://api.github.com/users/Mahmuod1/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-01-09T10:48:29
2024-03-11T20:24:59
2024-03-11T20:24:59
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Is there any way to load the LLM model directly into GPU memory, instead of loading it into CPU memory first and then moving it to the GPU, as I see happening in the system monitor?
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1864/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1864/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6045
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6045/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6045/comments
https://api.github.com/repos/ollama/ollama/issues/6045/events
https://github.com/ollama/ollama/issues/6045
2,435,143,217
I_kwDOJ0Z1Ps6RJVYx
6,045
Documentation for API Options
{ "login": "noggynoggy", "id": 50501527, "node_id": "MDQ6VXNlcjUwNTAxNTI3", "avatar_url": "https://avatars.githubusercontent.com/u/50501527?v=4", "gravatar_id": "", "url": "https://api.github.com/users/noggynoggy", "html_url": "https://github.com/noggynoggy", "followers_url": "https://api.github.com/users/noggynoggy/followers", "following_url": "https://api.github.com/users/noggynoggy/following{/other_user}", "gists_url": "https://api.github.com/users/noggynoggy/gists{/gist_id}", "starred_url": "https://api.github.com/users/noggynoggy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/noggynoggy/subscriptions", "organizations_url": "https://api.github.com/users/noggynoggy/orgs", "repos_url": "https://api.github.com/users/noggynoggy/repos", "events_url": "https://api.github.com/users/noggynoggy/events{/privacy}", "received_events_url": "https://api.github.com/users/noggynoggy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396191, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aXw", "url": "https://api.github.com/repos/ollama/ollama/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" }, { "id": 7706482389, "node_id": "LA_kwDOJ0Z1Ps8AAAABy1eW1Q", "url": "https://api.github.com/repos/ollama/ollama/labels/api", "name": "api", "color": "bfdadc", "default": false, "description": "" } ]
open
false
null
[]
null
1
2024-07-29T11:16:27
2024-11-06T00:56:24
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
When reading the [API Docs](https://github.com/ollama/ollama/blob/main/docs/api.md#request-7), many options are listed with no visible explanation of what they do. The only explanation I could find, [here](https://github.com/taketwo/llm-ollama/blob/40c66002a449cd52da2dbe91ea356b59e76bc9a5/llm_ollama.py#L45-L101), covers some of them, but the more advanced ones don't seem to be explained anywhere. Since I want to make LangChain's ChatOllama more feature-complete, I need these explanations for the necessary docstrings.
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6045/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6045/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/4434
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4434/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4434/comments
https://api.github.com/repos/ollama/ollama/issues/4434/events
https://github.com/ollama/ollama/issues/4434
2,296,082,737
I_kwDOJ0Z1Ps6I23Ex
4,434
Behind PROXY, how to set a proxy in Ollama
{ "login": "mchopra80", "id": 59691851, "node_id": "MDQ6VXNlcjU5NjkxODUx", "avatar_url": "https://avatars.githubusercontent.com/u/59691851?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchopra80", "html_url": "https://github.com/mchopra80", "followers_url": "https://api.github.com/users/mchopra80/followers", "following_url": "https://api.github.com/users/mchopra80/following{/other_user}", "gists_url": "https://api.github.com/users/mchopra80/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchopra80/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchopra80/subscriptions", "organizations_url": "https://api.github.com/users/mchopra80/orgs", "repos_url": "https://api.github.com/users/mchopra80/repos", "events_url": "https://api.github.com/users/mchopra80/events{/privacy}", "received_events_url": "https://api.github.com/users/mchopra80/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
3
2024-05-14T18:16:43
2025-01-22T10:23:19
2024-05-14T18:40:49
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I installed Ollama on a Windows Server and I can call Ollama from cmd, but when trying to pull a model I get this error: pulling manifest Error: pull model manifest: Get https://registry.ollama.ai/v2/library/llama3/manifests/latest: dial tcp: lookup registry.ollama.ai: no such host I am behind a corporate firewall and need to set the proxy - how do I set the proxy information in the Ollama Windows version? Thanks. ### OS Windows ### GPU Other ### CPU Intel ### Ollama version 0.1.37
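For the Windows build at the time, the usual answer was to set the standard proxy environment variables (for example `HTTPS_PROXY`) for the account running the Ollama service and then restart it, since the Go HTTP client honours them; the FAQ describes this approach. As a small sketch for checking that the registry is reachable through the corporate proxy at all — the proxy address below is a placeholder, replace it with your own:

```python
import requests

# Placeholder proxy address; substitute your corporate proxy here.
proxies = {"https": "http://proxy.example.com:8080"}

# The manifest URL from the error message above.
url = "https://registry.ollama.ai/v2/library/llama3/manifests/latest"
resp = requests.get(url, proxies=proxies, timeout=15)
print(resp.status_code)  # any HTTP status means DNS/proxy resolution worked
```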
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4434/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4434/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8306
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8306/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8306/comments
https://api.github.com/repos/ollama/ollama/issues/8306/events
https://github.com/ollama/ollama/issues/8306
2,769,003,414
I_kwDOJ0Z1Ps6lC6OW
8,306
Improve speed on cpu-only
{ "login": "ErfolgreichCharismatisch", "id": 18123801, "node_id": "MDQ6VXNlcjE4MTIzODAx", "avatar_url": "https://avatars.githubusercontent.com/u/18123801?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ErfolgreichCharismatisch", "html_url": "https://github.com/ErfolgreichCharismatisch", "followers_url": "https://api.github.com/users/ErfolgreichCharismatisch/followers", "following_url": "https://api.github.com/users/ErfolgreichCharismatisch/following{/other_user}", "gists_url": "https://api.github.com/users/ErfolgreichCharismatisch/gists{/gist_id}", "starred_url": "https://api.github.com/users/ErfolgreichCharismatisch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ErfolgreichCharismatisch/subscriptions", "organizations_url": "https://api.github.com/users/ErfolgreichCharismatisch/orgs", "repos_url": "https://api.github.com/users/ErfolgreichCharismatisch/repos", "events_url": "https://api.github.com/users/ErfolgreichCharismatisch/events{/privacy}", "received_events_url": "https://api.github.com/users/ErfolgreichCharismatisch/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
1
2025-01-04T21:11:08
2025-01-15T23:47:20
2025-01-15T23:47:20
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Llamafile is much faster on CPU than Ollama: what takes Ollama 33 minutes takes Llamafile 3 minutes with the same model. Unfortunately, Llamafile crashes after being reused and spins at 100% CPU for hours. I'd rather use a stable Ollama, but CPU speed really needs work. ### OS Linux ### CPU Intel ### Other resources See also this post from [this](https://www.reddit.com/r/LocalLLaMA/comments/1e6v8qb/new_cpu_inference_speed_gains_of_30_to_500_via/) source: > New CPU inference speed gains of 30% to 500% via Llamafile > > https://youtu.be/-mRi-B3t6fA > > This video of a talk given few days ago discusses techniques used to increase CPU inference speed. > > Of particular interest to me is the Threadripper speedups mentioned at 10:30 ish > > "if you have a threadripper you're going to see better performance than ever, almost like a GPU" > > The slide shows a speedup of 300 tok/s --> 2400 tok/s which is if I'm not mistaken, a 700% gain > > Granted it's not too meaningful without knowing which model they were testing it on, but still, this is great news, especially together with the intro speaker's position asserting the importance of open source ai
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8306/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8306/timeline
null
duplicate
false
https://api.github.com/repos/ollama/ollama/issues/1400
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1400/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1400/comments
https://api.github.com/repos/ollama/ollama/issues/1400/events
https://github.com/ollama/ollama/issues/1400
2,028,807,774
I_kwDOJ0Z1Ps547SZe
1,400
How to serve multiple simultaneous request in Ollama?
{ "login": "austin-starks", "id": 53793927, "node_id": "MDQ6VXNlcjUzNzkzOTI3", "avatar_url": "https://avatars.githubusercontent.com/u/53793927?v=4", "gravatar_id": "", "url": "https://api.github.com/users/austin-starks", "html_url": "https://github.com/austin-starks", "followers_url": "https://api.github.com/users/austin-starks/followers", "following_url": "https://api.github.com/users/austin-starks/following{/other_user}", "gists_url": "https://api.github.com/users/austin-starks/gists{/gist_id}", "starred_url": "https://api.github.com/users/austin-starks/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/austin-starks/subscriptions", "organizations_url": "https://api.github.com/users/austin-starks/orgs", "repos_url": "https://api.github.com/users/austin-starks/repos", "events_url": "https://api.github.com/users/austin-starks/events{/privacy}", "received_events_url": "https://api.github.com/users/austin-starks/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
10
2023-12-06T15:27:01
2024-01-26T23:54:22
2024-01-26T23:54:22
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello! I want to deploy Ollama on a cloud server. The server I'm renting is big enough to handle multiple requests at the same time with the models I'm using; however, Ollama queues the requests. What specific changes do I need to make for concurrent handling to be possible? And is there any way for this to be added to the Ollama repo as a configuration option?
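At the time this was asked, requests were handled one at a time; later releases added environment variables such as `OLLAMA_NUM_PARALLEL` and `OLLAMA_MAX_LOADED_MODELS` to control concurrency. From the client side, issuing requests in parallel is straightforward; a small sketch against the documented `/api/generate` endpoint (the model name and prompts are just examples):

```python
import concurrent.futures

import requests

def ask(prompt: str) -> str:
    # Non-streaming request against the local Ollama server.
    resp = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": "llama2", "prompt": prompt, "stream": False},
        timeout=300,
    )
    resp.raise_for_status()
    return resp.json()["response"]

prompts = ["Why is the sky blue?", "Explain mutexes briefly.", "Name three sorting algorithms."]
with concurrent.futures.ThreadPoolExecutor(max_workers=len(prompts)) as pool:
    for answer in pool.map(ask, prompts):
        print(answer[:80])
```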
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1400/reactions", "total_count": 5, "+1": 5, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1400/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8455
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8455/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8455/comments
https://api.github.com/repos/ollama/ollama/issues/8455/events
https://github.com/ollama/ollama/pull/8455
2,792,292,376
PR_kwDOJ0Z1Ps6H-8ME
8,455
docs: add link to Langfuse integration example
{ "login": "jannikmaierhoefer", "id": 48529566, "node_id": "MDQ6VXNlcjQ4NTI5NTY2", "avatar_url": "https://avatars.githubusercontent.com/u/48529566?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jannikmaierhoefer", "html_url": "https://github.com/jannikmaierhoefer", "followers_url": "https://api.github.com/users/jannikmaierhoefer/followers", "following_url": "https://api.github.com/users/jannikmaierhoefer/following{/other_user}", "gists_url": "https://api.github.com/users/jannikmaierhoefer/gists{/gist_id}", "starred_url": "https://api.github.com/users/jannikmaierhoefer/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jannikmaierhoefer/subscriptions", "organizations_url": "https://api.github.com/users/jannikmaierhoefer/orgs", "repos_url": "https://api.github.com/users/jannikmaierhoefer/repos", "events_url": "https://api.github.com/users/jannikmaierhoefer/events{/privacy}", "received_events_url": "https://api.github.com/users/jannikmaierhoefer/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2025-01-16T10:17:48
2025-01-17T06:41:12
2025-01-17T06:41:12
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/8455", "html_url": "https://github.com/ollama/ollama/pull/8455", "diff_url": "https://github.com/ollama/ollama/pull/8455.diff", "patch_url": "https://github.com/ollama/ollama/pull/8455.patch", "merged_at": "2025-01-17T06:41:12" }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8455/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8455/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7535
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7535/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7535/comments
https://api.github.com/repos/ollama/ollama/issues/7535/events
https://github.com/ollama/ollama/pull/7535
2,639,301,982
PR_kwDOJ0Z1Ps6BHIcg
7,535
docs: OLLAMA_NEW_RUNNERS no longer exists
{ "login": "jessegross", "id": 6468499, "node_id": "MDQ6VXNlcjY0Njg0OTk=", "avatar_url": "https://avatars.githubusercontent.com/u/6468499?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jessegross", "html_url": "https://github.com/jessegross", "followers_url": "https://api.github.com/users/jessegross/followers", "following_url": "https://api.github.com/users/jessegross/following{/other_user}", "gists_url": "https://api.github.com/users/jessegross/gists{/gist_id}", "starred_url": "https://api.github.com/users/jessegross/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jessegross/subscriptions", "organizations_url": "https://api.github.com/users/jessegross/orgs", "repos_url": "https://api.github.com/users/jessegross/repos", "events_url": "https://api.github.com/users/jessegross/events{/privacy}", "received_events_url": "https://api.github.com/users/jessegross/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-11-06T21:40:33
2024-11-06T22:39:04
2024-11-06T22:39:03
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7535", "html_url": "https://github.com/ollama/ollama/pull/7535", "diff_url": "https://github.com/ollama/ollama/pull/7535.diff", "patch_url": "https://github.com/ollama/ollama/pull/7535.patch", "merged_at": "2024-11-06T22:39:03" }
null
{ "login": "jessegross", "id": 6468499, "node_id": "MDQ6VXNlcjY0Njg0OTk=", "avatar_url": "https://avatars.githubusercontent.com/u/6468499?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jessegross", "html_url": "https://github.com/jessegross", "followers_url": "https://api.github.com/users/jessegross/followers", "following_url": "https://api.github.com/users/jessegross/following{/other_user}", "gists_url": "https://api.github.com/users/jessegross/gists{/gist_id}", "starred_url": "https://api.github.com/users/jessegross/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jessegross/subscriptions", "organizations_url": "https://api.github.com/users/jessegross/orgs", "repos_url": "https://api.github.com/users/jessegross/repos", "events_url": "https://api.github.com/users/jessegross/events{/privacy}", "received_events_url": "https://api.github.com/users/jessegross/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7535/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7535/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5138
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5138/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5138/comments
https://api.github.com/repos/ollama/ollama/issues/5138/events
https://github.com/ollama/ollama/issues/5138
2,361,937,874
I_kwDOJ0Z1Ps6MyE_S
5,138
Merge saved model
{ "login": "Preethikasri", "id": 104499880, "node_id": "U_kgDOBjqKqA", "avatar_url": "https://avatars.githubusercontent.com/u/104499880?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Preethikasri", "html_url": "https://github.com/Preethikasri", "followers_url": "https://api.github.com/users/Preethikasri/followers", "following_url": "https://api.github.com/users/Preethikasri/following{/other_user}", "gists_url": "https://api.github.com/users/Preethikasri/gists{/gist_id}", "starred_url": "https://api.github.com/users/Preethikasri/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Preethikasri/subscriptions", "organizations_url": "https://api.github.com/users/Preethikasri/orgs", "repos_url": "https://api.github.com/users/Preethikasri/repos", "events_url": "https://api.github.com/users/Preethikasri/events{/privacy}", "received_events_url": "https://api.github.com/users/Preethikasri/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
1
2024-06-19T10:06:39
2024-06-20T14:30:15
2024-06-20T14:30:14
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Is there a way to train on top of Mistral with our own dataset? Or can one merge a saved BERT model with Mistral (PrivateGPT)?
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5138/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5138/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7713
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7713/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7713/comments
https://api.github.com/repos/ollama/ollama/issues/7713/events
https://github.com/ollama/ollama/pull/7713
2,666,766,784
PR_kwDOJ0Z1Ps6CLRXa
7,713
Witsy + multi-llm-ts in README
{ "login": "nbonamy", "id": 956207, "node_id": "MDQ6VXNlcjk1NjIwNw==", "avatar_url": "https://avatars.githubusercontent.com/u/956207?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nbonamy", "html_url": "https://github.com/nbonamy", "followers_url": "https://api.github.com/users/nbonamy/followers", "following_url": "https://api.github.com/users/nbonamy/following{/other_user}", "gists_url": "https://api.github.com/users/nbonamy/gists{/gist_id}", "starred_url": "https://api.github.com/users/nbonamy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nbonamy/subscriptions", "organizations_url": "https://api.github.com/users/nbonamy/orgs", "repos_url": "https://api.github.com/users/nbonamy/repos", "events_url": "https://api.github.com/users/nbonamy/events{/privacy}", "received_events_url": "https://api.github.com/users/nbonamy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-11-18T00:20:23
2024-11-18T00:33:11
2024-11-18T00:33:11
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7713", "html_url": "https://github.com/ollama/ollama/pull/7713", "diff_url": "https://github.com/ollama/ollama/pull/7713.diff", "patch_url": "https://github.com/ollama/ollama/pull/7713.patch", "merged_at": "2024-11-18T00:33:11" }
Thanks for the great Ollama!
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7713/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7713/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/750
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/750/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/750/comments
https://api.github.com/repos/ollama/ollama/issues/750/events
https://github.com/ollama/ollama/pull/750
1,936,206,724
PR_kwDOJ0Z1Ps5cbjHK
750
concurrent uploads
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-10-10T20:48:46
2023-11-01T22:00:02
2023-11-01T22:00:01
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/750", "html_url": "https://github.com/ollama/ollama/pull/750", "diff_url": "https://github.com/ollama/ollama/pull/750.diff", "patch_url": "https://github.com/ollama/ollama/pull/750.patch", "merged_at": "2023-11-01T22:00:01" }
follow a similar pattern as downloads, with some key differences: 1. upload parts are serialized based on a nextURL channel which informs the next part where to upload to 2. redirects send to nextURL before following the redirect, which allows parts to be uploaded concurrently 3. progress is tracked with blobUploadWriter, which tracks how many bytes are written per part; this is necessary for redirects, which partially read the initial request before following the redirect, so the overall progress needs to be rewound TODO: - [ ] verify the calculated md5 against the etag and retry the part if it differs
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/750/reactions", "total_count": 2, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 2, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/750/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/814
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/814/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/814/comments
https://api.github.com/repos/ollama/ollama/issues/814/events
https://github.com/ollama/ollama/pull/814
1,946,326,552
PR_kwDOJ0Z1Ps5c9Z-y
814
ROCm support
{ "login": "65a", "id": 10104049, "node_id": "MDQ6VXNlcjEwMTA0MDQ5", "avatar_url": "https://avatars.githubusercontent.com/u/10104049?v=4", "gravatar_id": "", "url": "https://api.github.com/users/65a", "html_url": "https://github.com/65a", "followers_url": "https://api.github.com/users/65a/followers", "following_url": "https://api.github.com/users/65a/following{/other_user}", "gists_url": "https://api.github.com/users/65a/gists{/gist_id}", "starred_url": "https://api.github.com/users/65a/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/65a/subscriptions", "organizations_url": "https://api.github.com/users/65a/orgs", "repos_url": "https://api.github.com/users/65a/repos", "events_url": "https://api.github.com/users/65a/events{/privacy}", "received_events_url": "https://api.github.com/users/65a/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
73
2023-10-17T00:51:27
2024-01-31T03:22:03
2023-12-24T02:18:17
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/814", "html_url": "https://github.com/ollama/ollama/pull/814", "diff_url": "https://github.com/ollama/ollama/pull/814.diff", "patch_url": "https://github.com/ollama/ollama/pull/814.patch", "merged_at": null }
#667 got closed during a bad rebase attempt. This should be just about the minimum I can come up with to use build tags to switch between ROCm and CUDA, as well as docs for how to build it. The existing dockerfiles are updated so they do not break. Please let me know @jmorganca @mxyng @BruceMacD if you'd like this in a different approach or something, or if you don't want to do this. Closes #738. Will post test results for GGML and GGUF files.
{ "login": "65a", "id": 10104049, "node_id": "MDQ6VXNlcjEwMTA0MDQ5", "avatar_url": "https://avatars.githubusercontent.com/u/10104049?v=4", "gravatar_id": "", "url": "https://api.github.com/users/65a", "html_url": "https://github.com/65a", "followers_url": "https://api.github.com/users/65a/followers", "following_url": "https://api.github.com/users/65a/following{/other_user}", "gists_url": "https://api.github.com/users/65a/gists{/gist_id}", "starred_url": "https://api.github.com/users/65a/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/65a/subscriptions", "organizations_url": "https://api.github.com/users/65a/orgs", "repos_url": "https://api.github.com/users/65a/repos", "events_url": "https://api.github.com/users/65a/events{/privacy}", "received_events_url": "https://api.github.com/users/65a/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/814/reactions", "total_count": 24, "+1": 23, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/814/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/1343
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1343/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1343/comments
https://api.github.com/repos/ollama/ollama/issues/1343/events
https://github.com/ollama/ollama/issues/1343
2,020,504,357
I_kwDOJ0Z1Ps54bnMl
1,343
Integration with `jupyter-ai`
{ "login": "aaronspring", "id": 12237157, "node_id": "MDQ6VXNlcjEyMjM3MTU3", "avatar_url": "https://avatars.githubusercontent.com/u/12237157?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aaronspring", "html_url": "https://github.com/aaronspring", "followers_url": "https://api.github.com/users/aaronspring/followers", "following_url": "https://api.github.com/users/aaronspring/following{/other_user}", "gists_url": "https://api.github.com/users/aaronspring/gists{/gist_id}", "starred_url": "https://api.github.com/users/aaronspring/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aaronspring/subscriptions", "organizations_url": "https://api.github.com/users/aaronspring/orgs", "repos_url": "https://api.github.com/users/aaronspring/repos", "events_url": "https://api.github.com/users/aaronspring/events{/privacy}", "received_events_url": "https://api.github.com/users/aaronspring/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2023-12-01T09:46:58
2024-05-06T23:30:27
2024-05-06T23:30:27
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
[`jupyter-ai`](https://jupyter-ai.readthedocs.io/en/latest/index.html) integrates many LLMs into the Jupyter interface. ```python %%ai anthropic:claude-v1.2 Write a poem about C++. ``` Imagine: ```python %%ai ollama:llama2 Write a poem about C++. ``` As [GPT4All is included](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#model-providers), I guess local ollama models could also be possible.
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1343/reactions", "total_count": 4, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1343/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6816
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6816/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6816/comments
https://api.github.com/repos/ollama/ollama/issues/6816/events
https://github.com/ollama/ollama/issues/6816
2,526,970,923
I_kwDOJ0Z1Ps6WnoQr
6,816
High GPU and CPU usage
{ "login": "akseg73", "id": 45887240, "node_id": "MDQ6VXNlcjQ1ODg3MjQw", "avatar_url": "https://avatars.githubusercontent.com/u/45887240?v=4", "gravatar_id": "", "url": "https://api.github.com/users/akseg73", "html_url": "https://github.com/akseg73", "followers_url": "https://api.github.com/users/akseg73/followers", "following_url": "https://api.github.com/users/akseg73/following{/other_user}", "gists_url": "https://api.github.com/users/akseg73/gists{/gist_id}", "starred_url": "https://api.github.com/users/akseg73/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/akseg73/subscriptions", "organizations_url": "https://api.github.com/users/akseg73/orgs", "repos_url": "https://api.github.com/users/akseg73/repos", "events_url": "https://api.github.com/users/akseg73/events{/privacy}", "received_events_url": "https://api.github.com/users/akseg73/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-09-15T15:26:04
2024-09-15T20:49:22
2024-09-15T15:39:34
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? So I am running LLMs with ollama on Linux. I downloaded nvtop to check NVIDIA GPU usage. It seems that during inferencing both CPU and GPU usage shoot up. I am not sure why the CPU utilization is so high. Additionally, the rate at which tokens are generated doesn't seem to indicate whether the GPU is really in use. Is there some tool to determine the token generation rate? This would help determine whether it is the GPU or the CPU that is being utilized. Any help is appreciated. This is what `journalctl -u ollama.service` contains (which suggests that the Nvidia GPU is detected by ollama and is being utilized): Sep 15 06:46:37 fedora ollama[1615]: time=2024-09-15T06:46:37.717-07:00 level=INFO source=gpu.go:200 msg="looking for compatible GPUs" Sep 15 06:46:38 fedora ollama[1615]: time=2024-09-15T06:46:38.492-07:00 level=INFO source=types.go:107 msg="inference compute" id=GPU-644fab1e-0e88-895f-8468-deb48db36a81 library=cuda variant=v12 compute=8.9 driver=12.6 name="NVIDIA GeForce RTX 4070" total="11.6 GiB" available="11.5 GiB" ### OS _No response_ ### GPU Nvidia ### CPU Intel ### Ollama version _No response_
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6816/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6816/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/884
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/884/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/884/comments
https://api.github.com/repos/ollama/ollama/issues/884/events
https://github.com/ollama/ollama/pull/884
1,957,757,085
PR_kwDOJ0Z1Ps5dj9LT
884
bump submodules
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-10-23T18:23:31
2023-10-23T18:27:39
2023-10-23T18:27:38
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/884", "html_url": "https://github.com/ollama/ollama/pull/884", "diff_url": "https://github.com/ollama/ollama/pull/884.diff", "patch_url": "https://github.com/ollama/ollama/pull/884.patch", "merged_at": "2023-10-23T18:27:38" }
pin to 9e70cc03229df19ca2d28ce23cc817198f897278 for now since 438c2ca83045a00ef244093d27e9ed41a8cb4ea9 is breaking
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/884/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/884/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/6878
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6878/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6878/comments
https://api.github.com/repos/ollama/ollama/issues/6878/events
https://github.com/ollama/ollama/issues/6878
2,536,448,649
I_kwDOJ0Z1Ps6XLyKJ
6,878
Moshi /moshiko /moshika speech text foundation LLM by KyutAI
{ "login": "thiswillbeyourgithub", "id": 26625900, "node_id": "MDQ6VXNlcjI2NjI1OTAw", "avatar_url": "https://avatars.githubusercontent.com/u/26625900?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thiswillbeyourgithub", "html_url": "https://github.com/thiswillbeyourgithub", "followers_url": "https://api.github.com/users/thiswillbeyourgithub/followers", "following_url": "https://api.github.com/users/thiswillbeyourgithub/following{/other_user}", "gists_url": "https://api.github.com/users/thiswillbeyourgithub/gists{/gist_id}", "starred_url": "https://api.github.com/users/thiswillbeyourgithub/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thiswillbeyourgithub/subscriptions", "organizations_url": "https://api.github.com/users/thiswillbeyourgithub/orgs", "repos_url": "https://api.github.com/users/thiswillbeyourgithub/repos", "events_url": "https://api.github.com/users/thiswillbeyourgithub/events{/privacy}", "received_events_url": "https://api.github.com/users/thiswillbeyourgithub/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
0
2024-09-19T14:03:11
2024-09-19T14:11:09
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi, KyutAI finally released their moshi model. It's a "speech text foundational model", kinda like the promised gpt-4o, but it outputs text at the same time as speech. Here's their link: https://github.com/kyutai-labs/moshi Here's the hf link: https://huggingface.co/collections/kyutai/moshi-v01-release-66eaeaf3302bef6bd9ad7acd It exists in several versions, and some are already quantized.
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6878/reactions", "total_count": 9, "+1": 9, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6878/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/5896
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5896/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5896/comments
https://api.github.com/repos/ollama/ollama/issues/5896/events
https://github.com/ollama/ollama/issues/5896
2,426,267,290
I_kwDOJ0Z1Ps6Qneaa
5,896
Linear-time chat API
{ "login": "MostAwesomeDude", "id": 118035, "node_id": "MDQ6VXNlcjExODAzNQ==", "avatar_url": "https://avatars.githubusercontent.com/u/118035?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MostAwesomeDude", "html_url": "https://github.com/MostAwesomeDude", "followers_url": "https://api.github.com/users/MostAwesomeDude/followers", "following_url": "https://api.github.com/users/MostAwesomeDude/following{/other_user}", "gists_url": "https://api.github.com/users/MostAwesomeDude/gists{/gist_id}", "starred_url": "https://api.github.com/users/MostAwesomeDude/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MostAwesomeDude/subscriptions", "organizations_url": "https://api.github.com/users/MostAwesomeDude/orgs", "repos_url": "https://api.github.com/users/MostAwesomeDude/repos", "events_url": "https://api.github.com/users/MostAwesomeDude/events{/privacy}", "received_events_url": "https://api.github.com/users/MostAwesomeDude/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
2
2024-07-23T23:16:31
2024-09-14T02:18:21
2024-09-14T02:18:21
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
The `/api/chat` endpoint provokes quadratic-time behavior when used for an extended chat session. This is a design issue, not an implementation issue. The standard analogy we use to understand this issue is known as "Schlemiel the painter", after a traditional Yiddish joke. Imagine a painter whose paintcan is fixed and whose canvas is large; they must traverse the distance between the paintcan and the canvas repeatedly. Here, we face a similar issue; the initial state of the model is fixed with respect to the prompt and we must traverse the entire intermediate body of the chat before we can generate fresh tokens at the end. One possible solution is to set up multiple paintcans. There is a statistical data structure called a [skip list](https://en.wikipedia.org/wiki/Skip_list) which could be used to cache intermediate fragments of chats. This would decrease the average time taken to something acceptable, but it would still vary depending on a PRNG. I don't like the privacy implications of this either, but they might be acceptable. Another approach is to always move the paintcan to the end of the canvas, requiring a per-canvas paintcan. A session-based chat API would work; when a new chat is started, a session ID is returned representing a server-side stored model state, and subsequent calls to the same session reuse that state. This would require keeping runners alive for much longer than a single request, and also require some sort of eviction and serialization of model state from GPUs; it's not a trivial feature to implement. As it currently stands, this is a showstopping issue for me when compared to my creaky old pile of HuggingFace wrappers. See also #1556 for other folks being affected by this issue.
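To make the quadratic growth described above concrete, here is a tiny illustrative calculation (the per-message token count is hypothetical, not a measurement from Ollama) of how much prompt text gets re-processed when every request resends the full history:

```python
# Illustrative only: why resending the full chat history each turn is quadratic.
# Token counts are hypothetical; real numbers depend on the model and messages.

def total_prompt_tokens(turns: int, tokens_per_message: int = 50) -> int:
    """Tokens re-processed across a chat when turn k resends k prior messages."""
    return sum(turn * tokens_per_message for turn in range(1, turns + 1))

for n in (10, 100, 1000):
    print(f"{n:>4} turns -> {total_prompt_tokens(n):>10} prompt tokens re-processed")
# Grows as ~n^2: 2,750 at 10 turns, 252,500 at 100, 25,025,000 at 1000.
```

A cache of intermediate states or a session-based API, as sketched in the issue, would bring this back toward linear growth in the number of turns.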
{ "login": "MostAwesomeDude", "id": 118035, "node_id": "MDQ6VXNlcjExODAzNQ==", "avatar_url": "https://avatars.githubusercontent.com/u/118035?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MostAwesomeDude", "html_url": "https://github.com/MostAwesomeDude", "followers_url": "https://api.github.com/users/MostAwesomeDude/followers", "following_url": "https://api.github.com/users/MostAwesomeDude/following{/other_user}", "gists_url": "https://api.github.com/users/MostAwesomeDude/gists{/gist_id}", "starred_url": "https://api.github.com/users/MostAwesomeDude/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MostAwesomeDude/subscriptions", "organizations_url": "https://api.github.com/users/MostAwesomeDude/orgs", "repos_url": "https://api.github.com/users/MostAwesomeDude/repos", "events_url": "https://api.github.com/users/MostAwesomeDude/events{/privacy}", "received_events_url": "https://api.github.com/users/MostAwesomeDude/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5896/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5896/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7012
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7012/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7012/comments
https://api.github.com/repos/ollama/ollama/issues/7012/events
https://github.com/ollama/ollama/issues/7012
2,553,924,808
I_kwDOJ0Z1Ps6YOczI
7,012
Error: no suitable llama servers found - ran out of tmpfs space
{ "login": "gfkdliucheng", "id": 24772003, "node_id": "MDQ6VXNlcjI0NzcyMDAz", "avatar_url": "https://avatars.githubusercontent.com/u/24772003?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gfkdliucheng", "html_url": "https://github.com/gfkdliucheng", "followers_url": "https://api.github.com/users/gfkdliucheng/followers", "following_url": "https://api.github.com/users/gfkdliucheng/following{/other_user}", "gists_url": "https://api.github.com/users/gfkdliucheng/gists{/gist_id}", "starred_url": "https://api.github.com/users/gfkdliucheng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gfkdliucheng/subscriptions", "organizations_url": "https://api.github.com/users/gfkdliucheng/orgs", "repos_url": "https://api.github.com/users/gfkdliucheng/repos", "events_url": "https://api.github.com/users/gfkdliucheng/events{/privacy}", "received_events_url": "https://api.github.com/users/gfkdliucheng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396220, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA", "url": "https://api.github.com/repos/ollama/ollama/labels/question", "name": "question", "color": "d876e3", "default": true, "description": "General questions" }, { "id": 5755339642, "node_id": "LA_kwDOJ0Z1Ps8AAAABVwuDeg", "url": "https://api.github.com/repos/ollama/ollama/labels/linux", "name": "linux", "color": "516E70", "default": false, "description": "" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
10
2024-09-28T00:52:39
2024-10-26T11:31:21
2024-10-17T17:47:37
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? On aarch64 it can pull the model, but it ultimately fails with ‘Error: no suitable llama servers found’. Here is the log: Sep 28 08:43:15 orangepi5 ollama[2563639]: Couldn't find '/usr/share/ollama/.ollama/id_ed25519'. Generating new privat> Sep 28 08:43:15 orangepi5 ollama[2563639]: Your new public key is: Sep 28 08:43:15 orangepi5 ollama[2563639]: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHeo8oQtpkwmLudISuFZEbMoDEUgI6w0aKmAIBw> Sep 28 08:43:15 orangepi5 ollama[2563639]: 2024/09/28 08:43:15 routes.go:1153: INFO server config env="map[CUDA_VISIBL> Sep 28 08:43:15 orangepi5 ollama[2563639]: time=2024-09-28T08:43:15.751+08:00 level=INFO source=images.go:753 msg="tot> Sep 28 08:43:15 orangepi5 ollama[2563639]: time=2024-09-28T08:43:15.751+08:00 level=INFO source=images.go:760 msg="tot> Sep 28 08:43:15 orangepi5 ollama[2563639]: time=2024-09-28T08:43:15.752+08:00 level=INFO source=routes.go:1200 msg="Li> Sep 28 08:43:15 orangepi5 ollama[2563639]: time=2024-09-28T08:43:15.753+08:00 level=INFO source=common.go:135 msg="ext> Sep 28 08:43:30 orangepi5 ollama[2563639]: time=2024-09-28T08:43:30.282+08:00 level=ERROR source=common.go:214 msg="fa> Sep 28 08:43:30 orangepi5 ollama[2563639]: time=2024-09-28T08:43:30.689+08:00 level=INFO source=common.go:49 msg="Dyna> Sep 28 08:43:30 orangepi5 ollama[2563639]: time=2024-09-28T08:43:30.689+08:00 level=INFO source=gpu.go:199 msg="lookin> Sep 28 08:43:30 orangepi5 ollama[2563639]: time=2024-09-28T08:43:30.689+08:00 level=WARN source=gpu.go:669 msg="unable> Sep 28 08:43:30 orangepi5 ollama[2563639]: time=2024-09-28T08:43:30.689+08:00 level=WARN source=gpu.go:669 msg="unable> Sep 28 08:43:30 orangepi5 ollama[2563639]: time=2024-09-28T08:43:30.689+08:00 level=WARN source=gpu.go:669 msg="unable> Sep 28 08:43:30 orangepi5 ollama[2563639]: time=2024-09-28T08:43:30.693+08:00 level=WARN source=gpu.go:669 msg="unable> Sep 28 08:43:30 orangepi5 ollama[2563639]: time=2024-09-28T08:43:30.694+08:00 level=INFO source=gpu.go:347 msg="no com> Sep 28 08:43:30 orangepi5 ollama[2563639]: time=2024-09-28T08:43:30.694+08:00 level=INFO source=types.go:107 msg="infe> Sep 28 08:43:30 orangepi5 ollama[2563639]: [GIN] 2024/09/28 - 08:43:30 | 200 | 248.788µs | 127.0.0.1 | HEAD > Sep 28 08:43:30 orangepi5 ollama[2563639]: [GIN] 2024/09/28 - 08:43:30 | 200 | 413.577µs | 127.0.0.1 | GET > Sep 28 08:43:50 orangepi5 ollama[2563639]: [GIN] 2024/09/28 - 08:43:50 | 200 | 50.166µs | 127.0.0.1 | HEAD > Sep 28 08:43:50 orangepi5 ollama[2563639]: [GIN] 2024/09/28 - 08:43:50 | 404 | 348.244µs | 127.0.0.1 | POST > Sep 28 08:43:57 orangepi5 ollama[2563639]: time=2024-09-28T08:43:57.010+08:00 level=INFO source=download.go:175 msg="d> Sep 28 08:45:49 orangepi5 ollama[2563639]: [GIN] 2024/09/28 - 08:45:49 | 200 | 1m59s | 127.0.0.1 | POST > Sep 28 08:45:51 orangepi5 ollama[2563639]: [GIN] 2024/09/28 - 08:45:51 | 200 | 95.665µs | 127.0.0.1 | HEAD > Sep 28 08:45:51 orangepi5 ollama[2563639]: [GIN] 2024/09/28 - 08:45:51 | 404 | 293.412µs | 127.0.0.1 | POST > Sep 28 08:45:52 orangepi5 ollama[2563639]: time=2024-09-28T08:45:52.986+08:00 level=INFO source=download.go:175 msg="d> Sep 28 08:45:59 orangepi5 ollama[2563639]: time=2024-09-28T08:45:59.746+08:00 level=INFO source=download.go:175 msg="d> Sep 28 08:46:02 orangepi5 ollama[2563639]: time=2024-09-28T08:46:02.516+08:00 level=INFO source=download.go:175 msg="d> Sep 28 08:46:05 orangepi5 ollama[2563639]: time=2024-09-28T08:46:05.227+08:00 level=INFO source=download.go:175 msg="d> Sep 28 08:46:08 orangepi5 ollama[2563639]: time=2024-09-28T08:46:08.140+08:00 level=INFO source=download.go:175 msg="d> Sep 28 08:46:12 orangepi5 ollama[2563639]: [GIN] 2024/09/28 - 08:46:12 | 200 | 21.331892482s | 127.0.0.1 | POST > Sep 28 08:46:12 orangepi5 ollama[2563639]: [GIN] 2024/09/28 - 08:46:12 | 200 | 82.897373ms | 127.0.0.1 | POST > Sep 28 08:46:12 orangepi5 ollama[2563639]: time=2024-09-28T08:46:12.855+08:00 level=INFO source=server.go:103 msg="sys> Sep 28 08:46:12 orangepi5 ollama[2563639]: time=2024-09-28T08:46:12.856+08:00 level=INFO source=memory.go:326 msg="off> Sep 28 08:46:26 orangepi5 ollama[2563639]: time=2024-09-28T08:46:26.830+08:00 level=ERROR source=common.go:214 msg="fa> Sep 28 08:46:27 orangepi5 ollama[2563639]: time=2024-09-28T08:46:27.288+08:00 level=INFO source=sched.go:428 msg="NewL> Sep 28 08:46:27 orangepi5 ollama[2563639]: [GIN] 2024/09/28 - 08:46:27 | 500 | 14.552434001s | 127.0.0.1 | POST > ### OS Linux ### GPU Other ### CPU Other ### Ollama version 0.3.12
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7012/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7012/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2160
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2160/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2160/comments
https://api.github.com/repos/ollama/ollama/issues/2160/events
https://github.com/ollama/ollama/issues/2160
2,096,477,742
I_kwDOJ0Z1Ps589bYu
2,160
Add support for fresh initialization and hooks in Docker image
{ "login": "eddumelendez", "id": 1810547, "node_id": "MDQ6VXNlcjE4MTA1NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/1810547?v=4", "gravatar_id": "", "url": "https://api.github.com/users/eddumelendez", "html_url": "https://github.com/eddumelendez", "followers_url": "https://api.github.com/users/eddumelendez/followers", "following_url": "https://api.github.com/users/eddumelendez/following{/other_user}", "gists_url": "https://api.github.com/users/eddumelendez/gists{/gist_id}", "starred_url": "https://api.github.com/users/eddumelendez/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eddumelendez/subscriptions", "organizations_url": "https://api.github.com/users/eddumelendez/orgs", "repos_url": "https://api.github.com/users/eddumelendez/repos", "events_url": "https://api.github.com/users/eddumelendez/events{/privacy}", "received_events_url": "https://api.github.com/users/eddumelendez/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-01-23T16:29:08
2024-03-11T19:14:18
2024-03-11T19:14:18
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
It would be nice to improve model pulling and custom model creation when starting with Ollama. # Fresh initialization Some databases allow copying `.sh` and `.sql` files that are executed once the instance is ready. See mysql [docs](https://github.com/docker-library/docs/tree/master/mysql#initializing-a-fresh-instance). Example: Create `ollama.sh` ```sh ollama pull llama2 ``` Copy or mount the script under `docker-entrypoint-initollama.d/ollama.sh`, and once Ollama is up the model will be pulled automatically. # Hooks LocalStack goes a step further than the fresh-initialization approach with [lifecycle stages and hooks](https://docs.localstack.cloud/references/init-hooks/), and it also provides a nice [API to check the status of those hooks](https://docs.localstack.cloud/references/init-hooks/#status-endpoint). Example: Create `ollama.sh` ```sh ollama pull llama2 ``` Copy or mount the script under `/etc/localstack/init/ready.d/ollama.sh`. > [!NOTE] > LocalStack offers `boot`, `ready`, `shutdown` and `start` stages, but I'm not sure all of them have a use case in Ollama. With this enhancement, getting started with Ollama via Dockerfile, Docker Compose and Testcontainers would improve: mount or copy the files and the model is ready to use. My use case is simply pulling models, but it would also help with creating custom models.
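For reference, here is a minimal sketch (not a definitive design) of what a `ready`-stage hook could do today from a plain script: it assumes the server is reachable at the default `http://localhost:11434` and uses the `/api/pull` REST endpoint; the model name is only an example.

```python
# Sketch of an init-hook style script: wait for the Ollama server, then pull a model.
# Assumes the default server address; adjust BASE for other setups.
import json
import time
import urllib.request

BASE = "http://localhost:11434"

def wait_until_ready(timeout: float = 60.0) -> None:
    """Poll the server root until it responds or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            urllib.request.urlopen(BASE, timeout=2)
            return
        except OSError:
            time.sleep(1)
    raise RuntimeError("Ollama server did not become ready in time")

def pull(model: str) -> None:
    """Request a model pull via the REST API and print the streamed progress."""
    req = urllib.request.Request(
        f"{BASE}/api/pull",
        data=json.dumps({"name": model}).encode(),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        for line in resp:
            print(line.decode().strip())

if __name__ == "__main__":
    wait_until_ready()
    pull("llama2")  # example model name
```

A built-in hook mechanism would essentially run this kind of step automatically at the right lifecycle stage, without the caller having to poll for readiness.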
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2160/reactions", "total_count": 4, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2160/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/6740
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6740/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6740/comments
https://api.github.com/repos/ollama/ollama/issues/6740/events
https://github.com/ollama/ollama/issues/6740
2,518,319,091
I_kwDOJ0Z1Ps6WGn_z
6,740
`ollama show` spaces out everything with empty lines for custom Modelfile
{ "login": "songyang-dev", "id": 10444460, "node_id": "MDQ6VXNlcjEwNDQ0NDYw", "avatar_url": "https://avatars.githubusercontent.com/u/10444460?v=4", "gravatar_id": "", "url": "https://api.github.com/users/songyang-dev", "html_url": "https://github.com/songyang-dev", "followers_url": "https://api.github.com/users/songyang-dev/followers", "following_url": "https://api.github.com/users/songyang-dev/following{/other_user}", "gists_url": "https://api.github.com/users/songyang-dev/gists{/gist_id}", "starred_url": "https://api.github.com/users/songyang-dev/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/songyang-dev/subscriptions", "organizations_url": "https://api.github.com/users/songyang-dev/orgs", "repos_url": "https://api.github.com/users/songyang-dev/repos", "events_url": "https://api.github.com/users/songyang-dev/events{/privacy}", "received_events_url": "https://api.github.com/users/songyang-dev/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
1
2024-09-11T02:33:16
2024-09-11T21:58:41
2024-09-11T21:58:41
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Running the command in WSL for default models pulled from the registry works fine. But when running the command for local models created from a Modelfile, it results in too many spaces. ```console yangs@THE-HIVEMIND:~/ai-fun$ ollama show llama3.1 Model arch llama parameters 8.0B quantization Q4_0 context length 131072 embedding length 4096 Parameters stop "<|start_header_id|>" stop "<|end_header_id|>" stop "<|eot_id|>" License LLAMA 3.1 COMMUNITY LICENSE AGREEMENT Llama 3.1 Version Release Date: July 23, 2024 yangs@THE-HIVEMIND:~/ai-fun$ ollama show adventure Model arch llama parameters 8.0B quantization Q4_0 context length 131072 embedding length 4096 Parameters stop "<|start_header_id|>" stop "<|end_header_id|>" stop "<|eot_id|>" stop "<|reserved_special_token|>" System You are a story writer... License LLAMA 3.1 COMMUNITY LICENSE AGREEMENT Llama 3.1 Version Release Date: July 23, 2024 ``` ### OS WSL2 ### GPU Nvidia ### CPU Intel ### Ollama version 0.3.9
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6740/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6740/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/1985
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1985/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1985/comments
https://api.github.com/repos/ollama/ollama/issues/1985/events
https://github.com/ollama/ollama/pull/1985
2,080,578,786
PR_kwDOJ0Z1Ps5kBA9i
1,985
Disable `mmap` with lora layers
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-01-14T04:36:26
2024-01-14T04:36:32
2024-01-14T04:36:31
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/1985", "html_url": "https://github.com/ollama/ollama/pull/1985", "diff_url": "https://github.com/ollama/ollama/pull/1985.diff", "patch_url": "https://github.com/ollama/ollama/pull/1985.patch", "merged_at": "2024-01-14T04:36:31" }
Fixes https://github.com/jmorganca/ollama/issues/1965
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1985/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1985/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5795
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5795/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5795/comments
https://api.github.com/repos/ollama/ollama/issues/5795/events
https://github.com/ollama/ollama/issues/5795
2,418,902,141
I_kwDOJ0Z1Ps6QLYR9
5,795
Windows: could not connect to ollama app, is it running?
{ "login": "NasonZ", "id": 66695083, "node_id": "MDQ6VXNlcjY2Njk1MDgz", "avatar_url": "https://avatars.githubusercontent.com/u/66695083?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NasonZ", "html_url": "https://github.com/NasonZ", "followers_url": "https://api.github.com/users/NasonZ/followers", "following_url": "https://api.github.com/users/NasonZ/following{/other_user}", "gists_url": "https://api.github.com/users/NasonZ/gists{/gist_id}", "starred_url": "https://api.github.com/users/NasonZ/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NasonZ/subscriptions", "organizations_url": "https://api.github.com/users/NasonZ/orgs", "repos_url": "https://api.github.com/users/NasonZ/repos", "events_url": "https://api.github.com/users/NasonZ/events{/privacy}", "received_events_url": "https://api.github.com/users/NasonZ/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
12
2024-07-19T13:18:43
2024-11-16T04:46:18
2024-07-19T17:50:41
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Logged on to my machine today to find ollama has stopped working. There have been no changes I can think of between when it was working 12 hours ago and now. I've tried uninstalling and reinstalling Ollama but this didn't help. ``` >ollama --version Warning: could not connect to a running Ollama instance Warning: client version is 0.2.7 >ollama list Error: could not connect to ollama app, is it running? ``` App logs: [app.log](https://github.com/user-attachments/files/16312415/app.log) ### OS Windows ### GPU Nvidia ### CPU AMD ### Ollama version 0.2.7
{ "login": "NasonZ", "id": 66695083, "node_id": "MDQ6VXNlcjY2Njk1MDgz", "avatar_url": "https://avatars.githubusercontent.com/u/66695083?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NasonZ", "html_url": "https://github.com/NasonZ", "followers_url": "https://api.github.com/users/NasonZ/followers", "following_url": "https://api.github.com/users/NasonZ/following{/other_user}", "gists_url": "https://api.github.com/users/NasonZ/gists{/gist_id}", "starred_url": "https://api.github.com/users/NasonZ/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NasonZ/subscriptions", "organizations_url": "https://api.github.com/users/NasonZ/orgs", "repos_url": "https://api.github.com/users/NasonZ/repos", "events_url": "https://api.github.com/users/NasonZ/events{/privacy}", "received_events_url": "https://api.github.com/users/NasonZ/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5795/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5795/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2376
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2376/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2376/comments
https://api.github.com/repos/ollama/ollama/issues/2376/events
https://github.com/ollama/ollama/pull/2376
2,121,582,293
PR_kwDOJ0Z1Ps5mL7bH
2,376
OpenAI API compatibility
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
6
2024-02-06T20:08:06
2024-02-10T03:26:46
2024-02-07T22:24:30
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2376", "html_url": "https://github.com/ollama/ollama/pull/2376", "diff_url": "https://github.com/ollama/ollama/pull/2376.diff", "patch_url": "https://github.com/ollama/ollama/pull/2376.patch", "merged_at": "2024-02-07T22:24:29" }
This adds experimental compatibility with the OpenAI Chat Completions (i.e. `/v1/chat/completions`) API. Details on compatibility and supported fields are in `docs/openai.md`. Fixes #305
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2376/reactions", "total_count": 44, "+1": 0, "-1": 0, "laugh": 0, "hooray": 19, "confused": 0, "heart": 10, "rocket": 15, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2376/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2335
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2335/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2335/comments
https://api.github.com/repos/ollama/ollama/issues/2335/events
https://github.com/ollama/ollama/issues/2335
2,116,323,257
I_kwDOJ0Z1Ps5-JIe5
2,335
Setting OLLAMA_ORIGINS
{ "login": "prologic", "id": 1290234, "node_id": "MDQ6VXNlcjEyOTAyMzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/1290234?v=4", "gravatar_id": "", "url": "https://api.github.com/users/prologic", "html_url": "https://github.com/prologic", "followers_url": "https://api.github.com/users/prologic/followers", "following_url": "https://api.github.com/users/prologic/following{/other_user}", "gists_url": "https://api.github.com/users/prologic/gists{/gist_id}", "starred_url": "https://api.github.com/users/prologic/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/prologic/subscriptions", "organizations_url": "https://api.github.com/users/prologic/orgs", "repos_url": "https://api.github.com/users/prologic/repos", "events_url": "https://api.github.com/users/prologic/events{/privacy}", "received_events_url": "https://api.github.com/users/prologic/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2024-02-03T07:22:32
2024-04-19T15:41:41
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I came across this nifty little Chrome extension called [Lumos](https://github.com/andrewnguonly/Lumos), and according to its docs I have to run `ollama` like this: ```console OLLAMA_ORIGINS=chrome-extension://* ollama serve ``` I _actually_ happen to run the Ollama macOS app that automatically updated and launched at startup with a little tray icon. I know I can edit its launchctl configuration file, but that'll get overridden on the next update. Can we have a simple "Settings" panel in the GUI so we can add things like this? 🙏
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2335/reactions", "total_count": 12, "+1": 9, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 2, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/2335/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/1857
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1857/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1857/comments
https://api.github.com/repos/ollama/ollama/issues/1857/events
https://github.com/ollama/ollama/pull/1857
2,070,914,039
PR_kwDOJ0Z1Ps5jgC6j
1,857
[Python] Introduce Official Async Python Client
{ "login": "juharris", "id": 1594505, "node_id": "MDQ6VXNlcjE1OTQ1MDU=", "avatar_url": "https://avatars.githubusercontent.com/u/1594505?v=4", "gravatar_id": "", "url": "https://api.github.com/users/juharris", "html_url": "https://github.com/juharris", "followers_url": "https://api.github.com/users/juharris/followers", "following_url": "https://api.github.com/users/juharris/following{/other_user}", "gists_url": "https://api.github.com/users/juharris/gists{/gist_id}", "starred_url": "https://api.github.com/users/juharris/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/juharris/subscriptions", "organizations_url": "https://api.github.com/users/juharris/orgs", "repos_url": "https://api.github.com/users/juharris/repos", "events_url": "https://api.github.com/users/juharris/events{/privacy}", "received_events_url": "https://api.github.com/users/juharris/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
4
2024-01-08T17:23:53
2024-01-11T23:51:27
2024-01-08T19:25:40
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
true
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/1857", "html_url": "https://github.com/ollama/ollama/pull/1857", "diff_url": "https://github.com/ollama/ollama/pull/1857.diff", "patch_url": "https://github.com/ollama/ollama/pull/1857.patch", "merged_at": null }
WIP
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1857/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1857/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7586
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7586/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7586/comments
https://api.github.com/repos/ollama/ollama/issues/7586/events
https://github.com/ollama/ollama/pull/7586
2,645,690,327
PR_kwDOJ0Z1Ps6BY6VP
7,586
cmd: preserve exact bytes when displaying template/system layers
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-11-09T06:22:02
2024-11-14T07:53:32
2024-11-14T07:53:30
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7586", "html_url": "https://github.com/ollama/ollama/pull/7586", "diff_url": "https://github.com/ollama/ollama/pull/7586.diff", "patch_url": "https://github.com/ollama/ollama/pull/7586.patch", "merged_at": "2024-11-14T07:53:30" }
Stop corrupting the output of template and system layers with extraneous newlines, which can alter model behavior if the output is used as input to a new model.
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7586/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7586/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/6596
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6596/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6596/comments
https://api.github.com/repos/ollama/ollama/issues/6596/events
https://github.com/ollama/ollama/issues/6596
2,501,463,875
I_kwDOJ0Z1Ps6VGU9D
6,596
Unloading a model
{ "login": "tallesl", "id": 3655047, "node_id": "MDQ6VXNlcjM2NTUwNDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3655047?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tallesl", "html_url": "https://github.com/tallesl", "followers_url": "https://api.github.com/users/tallesl/followers", "following_url": "https://api.github.com/users/tallesl/following{/other_user}", "gists_url": "https://api.github.com/users/tallesl/gists{/gist_id}", "starred_url": "https://api.github.com/users/tallesl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tallesl/subscriptions", "organizations_url": "https://api.github.com/users/tallesl/orgs", "repos_url": "https://api.github.com/users/tallesl/repos", "events_url": "https://api.github.com/users/tallesl/events{/privacy}", "received_events_url": "https://api.github.com/users/tallesl/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
4
2024-09-02T18:50:14
2024-09-11T16:13:02
2024-09-04T23:36:22
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Is `systemctl restart ollama` the way to go?
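For context on this report, a minimal sketch of unloading a model without restarting the service, assuming the server runs on the default `localhost:11434` and that a model named `llama3.1` is currently loaded (both are illustrative assumptions): sending a request with `keep_alive` set to `0` asks the server to evict the model immediately.

```python
# Sketch: unload a loaded model by sending keep_alive: 0 to /api/generate
# (assumes the default Ollama address and an example model name "llama3.1").
import requests

resp = requests.post(
    "http://localhost:11434/api/generate",
    json={"model": "llama3.1", "keep_alive": 0},  # empty prompt + keep_alive 0 requests an unload
    timeout=30,
)
resp.raise_for_status()
print(resp.json())  # the response should indicate the model was unloaded
```

Newer releases also expose `ollama stop <model>` on the CLI, so a full `systemctl restart ollama` should not be needed just to free VRAM.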
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6596/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6596/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3050
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3050/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3050/comments
https://api.github.com/repos/ollama/ollama/issues/3050/events
https://github.com/ollama/ollama/issues/3050
2,178,169,201
I_kwDOJ0Z1Ps6B1Dlx
3,050
API: /v1/models got 404 page not found
{ "login": "github-lly", "id": 159240728, "node_id": "U_kgDOCX3SGA", "avatar_url": "https://avatars.githubusercontent.com/u/159240728?v=4", "gravatar_id": "", "url": "https://api.github.com/users/github-lly", "html_url": "https://github.com/github-lly", "followers_url": "https://api.github.com/users/github-lly/followers", "following_url": "https://api.github.com/users/github-lly/following{/other_user}", "gists_url": "https://api.github.com/users/github-lly/gists{/gist_id}", "starred_url": "https://api.github.com/users/github-lly/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/github-lly/subscriptions", "organizations_url": "https://api.github.com/users/github-lly/orgs", "repos_url": "https://api.github.com/users/github-lly/repos", "events_url": "https://api.github.com/users/github-lly/events{/privacy}", "received_events_url": "https://api.github.com/users/github-lly/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-03-11T03:48:06
2024-03-11T05:38:29
2024-03-11T05:38:29
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Ollama is running, but _/v1/models_ returns "404 page not found". ![image](https://github.com/ollama/ollama/assets/159240728/b7cd8321-604b-4b78-9190-2fb1a4c57ea3) ![image](https://github.com/ollama/ollama/assets/159240728/2687a3c7-1b1a-415e-acc4-ad894e26da43)
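As a hedged sketch related to this report: builds that predate the OpenAI-compatible `/v1/models` route still answer the native `/api/tags` endpoint, which lists the locally installed models (the default `localhost:11434` host below is an assumption).

```python
# Sketch: list local models via the native /api/tags endpoint
# (assumes the default Ollama address; adjust host/port as needed).
import requests

resp = requests.get("http://localhost:11434/api/tags", timeout=10)
resp.raise_for_status()
for model in resp.json().get("models", []):
    print(model["name"], model.get("size"))
```

If `/v1/models` comes back with `404 page not found`, upgrading to a build that ships the OpenAI-compatible model listing, or querying `/api/tags` as above, is the usual way forward.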
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3050/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3050/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3871
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3871/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3871/comments
https://api.github.com/repos/ollama/ollama/issues/3871/events
https://github.com/ollama/ollama/issues/3871
2,260,563,988
I_kwDOJ0Z1Ps6GvXgU
3,871
llama3_instruct_70b_q8 does not work properly in Ollama.
{ "login": "17Reset", "id": 122418720, "node_id": "U_kgDOB0v2IA", "avatar_url": "https://avatars.githubusercontent.com/u/122418720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/17Reset", "html_url": "https://github.com/17Reset", "followers_url": "https://api.github.com/users/17Reset/followers", "following_url": "https://api.github.com/users/17Reset/following{/other_user}", "gists_url": "https://api.github.com/users/17Reset/gists{/gist_id}", "starred_url": "https://api.github.com/users/17Reset/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/17Reset/subscriptions", "organizations_url": "https://api.github.com/users/17Reset/orgs", "repos_url": "https://api.github.com/users/17Reset/repos", "events_url": "https://api.github.com/users/17Reset/events{/privacy}", "received_events_url": "https://api.github.com/users/17Reset/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
6
2024-04-24T07:19:36
2024-06-16T02:48:19
2024-05-21T17:44:22
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I added the model in gguf format from llama3_instruct_70b_q8 to ollama and used ollama for inference, and got the error shown below: ``` time=2024-04-24T15:08:27.886+08:00 level=WARN source=server.go:51 msg="requested context length is greater than model max context length" requested=4096 model=0 time=2024-04-24T15:08:27.886+08:00 level=INFO source=gpu.go:121 msg="Detecting GPU type" time=2024-04-24T15:08:27.886+08:00 level=INFO source=gpu.go:268 msg="Searching for GPU management library libcudart.so*" time=2024-04-24T15:08:27.895+08:00 level=INFO source=gpu.go:314 msg="Discovered GPU libraries: [/tmp/ollama2048460055/runners/cuda_v11/libcudart.so.11.0 /usr/local/cuda/lib64/libcudart.so.12.4.127]" time=2024-04-24T15:08:27.896+08:00 level=INFO source=gpu.go:126 msg="Nvidia GPU detected via cudart" time=2024-04-24T15:08:27.897+08:00 level=INFO source=cpu_common.go:11 msg="CPU has AVX2" time=2024-04-24T15:08:28.284+08:00 level=INFO source=gpu.go:202 msg="[cudart] CUDART CUDA Compute Capability detected: 8.9" time=2024-04-24T15:08:28.346+08:00 level=INFO source=gpu.go:121 msg="Detecting GPU type" time=2024-04-24T15:08:28.346+08:00 level=INFO source=gpu.go:268 msg="Searching for GPU management library libcudart.so*" time=2024-04-24T15:08:28.347+08:00 level=INFO source=gpu.go:314 msg="Discovered GPU libraries: [/tmp/ollama2048460055/runners/cuda_v11/libcudart.so.11.0 /usr/local/cuda/lib64/libcudart.so.12.4.127]" time=2024-04-24T15:08:28.348+08:00 level=INFO source=gpu.go:126 msg="Nvidia GPU detected via cudart" time=2024-04-24T15:08:28.348+08:00 level=INFO source=cpu_common.go:11 msg="CPU has AVX2" time=2024-04-24T15:08:28.457+08:00 level=INFO source=gpu.go:202 msg="[cudart] CUDART CUDA Compute Capability detected: 8.9" 2024/04/24 15:08:28 [Recovery] 2024/04/24 - 15:08:28 panic recovered: runtime error: integer divide by zero runtime/panic.go:240 (0x45a45d) github.com/ollama/ollama/llm/server.go:71 (0x8c4198) github.com/ollama/ollama/server/routes.go:101 (0xee875d) github.com/ollama/ollama/server/routes.go:1295 (0xef47ea) github.com/gin-gonic/gin@v1.9.1/context.go:174 (0xebb84a) github.com/ollama/ollama/server/routes.go:1017 (0xef2f7c) github.com/gin-gonic/gin@v1.9.1/context.go:174 (0xec8739) github.com/gin-gonic/gin@v1.9.1/recovery.go:102 (0xec8727) github.com/gin-gonic/gin@v1.9.1/context.go:174 (0xec787c) github.com/gin-gonic/gin@v1.9.1/logger.go:240 (0xec7863) github.com/gin-gonic/gin@v1.9.1/context.go:174 (0xec6d6d) github.com/gin-gonic/gin@v1.9.1/gin.go:620 (0xec69fc) github.com/gin-gonic/gin@v1.9.1/gin.go:576 (0xec6531) net/http/server.go:3137 (0x721f4d) net/http/server.go:2039 (0x71d307) runtime/asm_amd64.s:1695 (0x491a20) [GIN] 2024/04/24 - 15:08:28 | 500 | 639.135847ms | 192.168.18.174 | POST "/api/chat" ```
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3871/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3871/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3613
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3613/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3613/comments
https://api.github.com/repos/ollama/ollama/issues/3613/events
https://github.com/ollama/ollama/issues/3613
2,239,398,606
I_kwDOJ0Z1Ps6FeoLO
3,613
langchain embedding from remote server
{ "login": "Ana0112", "id": 111160059, "node_id": "U_kgDOBqAq-w", "avatar_url": "https://avatars.githubusercontent.com/u/111160059?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Ana0112", "html_url": "https://github.com/Ana0112", "followers_url": "https://api.github.com/users/Ana0112/followers", "following_url": "https://api.github.com/users/Ana0112/following{/other_user}", "gists_url": "https://api.github.com/users/Ana0112/gists{/gist_id}", "starred_url": "https://api.github.com/users/Ana0112/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ana0112/subscriptions", "organizations_url": "https://api.github.com/users/Ana0112/orgs", "repos_url": "https://api.github.com/users/Ana0112/repos", "events_url": "https://api.github.com/users/Ana0112/events{/privacy}", "received_events_url": "https://api.github.com/users/Ana0112/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
5
2024-04-12T08:32:03
2024-04-15T19:15:57
2024-04-15T19:15:57
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I am using this code [langchain](https://github.com/ollama/ollama/blob/main/docs/tutorials/langchainpy.md) to get embeddings. Code: ``` loader = PyPDFDirectoryLoader("data") data = loader.load() from langchain.text_splitter import RecursiveCharacterTextSplitter text_splitter=RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) all_splits = text_splitter.split_documents(data) from langchain.embeddings import OllamaEmbeddings from langchain.vectorstores import Chroma oembed = OllamaEmbeddings(base_url="https://11aa-11-111-111-111.ngrok-free.app/:11434", model="nomic-embed-text") ``` Up to this point, the code works fine. The following line throws an error: ``` vectorstore = Chroma.from_documents(documents=all_splits, embedding=oembed) ``` `ValueError: Error raised by inference API HTTP code: 404, 404 page not found` I want to use these embeddings for RAG with Groq: ``` rag_template = """Answer the question based only on the following context: {context} Question: {question} """ rag_prompt = ChatPromptTemplate.from_template(rag_template) rag_chain = ( {"context": retriever, "question": RunnablePassthrough()} | rag_prompt | llm | StrOutputParser() ) ``` I am able to generate a collection using ChromaDB standalone (i.e. not using LangChain) with this code: [embedding-model](https://ollama.com/blog/embedding-models), but then I don't know how to use the generated embeddings with the Groq LLM for RAG. ### What did you expect to see? ChromaDB should have generated the vectorstore ### Steps to reproduce _No response_ ### Are there any recent changes that introduced the issue? _No response_ ### OS Linux ### Architecture _No response_ ### Platform _No response_ ### Ollama version _No response_ ### GPU _No response_ ### GPU info _No response_ ### CPU _No response_ ### Other software _No response_
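A hedged sketch of what a working embeddings call might look like, assuming the root cause is the malformed `base_url` (the trailing `/:11434` suffix) rather than Ollama itself; the ngrok hostname is the reporter's own placeholder, and the tunnel is assumed to forward to port 11434 already.

```python
# Sketch: point OllamaEmbeddings at a well-formed base URL and embed a query.
# The remote URL should be the plain tunnel address (the tunnel already
# forwards to port 11434), or http://localhost:11434 when running locally.
from langchain.embeddings import OllamaEmbeddings

oembed = OllamaEmbeddings(
    base_url="https://11aa-11-111-111-111.ngrok-free.app",  # placeholder tunnel URL
    model="nomic-embed-text",
)
vector = oembed.embed_query("smoke test")  # raises if the endpoint is unreachable
print(len(vector))
```

With a `base_url` that actually resolves, `Chroma.from_documents(...)` should be able to build the vector store as in the original snippet.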
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3613/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3613/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6973
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6973/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6973/comments
https://api.github.com/repos/ollama/ollama/issues/6973/events
https://github.com/ollama/ollama/issues/6973
2,549,405,305
I_kwDOJ0Z1Ps6X9NZ5
6,973
Model cannot be automatically unloaded
{ "login": "Han-Huaqiao", "id": 41456966, "node_id": "MDQ6VXNlcjQxNDU2OTY2", "avatar_url": "https://avatars.githubusercontent.com/u/41456966?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Han-Huaqiao", "html_url": "https://github.com/Han-Huaqiao", "followers_url": "https://api.github.com/users/Han-Huaqiao/followers", "following_url": "https://api.github.com/users/Han-Huaqiao/following{/other_user}", "gists_url": "https://api.github.com/users/Han-Huaqiao/gists{/gist_id}", "starred_url": "https://api.github.com/users/Han-Huaqiao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Han-Huaqiao/subscriptions", "organizations_url": "https://api.github.com/users/Han-Huaqiao/orgs", "repos_url": "https://api.github.com/users/Han-Huaqiao/repos", "events_url": "https://api.github.com/users/Han-Huaqiao/events{/privacy}", "received_events_url": "https://api.github.com/users/Han-Huaqiao/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6677367769, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCL2Q", "url": "https://api.github.com/repos/ollama/ollama/labels/needs%20more%20info", "name": "needs more info", "color": "BA8041", "default": false, "description": "More information is needed to assist" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
2
2024-09-26T03:11:14
2024-10-23T00:15:36
2024-10-23T00:15:35
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I ran the following models with keep_alive at its default value. I have not sent the models above any requests for more than 10 hours, so why are they not unloaded automatically? Why does the `UNTIL` column of `ollama ps` show how long ago a model was started instead of when it will be unloaded? How can I unload these models manually? ``` NAME ID SIZE PROCESSOR UNTIL LongWriter-llama3.1-8b-quantized:latest 1f0dfed4427c 6.7 GB 100% GPU 4 minutes from now LongWriter-glm4-9b-quantized:latest e37d70795165 29 GB 100% GPU 19 hours ago qwen2-tools:7b 474fd321869c 5.7 GB 100% GPU 19 hours ago llama3.1:latest 91ab477bec9d 6.7 GB 100% GPU 19 hours ago LongWriter-glm4-9b:latest 1d3f6bf6ada8 6.6 GB 100% GPU 19 hours ago ``` ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.1.10
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6973/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6973/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8187
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8187/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8187/comments
https://api.github.com/repos/ollama/ollama/issues/8187/events
https://github.com/ollama/ollama/issues/8187
2,753,278,895
I_kwDOJ0Z1Ps6kG7Ov
8,187
Error: context deadline exceeded when running a huggingface model
{ "login": "ithax-wb", "id": 159031797, "node_id": "U_kgDOCXqh9Q", "avatar_url": "https://avatars.githubusercontent.com/u/159031797?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ithax-wb", "html_url": "https://github.com/ithax-wb", "followers_url": "https://api.github.com/users/ithax-wb/followers", "following_url": "https://api.github.com/users/ithax-wb/following{/other_user}", "gists_url": "https://api.github.com/users/ithax-wb/gists{/gist_id}", "starred_url": "https://api.github.com/users/ithax-wb/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ithax-wb/subscriptions", "organizations_url": "https://api.github.com/users/ithax-wb/orgs", "repos_url": "https://api.github.com/users/ithax-wb/repos", "events_url": "https://api.github.com/users/ithax-wb/events{/privacy}", "received_events_url": "https://api.github.com/users/ithax-wb/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
3
2024-12-20T18:45:15
2024-12-20T21:31:05
2024-12-20T21:31:04
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I tried to load models from Hugging Face. I updated my Docker image to the latest Ollama version before I started. I tried three models from Hugging Face using these commands: docker exec -it ollama ollama run hf.co/DevQuasar/utter-project.EuroLLM-9B-Instruct-GGUF:Q4_K_M docker exec -it ollama ollama run hf.co/mradermacher/Teuken-7B-instruct-commercial-v0.4-GGUF:Q4_K_M docker exec -it ollama ollama run hf.co/Triangle104/EuroLLM-9B-Instruct-Q4_K_M-GGUF At some point the error occurred: Error: context deadline exceeded I tried multiple times; sometimes it seemed to get a bit further and download more data, but in the end the same error occurred. ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version Latest - Ollama 0.5.4
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8187/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8187/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5866
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5866/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5866/comments
https://api.github.com/repos/ollama/ollama/issues/5866/events
https://github.com/ollama/ollama/issues/5866
2,424,162,194
I_kwDOJ0Z1Ps6QfceS
5,866
Issue using mistral-nemo:12b, please help
{ "login": "ryanlxb", "id": 28553693, "node_id": "MDQ6VXNlcjI4NTUzNjkz", "avatar_url": "https://avatars.githubusercontent.com/u/28553693?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ryanlxb", "html_url": "https://github.com/ryanlxb", "followers_url": "https://api.github.com/users/ryanlxb/followers", "following_url": "https://api.github.com/users/ryanlxb/following{/other_user}", "gists_url": "https://api.github.com/users/ryanlxb/gists{/gist_id}", "starred_url": "https://api.github.com/users/ryanlxb/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ryanlxb/subscriptions", "organizations_url": "https://api.github.com/users/ryanlxb/orgs", "repos_url": "https://api.github.com/users/ryanlxb/repos", "events_url": "https://api.github.com/users/ryanlxb/events{/privacy}", "received_events_url": "https://api.github.com/users/ryanlxb/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-07-23T03:28:18
2024-07-23T04:41:30
2024-07-23T03:34:53
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? ollama pull mistral-nemo:12b pulling manifest pulling b559938ab7a0... 100% ▕██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▏ 7.1 GB pulling f023d1ce0e55... 100% ▕██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▏ 688 B pulling 8fe8671a714f... 100% ▕██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▏ 11 KB pulling ed11eda7790d... 100% ▕██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▏ 30 B pulling 65d37de20e59... 100% ▕██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▏ 486 B verifying sha256 digest writing manifest removing any unused layers success ollama run mistral-nemo:12b Error: llama runner process has terminated: signal: abort trap error:check_tensor_dims: tensor 'blk.0.attn_q.weight' has wrong shape; expected 5120, 5120, got 5120, 4096, 1, 1 ollama --version ollama version is 0.2.7 ### OS macOS ### GPU Apple ### CPU Apple ### Ollama version 0.2.7
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5866/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5866/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6803
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6803/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6803/comments
https://api.github.com/repos/ollama/ollama/issues/6803/events
https://github.com/ollama/ollama/issues/6803
2,526,474,632
I_kwDOJ0Z1Ps6WlvGI
6,803
Support AMD RX580 graphics card
{ "login": "Tamila-2017", "id": 27445399, "node_id": "MDQ6VXNlcjI3NDQ1Mzk5", "avatar_url": "https://avatars.githubusercontent.com/u/27445399?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Tamila-2017", "html_url": "https://github.com/Tamila-2017", "followers_url": "https://api.github.com/users/Tamila-2017/followers", "following_url": "https://api.github.com/users/Tamila-2017/following{/other_user}", "gists_url": "https://api.github.com/users/Tamila-2017/gists{/gist_id}", "starred_url": "https://api.github.com/users/Tamila-2017/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Tamila-2017/subscriptions", "organizations_url": "https://api.github.com/users/Tamila-2017/orgs", "repos_url": "https://api.github.com/users/Tamila-2017/repos", "events_url": "https://api.github.com/users/Tamila-2017/events{/privacy}", "received_events_url": "https://api.github.com/users/Tamila-2017/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 6433346500, "node_id": "LA_kwDOJ0Z1Ps8AAAABf3UTxA", "url": "https://api.github.com/repos/ollama/ollama/labels/amd", "name": "amd", "color": "000000", "default": false, "description": "Issues relating to AMD GPUs and ROCm" }, { "id": 6677745918, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgZQ_g", "url": "https://api.github.com/repos/ollama/ollama/labels/gpu", "name": "gpu", "color": "76C49E", "default": false, "description": "" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
3
2024-09-14T15:28:35
2024-09-21T00:44:49
2024-09-21T00:44:49
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello, does your project support the AMD RX580 or RX480 graphics cards?
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6803/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6803/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5668
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5668/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5668/comments
https://api.github.com/repos/ollama/ollama/issues/5668/events
https://github.com/ollama/ollama/issues/5668
2,406,818,864
I_kwDOJ0Z1Ps6PdSQw
5,668
Glm4 in ollama v0.2.3 still returns gibberish G's
{ "login": "loveyume520", "id": 166564647, "node_id": "U_kgDOCe2TJw", "avatar_url": "https://avatars.githubusercontent.com/u/166564647?v=4", "gravatar_id": "", "url": "https://api.github.com/users/loveyume520", "html_url": "https://github.com/loveyume520", "followers_url": "https://api.github.com/users/loveyume520/followers", "following_url": "https://api.github.com/users/loveyume520/following{/other_user}", "gists_url": "https://api.github.com/users/loveyume520/gists{/gist_id}", "starred_url": "https://api.github.com/users/loveyume520/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/loveyume520/subscriptions", "organizations_url": "https://api.github.com/users/loveyume520/orgs", "repos_url": "https://api.github.com/users/loveyume520/repos", "events_url": "https://api.github.com/users/loveyume520/events{/privacy}", "received_events_url": "https://api.github.com/users/loveyume520/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
42
2024-07-13T09:29:40
2025-01-07T15:59:26
2024-09-12T21:50:14
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? After running for a while, the model still returns gibberish: ``` [12:59:39] [INFO] [Part of Speech Determination] [Fixed] JSON string: Since you did not provide specific content text, I cannot perform actual word frequency analysis, context analysis, etc. Therefore, I will provide a hypothetical example to demonstrate how to make judgments according to the steps. { "person": "Yes", "explanation": [ { "step": 1, "detail": "The word appears frequently in the text" }, { "step": 2, "detail": "The word often appears in sentence structures as a subject or object, such as 'Jack is playing games' where 'Jack' is the subject" }, { "step": 3, "detail": "The word is not written in Katakana, which does not match the characteristics of a proper noun" }, { "step": 4, "detail": "Through dependency syntax analysis, it is determined that the word is used as a noun, serving as a subject or object" }, { "step": 5, "detail": "There is a clear role behavior description in the text, such as 'Jack jumps high' where 'Jack' is the subject" }, { "step": 6, "detail": "The word appears in dialogue, such as 'A: Hello, I am Jack. B: Hi, Jack!'" }, { "step": 7, "detail": "The usage of the word remains consistent across different paragraphs and scenes" }, { "step": 8, !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!} [12:59:39] [WARNING] [Part of Speech Determination] Subtask execution failed, will retry later ... Expecting value: line 1 column 1 (char 0) [12:59:39] [INFO] About to start executing [Semantic Analysis] ... [12:59:44] [INFO] [Semantic Analysis] [Raw] LLM response: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! [12:59:44] [INFO] [Semantic Analysis] [Fixed] JSON string: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!} . . . Press any key to continue . . . (base) PS C:\Users\account\Desktop> ollama --version ollama version is 0.2.3 ``` Then try posting and it respond: ``` { "id": "chatcmpl-991", "object": "chat.completion", "created": 1720861217, "model": "glm-4-9b-chat", "system_fingerprint": "fp_ollama", "choices": [ { "index": 0, "message": { "role": "assistant", "content": "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG" }, "finish_reason": null } ], "usage": { "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0 } } ``` Here's ollama serve: ``` 2024/07/13 12:58:16 routes.go:940: INFO server config env="map[CUDA_VISIBLE_DEVICES: GPU_DEVICE_ORDINAL: HIP_VISIBLE_DEVICES: HSA_OVERRIDE_GFX_VERSION: OLLAMA_DEBUG:false OLLAMA_FLASH_ATTENTION:false OLLAMA_HOST:http://127.0.0.1:11434 OLLAMA_INTEL_GPU:false OLLAMA_KEEP_ALIVE:5m0s OLLAMA_LLM_LIBRARY: OLLAMA_MAX_LOADED_MODELS:0 OLLAMA_MAX_QUEUE:512 OLLAMA_MAX_VRAM:0 OLLAMA_MODELS:C:\\Users\\Ototsuyume\\.ollama\\models OLLAMA_NOHISTORY:false OLLAMA_NOPRUNE:false OLLAMA_NUM_PARALLEL:0 OLLAMA_ORIGINS:[http://localhost https://localhost http://localhost:* https://localhost:* http://127.0.0.1 https://127.0.0.1 http://127.0.0.1:* https://127.0.0.1:* http://0.0.0.0 https://0.0.0.0 http://0.0.0.0:* https://0.0.0.0:* app://* file://* tauri://*] OLLAMA_RUNNERS_DIR:C:\\Users\\Ototsuyume\\AppData\\Local\\Programs\\Ollama\\ollama_runners OLLAMA_SCHED_SPREAD:false OLLAMA_TMPDIR: ROCR_VISIBLE_DEVICES:]" time=2024-07-13T12:58:16.286 level=INFO source=images.go:760 msg="total blobs: 17" time=2024-07-13T12:58:16.287 level=INFO source=images.go:767 msg="total unused blobs removed: 0" time=2024-07-13T12:58:16.288 level=INFO source=routes.go:987 msg="Listening on 127.0.0.1:11434 (version 0.2.3)" time=2024-07-13T12:58:16.289 level=INFO source=payload.go:44 msg="Dynamic LLM libraries [cpu cpu_avx cpu_avx2 cuda_v11.3 
rocm_v6.1]" time=2024-07-13T12:58:16.289 level=INFO source=gpu.go:205 msg="looking for compatible GPUs" time=2024-07-13T12:58:16.631 level=INFO source=types.go:105 msg="inference compute" id=0 library=rocm compute=gfx1030 driver=5.7 name="AMD Radeon RX 6800 XT" total="16.0 GiB" available="15.9 GiB" time=2024-07-13T12:58:30.651 level=INFO source=sched.go:179 msg="one or more GPUs detected that are unable to accurately report free memory - disabling default concurrency" time=2024-07-13T12:58:30.663 level=INFO source=sched.go:701 msg="new model will fit in available VRAM in single GPU, loading" model=C:\Users\Ototsuyume\.ollama\models\blobs\sha256-eb30fa5273749385c6a42b8df12a692ea3ab552fbf8883ce87af9938f69e9f4c gpu=0 parallel=4 available=17028874240 required="6.9 GiB" time=2024-07-13T12:58:30.664 level=INFO source=memory.go:309 msg="offload to rocm" layers.requested=-1 layers.model=41 layers.offload=41 layers.split="" memory.available="[15.9 GiB]" memory.required.full="6.9 GiB" memory.required.partial="6.9 GiB" memory.required.kv="320.0 MiB" memory.required.allocations="[6.9 GiB]" memory.weights.total="5.3 GiB" memory.weights.repeating="4.9 GiB" memory.weights.nonrepeating="485.6 MiB" memory.graph.full="561.0 MiB" memory.graph.partial="789.6 MiB" time=2024-07-13T12:58:30.670 level=INFO source=server.go:383 msg="starting llama server" cmd="C:\\Users\\Ototsuyume\\AppData\\Local\\Programs\\Ollama\\ollama_runners\\rocm_v6.1\\ollama_llama_server.exe --model C:\\Users\\Ototsuyume\\.ollama\\models\\blobs\\sha256-eb30fa5273749385c6a42b8df12a692ea3ab552fbf8883ce87af9938f69e9f4c --ctx-size 8192 --batch-size 512 --embedding --log-disable --n-gpu-layers 41 --parallel 4 --port 10596" time=2024-07-13T12:58:30.694 level=INFO source=sched.go:437 msg="loaded runners" count=1 time=2024-07-13T12:58:30.694 level=INFO source=server.go:571 msg="waiting for llama runner to start responding" time=2024-07-13T12:58:30.695 level=INFO source=server.go:612 msg="waiting for server to become available" status="llm server error" INFO [wmain] build info | build=3337 commit="a8db2a9c" tid="3896" timestamp=1720861110 INFO [wmain] system info | n_threads=6 n_threads_batch=-1 system_info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 0 | " tid="3896" timestamp=1720861110 total_threads=12 INFO [wmain] HTTP server listening | hostname="127.0.0.1" n_threads_http="11" port="10596" tid="3896" timestamp=1720861110 llama_model_loader: loaded meta data with 24 key-value pairs and 283 tensors from C:\Users\Ototsuyume\.ollama\models\blobs\sha256-eb30fa5273749385c6a42b8df12a692ea3ab552fbf8883ce87af9938f69e9f4c (version GGUF V3 (latest)) llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. 
llama_model_loader: - kv 0: general.architecture str = chatglm llama_model_loader: - kv 1: general.name str = glm-4-9b-chat llama_model_loader: - kv 2: chatglm.context_length u32 = 131072 llama_model_loader: - kv 3: chatglm.embedding_length u32 = 4096 llama_model_loader: - kv 4: chatglm.feed_forward_length u32 = 13696 llama_model_loader: - kv 5: chatglm.block_count u32 = 40 llama_model_loader: - kv 6: chatglm.attention.head_count u32 = 32 llama_model_loader: - kv 7: chatglm.attention.head_count_kv u32 = 2 llama_model_loader: - kv 8: chatglm.attention.layer_norm_rms_epsilon f32 = 0.000000 llama_model_loader: - kv 9: general.file_type u32 = 15 llama_model_loader: - kv 10: chatglm.rope.dimension_count u32 = 64 llama_model_loader: - kv 11: tokenizer.ggml.add_bos_token bool = false llama_model_loader: - kv 12: chatglm.rope.freq_base f32 = 5000000.000000 llama_model_loader: - kv 13: tokenizer.ggml.model str = gpt2 llama_model_loader: - kv 14: tokenizer.ggml.pre str = chatglm-bpe llama_model_loader: - kv 15: tokenizer.ggml.tokens arr[str,151552] = ["!", "\"", "#", "$", "%", "&", "'", ... llama_model_loader: - kv 16: tokenizer.ggml.token_type arr[i32,151552] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ... llama_model_loader: - kv 17: tokenizer.ggml.merges arr[str,151073] = ["Ġ Ġ", "ĠĠ ĠĠ", "i n", "Ġ t",... llama_model_loader: - kv 18: tokenizer.ggml.padding_token_id u32 = 151329 llama_model_loader: - kv 19: tokenizer.ggml.eos_token_id u32 = 151329 llama_model_loader: - kv 20: tokenizer.ggml.eot_token_id u32 = 151336 llama_model_loader: - kv 21: tokenizer.ggml.unknown_token_id u32 = 151329 llama_model_loader: - kv 22: tokenizer.chat_template str = [gMASK]<sop>{% for item in messages %... llama_model_loader: - kv 23: general.quantization_version u32 = 2 llama_model_loader: - type f32: 121 tensors llama_model_loader: - type q5_0: 20 tensors llama_model_loader: - type q8_0: 20 tensors llama_model_loader: - type q4_K: 81 tensors llama_model_loader: - type q5_K: 40 tensors llama_model_loader: - type q6_K: 1 tensors time=2024-07-13T12:58:30.961 level=INFO source=server.go:612 msg="waiting for server to become available" status="llm server loading model" llm_load_vocab: special tokens cache size = 223 llm_load_vocab: token to piece cache size = 0.9732 MB llm_load_print_meta: format = GGUF V3 (latest) llm_load_print_meta: arch = chatglm llm_load_print_meta: vocab type = BPE llm_load_print_meta: n_vocab = 151552 llm_load_print_meta: n_merges = 151073 llm_load_print_meta: vocab_only = 0 llm_load_print_meta: n_ctx_train = 131072 llm_load_print_meta: n_embd = 4096 llm_load_print_meta: n_layer = 40 llm_load_print_meta: n_head = 32 llm_load_print_meta: n_head_kv = 2 llm_load_print_meta: n_rot = 64 llm_load_print_meta: n_swa = 0 llm_load_print_meta: n_embd_head_k = 128 llm_load_print_meta: n_embd_head_v = 128 llm_load_print_meta: n_gqa = 16 llm_load_print_meta: n_embd_k_gqa = 256 llm_load_print_meta: n_embd_v_gqa = 256 llm_load_print_meta: f_norm_eps = 0.0e+00 llm_load_print_meta: f_norm_rms_eps = 1.6e-07 llm_load_print_meta: f_clamp_kqv = 0.0e+00 llm_load_print_meta: f_max_alibi_bias = 0.0e+00 llm_load_print_meta: f_logit_scale = 0.0e+00 llm_load_print_meta: n_ff = 13696 llm_load_print_meta: n_expert = 0 llm_load_print_meta: n_expert_used = 0 llm_load_print_meta: causal attn = 1 llm_load_print_meta: pooling type = 0 llm_load_print_meta: rope type = 0 llm_load_print_meta: rope scaling = linear llm_load_print_meta: freq_base_train = 5000000.0 llm_load_print_meta: freq_scale_train = 1 llm_load_print_meta: 
n_ctx_orig_yarn = 131072 llm_load_print_meta: rope_finetuned = unknown llm_load_print_meta: ssm_d_conv = 0 llm_load_print_meta: ssm_d_inner = 0 llm_load_print_meta: ssm_d_state = 0 llm_load_print_meta: ssm_dt_rank = 0 llm_load_print_meta: model type = 9B llm_load_print_meta: model ftype = Q4_K - Medium llm_load_print_meta: model params = 9.40 B llm_load_print_meta: model size = 5.82 GiB (5.31 BPW) llm_load_print_meta: general.name = glm-4-9b-chat llm_load_print_meta: EOS token = 151329 '<|endoftext|>' llm_load_print_meta: UNK token = 151329 '<|endoftext|>' llm_load_print_meta: PAD token = 151329 '<|endoftext|>' llm_load_print_meta: LF token = 128 'Ä' llm_load_print_meta: EOT token = 151336 '<|user|>' llm_load_print_meta: max token length = 1024 ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no ggml_cuda_init: found 1 ROCm devices: Device 0: AMD Radeon RX 6800 XT, compute capability 10.3, VMM: no llm_load_tensors: ggml ctx size = 0.28 MiB llm_load_tensors: offloading 40 repeating layers to GPU llm_load_tensors: offloading non-repeating layers to GPU llm_load_tensors: offloaded 41/41 layers to GPU llm_load_tensors: ROCm0 buffer size = 5622.60 MiB llm_load_tensors: CPU buffer size = 333.00 MiB llama_new_context_with_model: n_ctx = 8192 llama_new_context_with_model: n_batch = 512 llama_new_context_with_model: n_ubatch = 512 llama_new_context_with_model: flash_attn = 0 llama_new_context_with_model: freq_base = 5000000.0 llama_new_context_with_model: freq_scale = 1 llama_kv_cache_init: ROCm0 KV buffer size = 320.00 MiB llama_new_context_with_model: KV self size = 320.00 MiB, K (f16): 160.00 MiB, V (f16): 160.00 MiB llama_new_context_with_model: ROCm_Host output buffer size = 2.38 MiB llama_new_context_with_model: ROCm0 compute buffer size = 561.00 MiB llama_new_context_with_model: ROCm_Host compute buffer size = 24.01 MiB llama_new_context_with_model: graph nodes = 1606 llama_new_context_with_model: graph splits = 2 INFO [wmain] model loaded | tid="3896" timestamp=1720861115 time=2024-07-13T12:58:35.360 level=INFO source=server.go:617 msg="llama runner started in 4.67 seconds" [GIN] 2024/07/13 - 12:58:35 | 200 | 5.6746358s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:04 | 200 | 6.5357628s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:06 | 200 | 8.0197978s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:07 | 200 | 9.3109135s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:08 | 200 | 10.3975869s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:09 | 200 | 5.0742796s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:11 | 200 | 5.4391138s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:11 | 200 | 3.1233214s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:12 | 200 | 5.0868503s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:13 | 200 | 3.4118485s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:20 | 200 | 8.9596744s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:27 | 200 | 433.1813ms | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:31 | 200 | 10.5772594s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:39 | 200 | 8.1706446s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:44 | 200 | 4.5071605s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:44 | 200 | 4.4032214s | 127.0.0.1 | POST 
"/v1/chat/completions" [GIN] 2024/07/13 - 12:59:44 | 200 | 4.7316797s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:44 | 200 | 4.7339705s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:45 | 200 | 1.5329916s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:47 | 200 | 2.3864411s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:48 | 200 | 2.4289955s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:48 | 200 | 2.4288198s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:48 | 200 | 2.4720815s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:49 | 200 | 1.5614626s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:51 | 200 | 2.3866176s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:52 | 200 | 2.4702948s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:52 | 200 | 2.4698489s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:52 | 200 | 2.4714931s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:53 | 200 | 1.5666652s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:55 | 200 | 1.5533587s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:55 | 200 | 1.5873278s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:55 | 200 | 1.5888237s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:55 | 200 | 1.6187491s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:55 | 200 | 874.0734ms | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:57 | 200 | 1.1203497s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:57 | 200 | 1.1544978s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:57 | 200 | 1.1558197s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:57 | 200 | 1.1856924s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:57 | 200 | 871.3637ms | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:59 | 200 | 1.122985s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:59 | 200 | 1.1230159s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:59 | 200 | 1.1563781s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:59 | 200 | 1.1845468s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 12:59:59 | 200 | 867.6324ms | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 13:00:04 | 200 | 4.1391989s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 13:00:04 | 200 | 4.1805937s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 13:00:04 | 200 | 4.1799448s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 13:00:04 | 200 | 4.216249s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 13:00:05 | 200 | 1.0996839s | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 13:00:17 | 200 | 980.3081ms | 127.0.0.1 | POST "/v1/chat/completions" [GIN] 2024/07/13 - 13:00:51 | 200 | 0s | 127.0.0.1 | GET "/api/version" ``` ### OS Windows ### GPU AMD ### CPU AMD ### Ollama version 0.2.3
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5668/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5668/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8570
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8570/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8570/comments
https://api.github.com/repos/ollama/ollama/issues/8570/events
https://github.com/ollama/ollama/issues/8570
2,810,606,894
I_kwDOJ0Z1Ps6nhnUu
8,570
cannot run this on Intel Xe GPU
{ "login": "1009058470", "id": 30902531, "node_id": "MDQ6VXNlcjMwOTAyNTMx", "avatar_url": "https://avatars.githubusercontent.com/u/30902531?v=4", "gravatar_id": "", "url": "https://api.github.com/users/1009058470", "html_url": "https://github.com/1009058470", "followers_url": "https://api.github.com/users/1009058470/followers", "following_url": "https://api.github.com/users/1009058470/following{/other_user}", "gists_url": "https://api.github.com/users/1009058470/gists{/gist_id}", "starred_url": "https://api.github.com/users/1009058470/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/1009058470/subscriptions", "organizations_url": "https://api.github.com/users/1009058470/orgs", "repos_url": "https://api.github.com/users/1009058470/repos", "events_url": "https://api.github.com/users/1009058470/events{/privacy}", "received_events_url": "https://api.github.com/users/1009058470/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
2
2025-01-24T23:51:28
2025-01-25T01:43:56
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? ## Hardware environment - CPU: Intel i5-1240p / AMD - GPU: Intel Iris Xe / AMD Radeon - Memory: 16GB DDR5 - OS: Windows 11 ## Steps to reproduce 1. setx OLLAMA_DEBUG 1 2. ollama serve 2 > debug.log 3. ollama run deepseek-r1:7b 4. the error then appeared; see [debug.txt](https://github.com/user-attachments/files/18544023/debug.txt) ### OS Windows ### GPU Intel ### CPU Intel ### Ollama version ollama version is 0.5.1-ipexllm-20250123 Warning: client version is 0.5.7
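For reference, a minimal sketch of the same debug capture in a Windows terminal, assuming the standard OLLAMA_DEBUG variable used in the report (the exact redirection syntax may need adjusting for your shell):

```
# Illustrative only: enable debug logging and capture server output while reproducing
setx OLLAMA_DEBUG 1        # persists for new terminals; open a fresh one afterwards
ollama serve 2> debug.log  # redirect stderr (where the server logs) to debug.log
ollama run deepseek-r1:7b  # in a second terminal, reproduce the failure
```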
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8570/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8570/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/6374
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6374/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6374/comments
https://api.github.com/repos/ollama/ollama/issues/6374/events
https://github.com/ollama/ollama/pull/6374
2,468,372,359
PR_kwDOJ0Z1Ps54e5uQ
6,374
feat: support for markdown rendering in the cli - wip
{ "login": "ukd1", "id": 44345, "node_id": "MDQ6VXNlcjQ0MzQ1", "avatar_url": "https://avatars.githubusercontent.com/u/44345?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ukd1", "html_url": "https://github.com/ukd1", "followers_url": "https://api.github.com/users/ukd1/followers", "following_url": "https://api.github.com/users/ukd1/following{/other_user}", "gists_url": "https://api.github.com/users/ukd1/gists{/gist_id}", "starred_url": "https://api.github.com/users/ukd1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ukd1/subscriptions", "organizations_url": "https://api.github.com/users/ukd1/orgs", "repos_url": "https://api.github.com/users/ukd1/repos", "events_url": "https://api.github.com/users/ukd1/events{/privacy}", "received_events_url": "https://api.github.com/users/ukd1/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-08-15T16:01:11
2024-11-21T19:14:05
2024-11-21T19:14:04
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
true
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6374", "html_url": "https://github.com/ollama/ollama/pull/6374", "diff_url": "https://github.com/ollama/ollama/pull/6374.diff", "patch_url": "https://github.com/ollama/ollama/pull/6374.patch", "merged_at": null }
This is a work in progress/hack: it breaks word wrap and doesn't render multi-line constructs (e.g. tables) at all. If there is interest in this, I'd be happy to finish it and make it work properly. Thoughts? This is what it looks like today: <img width="661" alt="image" src="https://github.com/user-attachments/assets/143c7e49-fd67-4603-b9f5-7c642fc578c7">
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6374/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6374/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/6471
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6471/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6471/comments
https://api.github.com/repos/ollama/ollama/issues/6471/events
https://github.com/ollama/ollama/issues/6471
2,482,494,503
I_kwDOJ0Z1Ps6T99wn
6,471
Issue when running smollm:360m and also smollm:135m
{ "login": "NEWbie0709", "id": 81673708, "node_id": "MDQ6VXNlcjgxNjczNzA4", "avatar_url": "https://avatars.githubusercontent.com/u/81673708?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NEWbie0709", "html_url": "https://github.com/NEWbie0709", "followers_url": "https://api.github.com/users/NEWbie0709/followers", "following_url": "https://api.github.com/users/NEWbie0709/following{/other_user}", "gists_url": "https://api.github.com/users/NEWbie0709/gists{/gist_id}", "starred_url": "https://api.github.com/users/NEWbie0709/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NEWbie0709/subscriptions", "organizations_url": "https://api.github.com/users/NEWbie0709/orgs", "repos_url": "https://api.github.com/users/NEWbie0709/repos", "events_url": "https://api.github.com/users/NEWbie0709/events{/privacy}", "received_events_url": "https://api.github.com/users/NEWbie0709/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
4
2024-08-23T07:23:19
2024-08-27T21:37:14
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I tried running the 1.7b version, and it ran successfully. ![image](https://github.com/user-attachments/assets/6074c785-cbb2-43e0-b82d-32fe74184840) However, when running the two smaller versions (smollm:360m and smollm:135m), the following error appears. ![image](https://github.com/user-attachments/assets/419da9f0-0ea2-4795-bdab-78e457dfbd08) ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.3.6
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6471/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6471/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/3680
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3680/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3680/comments
https://api.github.com/repos/ollama/ollama/issues/3680/events
https://github.com/ollama/ollama/pull/3680
2,246,761,358
PR_kwDOJ0Z1Ps5s146L
3,680
types/model: add FilepathNoBuild
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-04-16T19:38:09
2024-04-17T01:35:44
2024-04-17T01:35:43
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3680", "html_url": "https://github.com/ollama/ollama/pull/3680", "diff_url": "https://github.com/ollama/ollama/pull/3680.diff", "patch_url": "https://github.com/ollama/ollama/pull/3680.patch", "merged_at": "2024-04-17T01:35:43" }
Also adds a test for DisplayLongest and plumbs the fill param to ParseName in MustParseName.
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3680/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3680/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3245
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3245/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3245/comments
https://api.github.com/repos/ollama/ollama/issues/3245/events
https://github.com/ollama/ollama/issues/3245
2,194,926,554
I_kwDOJ0Z1Ps6C0-va
3,245
pls add Grok
{ "login": "enryteam", "id": 20081090, "node_id": "MDQ6VXNlcjIwMDgxMDkw", "avatar_url": "https://avatars.githubusercontent.com/u/20081090?v=4", "gravatar_id": "", "url": "https://api.github.com/users/enryteam", "html_url": "https://github.com/enryteam", "followers_url": "https://api.github.com/users/enryteam/followers", "following_url": "https://api.github.com/users/enryteam/following{/other_user}", "gists_url": "https://api.github.com/users/enryteam/gists{/gist_id}", "starred_url": "https://api.github.com/users/enryteam/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/enryteam/subscriptions", "organizations_url": "https://api.github.com/users/enryteam/orgs", "repos_url": "https://api.github.com/users/enryteam/repos", "events_url": "https://api.github.com/users/enryteam/events{/privacy}", "received_events_url": "https://api.github.com/users/enryteam/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-03-19T13:06:10
2024-03-19T13:17:04
2024-03-19T13:17:03
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What model would you like? Please add Grok.
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3245/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3245/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6626
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6626/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6626/comments
https://api.github.com/repos/ollama/ollama/issues/6626/events
https://github.com/ollama/ollama/issues/6626
2,504,463,929
I_kwDOJ0Z1Ps6VRxY5
6,626
unable to load cuda driver library: symbol lookup for cuCtxCreate_v3 failed
{ "login": "Wangzg97", "id": 33352902, "node_id": "MDQ6VXNlcjMzMzUyOTAy", "avatar_url": "https://avatars.githubusercontent.com/u/33352902?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Wangzg97", "html_url": "https://github.com/Wangzg97", "followers_url": "https://api.github.com/users/Wangzg97/followers", "following_url": "https://api.github.com/users/Wangzg97/following{/other_user}", "gists_url": "https://api.github.com/users/Wangzg97/gists{/gist_id}", "starred_url": "https://api.github.com/users/Wangzg97/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wangzg97/subscriptions", "organizations_url": "https://api.github.com/users/Wangzg97/orgs", "repos_url": "https://api.github.com/users/Wangzg97/repos", "events_url": "https://api.github.com/users/Wangzg97/events{/privacy}", "received_events_url": "https://api.github.com/users/Wangzg97/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-09-04T06:54:46
2024-09-25T06:32:19
2024-09-04T07:01:50
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? i installed ollama by the sh file (i will show it at the end), but i found an error that ollama cannot use gpu, and i cannot find any methods to solve this problem error message is > Sep 04 14:28:32 ip ollama[22329]: time=2024-09-04T14:28:32.433+08:00 level=INFO source=images.go:753 msg="total blobs: 0" > Sep 04 14:28:32 ip ollama[22329]: time=2024-09-04T14:28:32.433+08:00 level=INFO source=images.go:760 msg="total unused blobs removed: 0" > Sep 04 14:28:32 ip ollama[22329]: time=2024-09-04T14:28:32.433+08:00 level=INFO source=routes.go:1172 msg="Listening on 127.0.0.1:11434 (version 0.3.9)" > Sep 04 14:28:32 ip ollama[22329]: time=2024-09-04T14:28:32.433+08:00 level=INFO source=payload.go:30 msg="extracting embedded files" dir=/tmp/ollama629948305/runners > Sep 04 14:28:41 ip ollama[22329]: time=2024-09-04T14:28:41.834+08:00 level=INFO source=payload.go:44 msg="Dynamic LLM libraries [cuda_v12 rocm_v60102 cpu cpu_avx cpu_avx2 cuda_v11]" > Sep 04 14:28:41 ip ollama[22329]: time=2024-09-04T14:28:41.834+08:00 level=INFO source=gpu.go:200 msg="looking for compatible GPUs" > Sep 04 14:28:41 ip ollama[22329]: time=2024-09-04T14:28:41.837+08:00 level=INFO source=gpu.go:568 msg="unable to load cuda driver library" library=/usr/lib64/libcuda.so.460.106.00 error="symbol lookup for cuCtxCreate_v3 failed: /usr/lib64/libcuda.so.460.106.00: undefined symbol: cuCtxCreate_v3" > Sep 04 14:28:42 ip ollama[22329]: time=2024-09-04T14:28:42.023+08:00 level=INFO source=types.go:107 msg="inference compute" id=GPU-ba7826fa-xxxx-xxxx-xxxx-xxxxxxxxxxxx library=cuda variant=v11 compute=8.6 driver=0.0 name="" total="22.2 GiB" available="22.0 GiB" > Sep 04 14:30:59 ip ollama[22329]: [GIN] 2024/09/04 - 14:30:59 | 200 | 68.809µs | 127.0.0.1 | HEAD "/" > Sep 04 14:30:59 ip ollama[22329]: [GIN] 2024/09/04 - 14:30:59 | 200 | 207.344µs | 127.0.0.1 | GET "/api/tags" i have installed driver and cuda > +-----------------------------------------------------------------------------+ > | NVIDIA-SMI 460.106.00 Driver Version: 460.106.00 CUDA Version: 11.2 | > |-------------------------------+----------------------+----------------------+ > | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | > | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | > | | | MIG M. | > |===============================+======================+======================| > | 0 A10 Off | 00000000:86:00.0 Off | 0 | > | 0% 27C P8 15W / 150W | 2MiB / 22731MiB | 0% Default | > | | | N/A | > +-------------------------------+----------------------+----------------------+ > > +-----------------------------------------------------------------------------+ > | Processes: | > | GPU GI CI PID Type Process name GPU Memory | > | ID ID Usage | > |=============================================================================| > | No running processes found | > +-----------------------------------------------------------------------------+ and cuda version is > $nvcc -V > nvcc: NVIDIA (R) Cuda compiler driver > Copyright (c) 2005-2020 NVIDIA Corporation > Built on Mon_Nov_30_19:08:53_PST_2020 > Cuda compilation tools, release 11.2, V11.2.67 > Build cuda_11.2.r11.2/compiler.29373293_0 ollama_install.sh is below ``` #!/bin/sh # This script installs Ollama on Linux. # It detects the current operating system architecture and installs the appropriate version of Ollama. 
set -eu status() { echo ">>> $*" >&2; } error() { echo "ERROR $*"; exit 1; } warning() { echo "WARNING: $*"; } TEMP_DIR=$(mktemp -d) cleanup() { rm -rf $TEMP_DIR; } trap cleanup EXIT available() { command -v $1 >/dev/null; } require() { local MISSING='' for TOOL in $*; do if ! available $TOOL; then MISSING="$MISSING $TOOL" fi done echo $MISSING } [ "$(uname -s)" = "Linux" ] || error 'This script is intended to run on Linux only.' ARCH=$(uname -m) case "$ARCH" in x86_64) ARCH="amd64" ;; aarch64|arm64) ARCH="arm64" ;; *) error "Unsupported architecture: $ARCH" ;; esac IS_WSL2=false KERN=$(uname -r) case "$KERN" in *icrosoft*WSL2 | *icrosoft*wsl2) IS_WSL2=true;; *icrosoft) error "Microsoft WSL1 is not currently supported. Please upgrade to WSL2 with 'wsl --set-version <distro> 2'" ;; *) ;; esac VER_PARAM="${OLLAMA_VERSION:+?version=$OLLAMA_VERSION}" SUDO= if [ "$(id -u)" -ne 0 ]; then # Running as root, no need for sudo if ! available sudo; then error "This script requires superuser permissions. Please re-run as root." fi SUDO="sudo" fi NEEDS=$(require curl awk grep sed tee xargs) if [ -n "$NEEDS" ]; then status "ERROR: The following tools are required but missing:" for NEED in $NEEDS; do echo " - $NEED" done exit 1 fi for BINDIR in /usr/local/bin /usr/bin /bin; do echo $PATH | grep -q $BINDIR && break || continue done OLLAMA_INSTALL_DIR=$(dirname ${BINDIR}) status "Installing ollama to $OLLAMA_INSTALL_DIR" $SUDO install -o0 -g0 -m755 -d $BINDIR $SUDO install -o0 -g0 -m755 -d "$OLLAMA_INSTALL_DIR" if curl -I --silent --fail --location "https://ollama.com/download/ollama-linux-${ARCH}.tgz${VER_PARAM}" >/dev/null ; then status "Downloading Linux ${ARCH} bundle" curl --fail --show-error --location --progress-bar \ "https://ollama.com/download/ollama-linux-${ARCH}.tgz${VER_PARAM}" | \ $SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR" BUNDLE=1 if [ "$OLLAMA_INSTALL_DIR/bin/ollama" != "$BINDIR/ollama" ] ; then status "Making ollama accessible in the PATH in $BINDIR" $SUDO ln -sf "$OLLAMA_INSTALL_DIR/ollama" "$BINDIR/ollama" fi else status "Downloading Linux ${ARCH} CLI" curl --fail --show-error --location --progress-bar -o "$TEMP_DIR/ollama"\ "https://ollama.com/download/ollama-linux-${ARCH}${VER_PARAM}" $SUDO install -o0 -g0 -m755 $TEMP_DIR/ollama $OLLAMA_INSTALL_DIR/ollama BUNDLE=0 if [ "$OLLAMA_INSTALL_DIR/ollama" != "$BINDIR/ollama" ] ; then status "Making ollama accessible in the PATH in $BINDIR" $SUDO ln -sf "$OLLAMA_INSTALL_DIR/ollama" "$BINDIR/ollama" fi fi install_success() { status 'The Ollama API is now available at 127.0.0.1:11434.' status 'Install complete. Run "ollama" from the command line.' } trap install_success EXIT # Everything from this point onwards is optional. configure_systemd() { status "Creating ollama systemd service..." cat <<EOF | $SUDO tee /etc/systemd/system/ollama.service >/dev/null [Unit] Description=Ollama Service After=network-online.target [Service] ExecStart=$BINDIR/ollama serve Restart=always RestartSec=3 Environment="PATH=$PATH" [Install] WantedBy=default.target EOF SYSTEMCTL_RUNNING="$(systemctl is-system-running || true)" case $SYSTEMCTL_RUNNING in running|degraded) status "Enabling and starting ollama service..." 
$SUDO systemctl daemon-reload $SUDO systemctl enable ollama start_service() { $SUDO systemctl restart ollama; } trap start_service EXIT ;; esac } if available systemctl; then configure_systemd fi # WSL2 only supports GPUs via nvidia passthrough # so check for nvidia-smi to determine if GPU is available if [ "$IS_WSL2" = true ]; then if available nvidia-smi && [ -n "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then status "Nvidia GPU detected." fi install_success exit 0 fi # Install GPU dependencies on Linux if ! available lspci && ! available lshw; then warning "Unable to detect NVIDIA/AMD GPU. Install lspci or lshw to automatically detect and install GPU dependencies." exit 0 fi check_gpu() { # Look for devices based on vendor ID for NVIDIA and AMD case $1 in lspci) case $2 in nvidia) available lspci && lspci -d '10de:' | grep -q 'NVIDIA' || return 1 ;; amdgpu) available lspci && lspci -d '1002:' | grep -q 'AMD' || return 1 ;; esac ;; lshw) case $2 in nvidia) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[10DE\]' || return 1 ;; amdgpu) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[1002\]' || return 1 ;; esac ;; nvidia-smi) available nvidia-smi || return 1 ;; esac } if check_gpu nvidia-smi; then status "NVIDIA GPU installed." exit 0 fi if ! check_gpu lspci nvidia && ! check_gpu lshw nvidia && ! check_gpu lspci amdgpu && ! check_gpu lshw amdgpu; then install_success warning "No NVIDIA/AMD GPU detected. Ollama will run in CPU-only mode." exit 0 fi if check_gpu lspci amdgpu || check_gpu lshw amdgpu; then if [ $BUNDLE -ne 0 ]; then status "Downloading Linux ROCm ${ARCH} bundle" curl --fail --show-error --location --progress-bar \ "https://ollama.com/download/ollama-linux-${ARCH}-rocm.tgz${VER_PARAM}" | \ $SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR" install_success status "AMD GPU ready." exit 0 fi # Look for pre-existing ROCm v6 before downloading the dependencies for search in "${HIP_PATH:-''}" "${ROCM_PATH:-''}" "/opt/rocm" "/usr/lib64"; do if [ -n "${search}" ] && [ -e "${search}/libhipblas.so.2" -o -e "${search}/lib/libhipblas.so.2" ]; then status "Compatible AMD GPU ROCm library detected at ${search}" install_success exit 0 fi done status "Downloading AMD GPU dependencies..." $SUDO rm -rf /usr/share/ollama/lib $SUDO chmod o+x /usr/share/ollama $SUDO install -o ollama -g ollama -m 755 -d /usr/share/ollama/lib/rocm curl --fail --show-error --location --progress-bar "https://ollama.com/download/ollama-linux-amd64-rocm.tgz${VER_PARAM}" \ | $SUDO tar zx --owner ollama --group ollama -C /usr/share/ollama/lib/rocm . install_success status "AMD GPU ready." exit 0 fi CUDA_REPO_ERR_MSG="NVIDIA GPU detected, but your OS and Architecture are not supported by NVIDIA. Please install the CUDA driver manually https://docs.nvidia.com/cuda/cuda-installation-guide-linux/" # ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-7-centos-7 # ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-8-rocky-8 # ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-9-rocky-9 # ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#fedora install_cuda_driver_yum() { status 'Installing NVIDIA repository...' 
case $PACKAGE_MANAGER in yum) $SUDO $PACKAGE_MANAGER -y install yum-utils if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo" >/dev/null ; then $SUDO $PACKAGE_MANAGER-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo else error $CUDA_REPO_ERR_MSG fi ;; dnf) if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo" >/dev/null ; then $SUDO $PACKAGE_MANAGER config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo else error $CUDA_REPO_ERR_MSG fi ;; esac case $1 in rhel) status 'Installing EPEL repository...' # EPEL is required for third-party dependencies such as dkms and libvdpau $SUDO $PACKAGE_MANAGER -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$2.noarch.rpm || true ;; esac status 'Installing CUDA driver...' if [ "$1" = 'centos' ] || [ "$1$2" = 'rhel7' ]; then $SUDO $PACKAGE_MANAGER -y install nvidia-driver-latest-dkms fi $SUDO $PACKAGE_MANAGER -y install cuda-drivers } # ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#ubuntu # ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian install_cuda_driver_apt() { status 'Installing NVIDIA repository...' if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-keyring_1.1-1_all.deb" >/dev/null ; then curl -fsSL -o $TEMP_DIR/cuda-keyring.deb https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-keyring_1.1-1_all.deb else error $CUDA_REPO_ERR_MSG fi case $1 in debian) status 'Enabling contrib sources...' $SUDO sed 's/main/contrib/' < /etc/apt/sources.list | $SUDO tee /etc/apt/sources.list.d/contrib.list > /dev/null if [ -f "/etc/apt/sources.list.d/debian.sources" ]; then $SUDO sed 's/main/contrib/' < /etc/apt/sources.list.d/debian.sources | $SUDO tee /etc/apt/sources.list.d/contrib.sources > /dev/null fi ;; esac status 'Installing CUDA driver...' $SUDO dpkg -i $TEMP_DIR/cuda-keyring.deb $SUDO apt-get update [ -n "$SUDO" ] && SUDO_E="$SUDO -E" || SUDO_E= DEBIAN_FRONTEND=noninteractive $SUDO_E apt-get -y install cuda-drivers -q } if [ ! -f "/etc/os-release" ]; then error "Unknown distribution. Skipping CUDA installation." fi . /etc/os-release OS_NAME=$ID OS_VERSION=$VERSION_ID PACKAGE_MANAGER= for PACKAGE_MANAGER in dnf yum apt-get; do if available $PACKAGE_MANAGER; then break fi done if [ -z "$PACKAGE_MANAGER" ]; then error "Unknown package manager. Skipping CUDA installation." fi if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then case $OS_NAME in centos|rhel) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -d '.' -f 1) ;; rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;; fedora) [ $OS_VERSION -lt '39' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '39';; amzn) install_cuda_driver_yum 'fedora' '37' ;; debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;; ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;; *) exit ;; esac fi if ! lsmod | grep -q nvidia || ! 
lsmod | grep -q nvidia_uvm; then KERNEL_RELEASE="$(uname -r)" case $OS_NAME in rocky) $SUDO $PACKAGE_MANAGER -y install kernel-devel kernel-headers ;; centos|rhel|amzn) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE kernel-headers-$KERNEL_RELEASE ;; fedora) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE ;; debian|ubuntu) $SUDO apt-get -y install linux-headers-$KERNEL_RELEASE ;; *) exit ;; esac NVIDIA_CUDA_VERSION=$($SUDO dkms status | awk -F: '/added/ { print $1 }') if [ -n "$NVIDIA_CUDA_VERSION" ]; then $SUDO dkms install $NVIDIA_CUDA_VERSION fi if lsmod | grep -q nouveau; then status 'Reboot to complete NVIDIA CUDA driver install.' exit 0 fi $SUDO modprobe nvidia $SUDO modprobe nvidia_uvm fi # make sure the NVIDIA modules are loaded on boot with nvidia-persistenced if command -v nvidia-persistenced > /dev/null 2>&1; then $SUDO touch /etc/modules-load.d/nvidia.conf MODULES="nvidia nvidia-uvm" for MODULE in $MODULES; do if ! grep -qxF "$MODULE" /etc/modules-load.d/nvidia.conf; then echo "$MODULE" | sudo tee -a /etc/modules-load.d/nvidia.conf > /dev/null fi done fi status "NVIDIA GPU ready." install_success ``` run result of the sh file is > $sudo sh ollama_install.sh > >>> Installing ollama to /usr/local > >>> Downloading Linux amd64 bundle > ######################################################################## 100.0% > >>> Creating ollama systemd service... > >>> Enabling and starting ollama service... > Created symlink from /etc/systemd/system/default.target.wants/ollama.service to /etc/systemd/system/ollama.service. > >>> NVIDIA GPU installed. ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.3.39
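A quick way to confirm the cause suggested by the log above (hedged: this assumes the 460-series driver simply predates the cuCtxCreate_v3 entry point that newer drivers export) is to list the dynamic symbols of the driver library directly:

```
# Illustrative only: check whether the installed libcuda exports cuCtxCreate_v3
nm -D /usr/lib64/libcuda.so.460.106.00 | grep cuCtxCreate
# If only cuCtxCreate and cuCtxCreate_v2 appear, upgrading the NVIDIA driver
# (and with it libcuda) is the usual fix; ollama itself cannot supply the symbol.
```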
{ "login": "Wangzg97", "id": 33352902, "node_id": "MDQ6VXNlcjMzMzUyOTAy", "avatar_url": "https://avatars.githubusercontent.com/u/33352902?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Wangzg97", "html_url": "https://github.com/Wangzg97", "followers_url": "https://api.github.com/users/Wangzg97/followers", "following_url": "https://api.github.com/users/Wangzg97/following{/other_user}", "gists_url": "https://api.github.com/users/Wangzg97/gists{/gist_id}", "starred_url": "https://api.github.com/users/Wangzg97/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wangzg97/subscriptions", "organizations_url": "https://api.github.com/users/Wangzg97/orgs", "repos_url": "https://api.github.com/users/Wangzg97/repos", "events_url": "https://api.github.com/users/Wangzg97/events{/privacy}", "received_events_url": "https://api.github.com/users/Wangzg97/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6626/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6626/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6418
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6418/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6418/comments
https://api.github.com/repos/ollama/ollama/issues/6418/events
https://github.com/ollama/ollama/issues/6418
2,473,002,971
I_kwDOJ0Z1Ps6TZwfb
6,418
-d does not work every time
{ "login": "Sakethsreeram7", "id": 107628648, "node_id": "U_kgDOBmpIaA", "avatar_url": "https://avatars.githubusercontent.com/u/107628648?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Sakethsreeram7", "html_url": "https://github.com/Sakethsreeram7", "followers_url": "https://api.github.com/users/Sakethsreeram7/followers", "following_url": "https://api.github.com/users/Sakethsreeram7/following{/other_user}", "gists_url": "https://api.github.com/users/Sakethsreeram7/gists{/gist_id}", "starred_url": "https://api.github.com/users/Sakethsreeram7/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Sakethsreeram7/subscriptions", "organizations_url": "https://api.github.com/users/Sakethsreeram7/orgs", "repos_url": "https://api.github.com/users/Sakethsreeram7/repos", "events_url": "https://api.github.com/users/Sakethsreeram7/events{/privacy}", "received_events_url": "https://api.github.com/users/Sakethsreeram7/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-08-19T10:21:40
2024-08-20T11:02:24
2024-08-20T11:02:24
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### curl is not working in Postman ``` curl http://localhost:11434/api/generate -d '{ "model": "llama3", "prompt": "Why is the sky blue?" }' ``` This does not work in Postman; using --data instead works consistently.
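In curl itself `-d` and `--data` are the same option, so the difference is most likely in how Postman parses the imported command rather than in the Ollama API. A minimal sketch of the long-form variant the reporter says works, with the same endpoint and payload as above:

```
# Illustrative only: identical request using the long flag name
curl http://localhost:11434/api/generate --data '{
  "model": "llama3",
  "prompt": "Why is the sky blue?"
}'
```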
{ "login": "Sakethsreeram7", "id": 107628648, "node_id": "U_kgDOBmpIaA", "avatar_url": "https://avatars.githubusercontent.com/u/107628648?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Sakethsreeram7", "html_url": "https://github.com/Sakethsreeram7", "followers_url": "https://api.github.com/users/Sakethsreeram7/followers", "following_url": "https://api.github.com/users/Sakethsreeram7/following{/other_user}", "gists_url": "https://api.github.com/users/Sakethsreeram7/gists{/gist_id}", "starred_url": "https://api.github.com/users/Sakethsreeram7/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Sakethsreeram7/subscriptions", "organizations_url": "https://api.github.com/users/Sakethsreeram7/orgs", "repos_url": "https://api.github.com/users/Sakethsreeram7/repos", "events_url": "https://api.github.com/users/Sakethsreeram7/events{/privacy}", "received_events_url": "https://api.github.com/users/Sakethsreeram7/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6418/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6418/timeline
null
completed
false