url: stringlengths (51–54)
repository_url: stringclasses (1 value)
labels_url: stringlengths (65–68)
comments_url: stringlengths (60–63)
events_url: stringlengths (58–61)
html_url: stringlengths (39–44)
id: int64 (1.78B–2.82B)
node_id: stringlengths (18–19)
number: int64 (1–8.69k)
title: stringlengths (1–382)
user: dict
labels: listlengths (0–5)
state: stringclasses (2 values)
locked: bool (1 class)
assignee: dict
assignees: listlengths (0–2)
milestone: null
comments: int64 (0–323)
created_at: timestamp[s]
updated_at: timestamp[s]
closed_at: timestamp[s]
author_association: stringclasses (4 values)
sub_issues_summary: dict
active_lock_reason: null
draft: bool (2 classes)
pull_request: dict
body: stringlengths (2–118k, nullable)
closed_by: dict
reactions: dict
timeline_url: stringlengths (60–63)
performed_via_github_app: null
state_reason: stringclasses (4 values)
is_pull_request: bool (2 classes)
https://api.github.com/repos/ollama/ollama/issues/4589
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4589/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4589/comments
https://api.github.com/repos/ollama/ollama/issues/4589/events
https://github.com/ollama/ollama/issues/4589
2,312,687,709
I_kwDOJ0Z1Ps6J2NBd
4,589
How do I update all the models downloaded locally?
{ "login": "qzc438", "id": 61488260, "node_id": "MDQ6VXNlcjYxNDg4MjYw", "avatar_url": "https://avatars.githubusercontent.com/u/61488260?v=4", "gravatar_id": "", "url": "https://api.github.com/users/qzc438", "html_url": "https://github.com/qzc438", "followers_url": "https://api.github.com/users/qzc438/followers", "following_url": "https://api.github.com/users/qzc438/following{/other_user}", "gists_url": "https://api.github.com/users/qzc438/gists{/gist_id}", "starred_url": "https://api.github.com/users/qzc438/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/qzc438/subscriptions", "organizations_url": "https://api.github.com/users/qzc438/orgs", "repos_url": "https://api.github.com/users/qzc438/repos", "events_url": "https://api.github.com/users/qzc438/events{/privacy}", "received_events_url": "https://api.github.com/users/qzc438/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
5
2024-05-23T11:39:04
2024-10-16T20:08:43
2024-05-23T17:35:25
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
See title.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4589/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4589/timeline
null
completed
false
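Issue #4589 above asks how to update every locally downloaded model. Ollama has no single built-in "update all" command; the usual workaround is to re-pull each installed model, since a pull only fetches layers that changed. Below is a minimal Go sketch of that loop, assuming the default server address (localhost:11434) and the `/api/tags` and `/api/pull` endpoints that also appear in the server logs later in this dump; treat the exact JSON shapes as assumptions taken from the documented API.

```go
// update_all.go — re-pull every locally installed model.
// Minimal sketch: assumes a default Ollama server on localhost:11434
// and the documented /api/tags (list) and /api/pull (update) endpoints.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

type tagsResponse struct {
	Models []struct {
		Name string `json:"name"`
	} `json:"models"`
}

func main() {
	// List every locally downloaded model.
	resp, err := http.Get("http://localhost:11434/api/tags")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var tags tagsResponse
	if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil {
		log.Fatal(err)
	}

	// Re-pull each model; unchanged layers are not downloaded again.
	for _, m := range tags.Models {
		body, _ := json.Marshal(map[string]any{"name": m.Name, "stream": false})
		r, err := http.Post("http://localhost:11434/api/pull", "application/json", bytes.NewReader(body))
		if err != nil {
			log.Fatal(err)
		}
		io.Copy(io.Discard, r.Body)
		r.Body.Close()
		fmt.Println("updated", m.Name)
	}
}
```

The same loop is often written as a shell one-liner over `ollama list`; the HTTP form is shown here to keep all examples in one language.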
https://api.github.com/repos/ollama/ollama/issues/4921
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4921/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4921/comments
https://api.github.com/repos/ollama/ollama/issues/4921/events
https://github.com/ollama/ollama/pull/4921
2,341,291,530
PR_kwDOJ0Z1Ps5x1-Vw
4,921
update import.md
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-06-07T23:45:27
2024-06-10T18:41:10
2024-06-10T18:41:10
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4921", "html_url": "https://github.com/ollama/ollama/pull/4921", "diff_url": "https://github.com/ollama/ollama/pull/4921.diff", "patch_url": "https://github.com/ollama/ollama/pull/4921.patch", "merged_at": "2024-06-10T18:41:10" }
update import docs with recently added features
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4921/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4921/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7694
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7694/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7694/comments
https://api.github.com/repos/ollama/ollama/issues/7694/events
https://github.com/ollama/ollama/issues/7694
2,663,322,698
I_kwDOJ0Z1Ps6evxRK
7,694
When I run Ollama using AMD 6750GRE 12G I get an error - gfx1031 unsupported by official ROCm on windows
{ "login": "a961335435", "id": 52148615, "node_id": "MDQ6VXNlcjUyMTQ4NjE1", "avatar_url": "https://avatars.githubusercontent.com/u/52148615?v=4", "gravatar_id": "", "url": "https://api.github.com/users/a961335435", "html_url": "https://github.com/a961335435", "followers_url": "https://api.github.com/users/a961335435/followers", "following_url": "https://api.github.com/users/a961335435/following{/other_user}", "gists_url": "https://api.github.com/users/a961335435/gists{/gist_id}", "starred_url": "https://api.github.com/users/a961335435/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/a961335435/subscriptions", "organizations_url": "https://api.github.com/users/a961335435/orgs", "repos_url": "https://api.github.com/users/a961335435/repos", "events_url": "https://api.github.com/users/a961335435/events{/privacy}", "received_events_url": "https://api.github.com/users/a961335435/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 5860134234, "node_id": "LA_kwDOJ0Z1Ps8AAAABXUqNWg", "url": "https://api.github.com/repos/ollama/ollama/labels/windows", "name": "windows", "color": "0052CC", "default": false, "description": "" } ]
open
false
null
[]
null
6
2024-11-15T22:22:47
2024-11-19T17:31:12
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

After downloading and installing, getting this GPU to work requires an extra download of a prebuilt rocBLAS: the downloaded rocblas.dll overwrites the rocblas.dll that ships with the HIP SDK, placed in the corresponding path under the HIP SDK (if it is missing, download it yourself), and the SDK's bundled library folder (C:\Program Files\AMD\ROCm\6.1\bin\rocblas\library) is replaced as well; with that, it can run normally on this graphics card. The second step is to replace the rocblas.dll file and library folder in the Ollama program directory (C:\Users\96133\AppData\Local\Programs\Ollama\lib\ollama, the file and folder with the same names). Then Ollama runs on the graphics card, but once I have done all this I get the following:

```
Microsoft Windows [Version 10.0.19045.4894]
(c) Microsoft Corporation. All rights reserved.

C:\Users\96133>ollama run qwen2.5-coder:14b
>>> 2
Error: POST predict: Post "http://127.0.0.1:53690/completion": read tcp 127.0.0.1:53698->127.0.0.1:53690: wsarecv: An existing connection was forcibly closed by the remote host.
```

When I change the model:

```
C:\Users\96133>ollama run qwen2.5-coder
pulling manifest
pulling 60e05f210007... 100% β–•β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– 4.7 GB
pulling 66b9ea09bd5b... 100% β–•β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–  68 B
pulling e94a8ecb9327... 100% β–•β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– 1.6 KB
pulling 832dd9e00a68... 100% β–•β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–  11 KB
pulling d9bb33f27869... 100% β–•β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– 487 B
verifying sha256 digest
writing manifest
success
>>> 3
Error: POST predict: Post "http://127.0.0.1:52408/completion": read tcp 127.0.0.1:52411->127.0.0.1:52408: wsarecv: An existing connection was forcibly closed by the remote host.
```

Attached are the logs from the C:\Users\96133\AppData\Local\Ollama folder. I don't know why this is happening; when I run without the replaced files, on the CPU, there is no problem. The ROCm build I use is rocm.gfx1031.for.hip.sdk.6.1.2.7z from https://github.com/likelovewant/ROCmLibs-for-gfx1103-AMD780M-APU/

[app-1.log](https://github.com/user-attachments/files/17781911/app-1.log)
[server.log](https://github.com/user-attachments/files/17781914/server.log)

Help me, what should I do?

### OS

Windows

### GPU

AMD

### CPU

AMD

### Ollama version

0.4.1
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7694/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7694/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/4066
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4066/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4066/comments
https://api.github.com/repos/ollama/ollama/issues/4066/events
https://github.com/ollama/ollama/issues/4066
2,272,615,662
I_kwDOJ0Z1Ps6HdVzu
4,066
Support IPEX-LLM
{ "login": "shawnshi", "id": 1212354, "node_id": "MDQ6VXNlcjEyMTIzNTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/1212354?v=4", "gravatar_id": "", "url": "https://api.github.com/users/shawnshi", "html_url": "https://github.com/shawnshi", "followers_url": "https://api.github.com/users/shawnshi/followers", "following_url": "https://api.github.com/users/shawnshi/following{/other_user}", "gists_url": "https://api.github.com/users/shawnshi/gists{/gist_id}", "starred_url": "https://api.github.com/users/shawnshi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shawnshi/subscriptions", "organizations_url": "https://api.github.com/users/shawnshi/orgs", "repos_url": "https://api.github.com/users/shawnshi/repos", "events_url": "https://api.github.com/users/shawnshi/events{/privacy}", "received_events_url": "https://api.github.com/users/shawnshi/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
2
2024-04-30T22:57:44
2024-10-18T06:37:53
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Will Ollama add IPEX-LLM support by default?
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4066/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4066/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/1716
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1716/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1716/comments
https://api.github.com/repos/ollama/ollama/issues/1716/events
https://github.com/ollama/ollama/issues/1716
2,056,024,732
I_kwDOJ0Z1Ps56jHKc
1,716
is there a way to calculate token size?
{ "login": "ralyodio", "id": 27381, "node_id": "MDQ6VXNlcjI3Mzgx", "avatar_url": "https://avatars.githubusercontent.com/u/27381?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ralyodio", "html_url": "https://github.com/ralyodio", "followers_url": "https://api.github.com/users/ralyodio/followers", "following_url": "https://api.github.com/users/ralyodio/following{/other_user}", "gists_url": "https://api.github.com/users/ralyodio/gists{/gist_id}", "starred_url": "https://api.github.com/users/ralyodio/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ralyodio/subscriptions", "organizations_url": "https://api.github.com/users/ralyodio/orgs", "repos_url": "https://api.github.com/users/ralyodio/repos", "events_url": "https://api.github.com/users/ralyodio/events{/privacy}", "received_events_url": "https://api.github.com/users/ralyodio/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
20
2023-12-26T04:48:35
2024-09-04T03:54:43
2024-09-04T03:54:43
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I don't know if this limitation exists with the API. I'm switching from the OpenAI API to the Ollama API, and with OpenAI I need to calculate the prompt's token size and subtract it from the total 4096. Do we need to do that for the Ollama API? If so, how do I calculate the token size of a prompt? (See the Go sketch after this record.)
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1716/reactions", "total_count": 7, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 3 }
https://api.github.com/repos/ollama/ollama/issues/1716/timeline
null
completed
false
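Issue #1716 above asks how to count prompt tokens. A minimal Go sketch of one common workaround, assuming the documented `/api/generate` response field `prompt_eval_count` (the number of tokens in the prompt); setting `num_predict` to 0 asks the server to evaluate the prompt without generating, though whether a cached prompt still reports the full count is an assumption worth verifying:

```go
// prompt_tokens.go — count the tokens in a prompt via /api/generate.
// Minimal sketch: assumes the documented prompt_eval_count response field
// and a default Ollama server on localhost:11434.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	req, _ := json.Marshal(map[string]any{
		"model":  "llama3", // hypothetical: any locally installed model
		"prompt": "How many tokens is this prompt?",
		"stream": false,
		"options": map[string]any{
			"num_predict": 0, // evaluate the prompt, generate nothing
		},
	})

	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(req))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var out struct {
		PromptEvalCount int `json:"prompt_eval_count"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println("prompt tokens:", out.PromptEvalCount)
}
```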
https://api.github.com/repos/ollama/ollama/issues/5497
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5497/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5497/comments
https://api.github.com/repos/ollama/ollama/issues/5497/events
https://github.com/ollama/ollama/pull/5497
2,392,736,249
PR_kwDOJ0Z1Ps50i5kQ
5,497
docs: add OpenGPA in Readme Web & Desktop
{ "login": "eschnou", "id": 185660, "node_id": "MDQ6VXNlcjE4NTY2MA==", "avatar_url": "https://avatars.githubusercontent.com/u/185660?v=4", "gravatar_id": "", "url": "https://api.github.com/users/eschnou", "html_url": "https://github.com/eschnou", "followers_url": "https://api.github.com/users/eschnou/followers", "following_url": "https://api.github.com/users/eschnou/following{/other_user}", "gists_url": "https://api.github.com/users/eschnou/gists{/gist_id}", "starred_url": "https://api.github.com/users/eschnou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eschnou/subscriptions", "organizations_url": "https://api.github.com/users/eschnou/orgs", "repos_url": "https://api.github.com/users/eschnou/repos", "events_url": "https://api.github.com/users/eschnou/events{/privacy}", "received_events_url": "https://api.github.com/users/eschnou/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
3
2024-07-05T14:41:12
2024-11-21T17:15:50
2024-11-21T08:13:54
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5497", "html_url": "https://github.com/ollama/ollama/pull/5497", "diff_url": "https://github.com/ollama/ollama/pull/5497.diff", "patch_url": "https://github.com/ollama/ollama/pull/5497.patch", "merged_at": "2024-11-21T08:13:54" }
Hello,

OpenGPA is an offline-first, open-source enterprise agentic application. It has Ollama support and Ollama-specific documentation (https://github.com/eschnou/OpenGPA/blob/main/documentation/offline.md). This integration allows for a fully offline experience, which is required in some critical enterprise environments.

Thanks for the awesome work on Ollama, and thanks for reviewing and merging this one 🀩

Best regards, Laurent
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5497/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5497/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2605
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2605/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2605/comments
https://api.github.com/repos/ollama/ollama/issues/2605/events
https://github.com/ollama/ollama/pull/2605
2,143,559,024
PR_kwDOJ0Z1Ps5nWftO
2,605
Basic whitespace detection in JSON mode
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
4
2024-02-20T05:15:58
2024-03-12T18:10:24
2024-03-12T18:10:23
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
true
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2605", "html_url": "https://github.com/ollama/ollama/pull/2605", "diff_url": "https://github.com/ollama/ollama/pull/2605.diff", "patch_url": "https://github.com/ollama/ollama/pull/2605.patch", "merged_at": null }
This stops hanging from infinite whitespace generation by detecting 100 consecutive whitespace tokens and cancelling. (See the Go sketch after this record.)

Other ideas:
- [ ] Repetition detection – detect the repetition of the same string over and over again
- [ ] Only do this after detecting a full JSON object
- [ ] Lower whitespace logit bias when using JSON mode (might affect outcome of the response)
- [ ] Force user to specify `JSON` in the prompt (might be hard for folks to know this; this is what OpenAI does)
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2605/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2605/timeline
null
null
true
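PR #2605 above cancels generation once 100 consecutive whitespace tokens arrive. Below is a minimal, self-contained Go sketch of that detection idea, not the actual patch (which lives in the server's streaming loop); the type and names here are illustrative:

```go
// whitespace_guard.go — cancel a token stream after too many consecutive
// all-whitespace tokens. Illustrative sketch of the idea in PR #2605.
package main

import (
	"fmt"
	"strings"
)

const maxConsecutiveWhitespace = 100

// whitespaceGuard tracks the length of the current all-whitespace run.
type whitespaceGuard struct {
	run int
}

// next records one sampled token and reports whether to cancel.
func (g *whitespaceGuard) next(token string) bool {
	if strings.TrimSpace(token) == "" {
		g.run++
	} else {
		g.run = 0 // any real content resets the run
	}
	return g.run >= maxConsecutiveWhitespace
}

func main() {
	g := &whitespaceGuard{}
	// A JSON object followed by endless newlines, as in the bug this targets.
	for _, tok := range []string{"{", `"done"`, ":", "true", "}"} {
		g.next(tok)
	}
	for {
		if g.next("\n") {
			fmt.Println("cancelling after", g.run, "consecutive whitespace tokens")
			break
		}
	}
}
```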
https://api.github.com/repos/ollama/ollama/issues/6649
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6649/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6649/comments
https://api.github.com/repos/ollama/ollama/issues/6649/events
https://github.com/ollama/ollama/issues/6649
2,506,712,288
I_kwDOJ0Z1Ps6VaWTg
6,649
Intel GPU - model > 4b nonsense?
{ "login": "cyear", "id": 77969791, "node_id": "MDQ6VXNlcjc3OTY5Nzkx", "avatar_url": "https://avatars.githubusercontent.com/u/77969791?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cyear", "html_url": "https://github.com/cyear", "followers_url": "https://api.github.com/users/cyear/followers", "following_url": "https://api.github.com/users/cyear/following{/other_user}", "gists_url": "https://api.github.com/users/cyear/gists{/gist_id}", "starred_url": "https://api.github.com/users/cyear/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cyear/subscriptions", "organizations_url": "https://api.github.com/users/cyear/orgs", "repos_url": "https://api.github.com/users/cyear/repos", "events_url": "https://api.github.com/users/cyear/events{/privacy}", "received_events_url": "https://api.github.com/users/cyear/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6677491450, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgJu-g", "url": "https://api.github.com/repos/ollama/ollama/labels/intel", "name": "intel", "color": "226E5B", "default": false, "description": "issues relating to Intel GPUs" } ]
open
false
null
[]
null
6
2024-09-05T03:57:35
2024-11-10T03:28:08
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

qwen4b works fine; all other models larger than 4b produce gibberish.

```
time=2024-09-05T11:35:49.569+08:00 level=INFO source=download.go:175 msg="downloading 8eeb52dfb3bb in 16 291 MB part(s)"
time=2024-09-05T11:37:19.112+08:00 level=INFO source=download.go:370 msg="8eeb52dfb3bb part 0 stalled; retrying. If this persists, press ctrl-c to exit, then 'ollama pull' to find a faster connection."
time=2024-09-05T11:37:21.112+08:00 level=INFO source=download.go:370 msg="8eeb52dfb3bb part 4 stalled; retrying. If this persists, press ctrl-c to exit, then 'ollama pull' to find a faster connection."
[GIN] 2024/09/05 - 11:41:40 | 200 | 5m55s | 10.0.0.18 | POST "/api/pull"
[GIN] 2024/09/05 - 11:51:04 | 200 | 1.182ms | 10.0.0.18 | GET "/api/tags"
[GIN] 2024/09/05 - 11:51:05 | 200 | 0s | 10.0.0.18 | GET "/api/version"
[GIN] 2024/09/05 - 11:51:24 | 200 | 510.7µs | 10.0.0.18 | GET "/api/version"
[GIN] 2024/09/05 - 11:51:33 | 200 | 0s | 10.0.0.18 | GET "/api/version"
time=2024-09-05T11:51:51.177+08:00 level=INFO source=download.go:175 msg="downloading 8eeb52dfb3bb in 16 291 MB part(s)"
time=2024-09-05T11:51:58.238+08:00 level=INFO source=download.go:175 msg="downloading 73b313b5552d in 1 1.4 KB part(s)"
time=2024-09-05T11:52:01.269+08:00 level=INFO source=download.go:175 msg="downloading 0ba8f0e314b4 in 1 12 KB part(s)"
time=2024-09-05T11:52:04.339+08:00 level=INFO source=download.go:175 msg="downloading 56bb8bd477a5 in 1 96 B part(s)"
time=2024-09-05T11:52:07.492+08:00 level=INFO source=download.go:175 msg="downloading 1a4c3c319823 in 1 485 B part(s)"
[GIN] 2024/09/05 - 11:52:14 | 200 | 28.5001976s | 10.0.0.18 | POST "/api/pull"
[GIN] 2024/09/05 - 11:52:14 | 200 | 1.0817ms | 10.0.0.18 | GET "/api/tags"
[GIN] 2024/09/05 - 11:52:18 | 200 | 0s | 10.0.0.18 | GET "/api/version"
time=2024-09-05T11:52:23.514+08:00 level=INFO source=memory.go:309 msg="offload to cpu" layers.requested=-1 layers.model=33 layers.offload=0 layers.split="" memory.available="[20.3 GiB]" memory.required.full="4.6 GiB" memory.required.partial="0 B" memory.required.kv="256.0 MiB" memory.required.allocations="[4.6 GiB]" memory.weights.total="3.9 GiB" memory.weights.repeating="3.5 GiB" memory.weights.nonrepeating="411.0 MiB" memory.graph.full="164.0 MiB" memory.graph.partial="677.5 MiB"
time=2024-09-05T11:52:23.520+08:00 level=INFO source=server.go:395 msg="starting llama server" cmd="C:\\Users\\12742\\Desktop\\llama-cpp\\dist\\windows-amd64\\lib\\ollama\\runners\\cpu_avx2\\ollama_llama_server.exe --model C:\\Users\\12742\\.ollama\\models\\blobs\\sha256-8eeb52dfb3bb9aefdf9d1ef24b3bdbcfbe82238798c4b918278320b6fcef18fe --ctx-size 2048 --batch-size 512 --embedding --log-disable --n-gpu-layers 999 --no-mmap --parallel 1 --port 55176"
time=2024-09-05T11:52:23.546+08:00 level=INFO source=sched.go:450 msg="loaded runners" count=1
time=2024-09-05T11:52:23.546+08:00 level=INFO source=server.go:595 msg="waiting for llama runner to start responding"
time=2024-09-05T11:52:23.547+08:00 level=INFO source=server.go:629 msg="waiting for server to become available" status="llm server error"
INFO [wmain] build info | build=1 commit="c455d1d" tid="6776" timestamp=1725508343
INFO [wmain] system info | n_threads=14 n_threads_batch=-1 system_info="AVX = 0 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 0 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | " tid="6776" timestamp=1725508343 total_threads=20
INFO [wmain] HTTP server listening | hostname="127.0.0.1" n_threads_http="19" port="55176" tid="6776" timestamp=1725508343
llama_model_loader: loaded meta data with 29 key-value pairs and 292 tensors from C:\Users\12742\.ollama\models\blobs\sha256-8eeb52dfb3bb9aefdf9d1ef24b3bdbcfbe82238798c4b918278320b6fcef18fe (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Meta Llama 3.1 8B Instruct
llama_model_loader: - kv 3: general.finetune str = Instruct
llama_model_loader: - kv 4: general.basename str = Meta-Llama-3.1
llama_model_loader: - kv 5: general.size_label str = 8B
llama_model_loader: - kv 6: general.license str = llama3.1
llama_model_loader: - kv 7: general.tags arr[str,6] = ["facebook", "meta", "pytorch", "llam...
llama_model_loader: - kv 8: general.languages arr[str,8] = ["en", "de", "fr", "it", "pt", "hi", ...
llama_model_loader: - kv 9: llama.block_count u32 = 32
llama_model_loader: - kv 10: llama.context_length u32 = 131072
llama_model_loader: - kv 11: llama.embedding_length u32 = 4096
llama_model_loader: - kv 12: llama.feed_forward_length u32 = 14336
llama_model_loader: - kv 13: llama.attention.head_count u32 = 32
llama_model_loader: - kv 14: llama.attention.head_count_kv u32 = 8
llama_model_loader: - kv 15: llama.rope.freq_base f32 = 500000.000000
llama_model_loader: - kv 16: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 17: general.file_type u32 = 2
llama_model_loader: - kv 18: llama.vocab_size u32 = 128256
llama_model_loader: - kv 19: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 20: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 21: tokenizer.ggml.pre str = llama-bpe
llama_model_loader: - kv 22: tokenizer.ggml.tokens arr[str,128256] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 23: tokenizer.ggml.token_type arr[i32,128256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 24: tokenizer.ggml.merges arr[str,280147] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv 25: tokenizer.ggml.bos_token_id u32 = 128000
llama_model_loader: - kv 26: tokenizer.ggml.eos_token_id u32 = 128009
llama_model_loader: - kv 27: tokenizer.chat_template str = {{- bos_token }}\n{%- if custom_tools ...
llama_model_loader: - kv 28: general.quantization_version u32 = 2
llama_model_loader: - type f32: 66 tensors
llama_model_loader: - type q4_0: 225 tensors
llama_model_loader: - type q6_K: 1 tensors
time=2024-09-05T11:52:23.809+08:00 level=INFO source=server.go:629 msg="waiting for server to become available" status="llm server loading model"
llm_load_vocab: special tokens cache size = 256
llm_load_vocab: token to piece cache size = 0.7999 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 128256
llm_load_print_meta: n_merges = 280147
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 131072
llm_load_print_meta: n_embd = 4096
llm_load_print_meta: n_layer = 32
llm_load_print_meta: n_head = 32
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 4
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 14336
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 500000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 131072
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 8B
llm_load_print_meta: model ftype = Q4_0
llm_load_print_meta: model params = 8.03 B
llm_load_print_meta: model size = 4.33 GiB (4.64 BPW)
llm_load_print_meta: general.name = Meta Llama 3.1 8B Instruct
llm_load_print_meta: BOS token = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token = 128009 '<|eot_id|>'
llm_load_print_meta: LF token = 128 'Ä'
llm_load_print_meta: EOT token = 128009 '<|eot_id|>'
llm_load_print_meta: max token length = 256
ggml_sycl_init: GGML_SYCL_FORCE_MMQ: no
ggml_sycl_init: SYCL_USE_XMX: yes
ggml_sycl_init: found 1 SYCL devices:
llm_load_tensors: ggml ctx size = 0.27 MiB
llm_load_tensors: offloading 32 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 33/33 layers to GPU
llm_load_tensors: SYCL0 buffer size = 4156.00 MiB
llm_load_tensors: SYCL_Host buffer size = 281.81 MiB
llama_new_context_with_model: n_ctx = 2048
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 500000.0
llama_new_context_with_model: freq_scale = 1
[SYCL] call ggml_check_sycl
ggml_check_sycl: GGML_SYCL_DEBUG: 0
ggml_check_sycl: GGML_SYCL_F16: no
found 1 SYCL devices:
|  |                   |                                         |       |Max    |        |Max  |Global |                     |
|  |                   |                                         |       |compute|Max work|sub  |mem    |                     |
|ID|        Device Type|                                     Name|Version|units  |group   |group|size   |       Driver version|
|--|-------------------|---------------------------------------|-------|-------|--------|-----|-------|---------------------|
| 0| [level_zero:gpu:0]|                 Intel Arc A730M Graphics|    1.5|    384|    1024|   32| 12514M|            1.3.30398|
llama_kv_cache_init: SYCL0 KV buffer size = 256.00 MiB
llama_new_context_with_model: KV self size = 256.00 MiB, K (f16): 128.00 MiB, V (f16): 128.00 MiB
llama_new_context_with_model: SYCL_Host output buffer size = 0.50 MiB
llama_new_context_with_model: SYCL0 compute buffer size = 258.50 MiB
llama_new_context_with_model: SYCL_Host compute buffer size = 12.01 MiB
llama_new_context_with_model: graph nodes = 1062
llama_new_context_with_model: graph splits = 2
INFO [wmain] model loaded | tid="6776" timestamp=1725508352
time=2024-09-05T11:52:32.341+08:00 level=INFO source=server.go:634 msg="llama runner started in 8.80 seconds"
```

![image](https://github.com/user-attachments/assets/616e39ab-9f78-48f3-8f86-fbc65a7b87d6)

### OS

Linux, Windows

### GPU

Intel

### CPU

Intel

### Ollama version

0.3.6-ipexllm-20240905
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6649/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6649/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/6908
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6908/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6908/comments
https://api.github.com/repos/ollama/ollama/issues/6908/events
https://github.com/ollama/ollama/issues/6908
2,540,834,474
I_kwDOJ0Z1Ps6Xcg6q
6,908
How to use embedding models from huggingface hub?
{ "login": "fzyzcjy", "id": 5236035, "node_id": "MDQ6VXNlcjUyMzYwMzU=", "avatar_url": "https://avatars.githubusercontent.com/u/5236035?v=4", "gravatar_id": "", "url": "https://api.github.com/users/fzyzcjy", "html_url": "https://github.com/fzyzcjy", "followers_url": "https://api.github.com/users/fzyzcjy/followers", "following_url": "https://api.github.com/users/fzyzcjy/following{/other_user}", "gists_url": "https://api.github.com/users/fzyzcjy/gists{/gist_id}", "starred_url": "https://api.github.com/users/fzyzcjy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fzyzcjy/subscriptions", "organizations_url": "https://api.github.com/users/fzyzcjy/orgs", "repos_url": "https://api.github.com/users/fzyzcjy/repos", "events_url": "https://api.github.com/users/fzyzcjy/events{/privacy}", "received_events_url": "https://api.github.com/users/fzyzcjy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
5
2024-09-22T08:33:02
2025-01-18T06:12:36
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi, thanks for the lib! I want to use some embedding models (arch is BERT) from the HF hub. I have tried GGUF, but the converter says the bert arch cannot be converted to it. I have also tried a Modelfile that imports the safetensors directly, but it says `Error: unsupported content type: unknown`. (See the Go sketch after this record.)
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6908/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/6908/timeline
null
null
false
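Issue #6908 above asks how to import a BERT-architecture embedding model when the local converter refuses both GGUF conversion and raw safetensors. One hedged workaround, sketched in Go: if a GGUF build of the model already exists on the Hugging Face Hub, Ollama can pull it directly using an `hf.co/{user}/{repo}` model name, which sidesteps local conversion entirely. The repository name below is a placeholder, and this only works where someone has published a GGUF for that model:

```go
// hf_pull.go — pull a GGUF model straight from the Hugging Face Hub.
// Minimal sketch: assumes a default Ollama server on localhost:11434 and
// that the target repo actually contains a GGUF file. The repo name is a
// placeholder, not a recommendation.
package main

import (
	"bytes"
	"encoding/json"
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	req, _ := json.Marshal(map[string]any{
		"name": "hf.co/your-org/your-embedding-model-gguf", // placeholder
	})
	resp, err := http.Post("http://localhost:11434/api/pull", "application/json", bytes.NewReader(req))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // pull progress streams back as JSON lines
}
```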
https://api.github.com/repos/ollama/ollama/issues/8640
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8640/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8640/comments
https://api.github.com/repos/ollama/ollama/issues/8640/events
https://github.com/ollama/ollama/issues/8640
2,815,870,902
I_kwDOJ0Z1Ps6n1se2
8,640
Add install location variable to linux
{ "login": "muzzol", "id": 6727358, "node_id": "MDQ6VXNlcjY3MjczNTg=", "avatar_url": "https://avatars.githubusercontent.com/u/6727358?v=4", "gravatar_id": "", "url": "https://api.github.com/users/muzzol", "html_url": "https://github.com/muzzol", "followers_url": "https://api.github.com/users/muzzol/followers", "following_url": "https://api.github.com/users/muzzol/following{/other_user}", "gists_url": "https://api.github.com/users/muzzol/gists{/gist_id}", "starred_url": "https://api.github.com/users/muzzol/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/muzzol/subscriptions", "organizations_url": "https://api.github.com/users/muzzol/orgs", "repos_url": "https://api.github.com/users/muzzol/repos", "events_url": "https://api.github.com/users/muzzol/events{/privacy}", "received_events_url": "https://api.github.com/users/muzzol/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
1
2025-01-28T14:38:04
2025-01-29T16:47:20
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I would like to specify the install location, either with a parameter (--install-dir), with a variable ($OLLAMA_INSTALL_DIR), or both. The variable already exists in the installer script, so it should be enough to allow overriding it with a custom value. Something like:

```
export OLLAMA_INSTALL_DIR="/home/user/ollamainst"
curl -fsSL https://ollama.com/install.sh | sh
```
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8640/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8640/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/6776
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6776/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6776/comments
https://api.github.com/repos/ollama/ollama/issues/6776/events
https://github.com/ollama/ollama/issues/6776
2,522,730,614
I_kwDOJ0Z1Ps6WXdB2
6,776
Pixtral model request
{ "login": "iplayfast", "id": 751306, "node_id": "MDQ6VXNlcjc1MTMwNg==", "avatar_url": "https://avatars.githubusercontent.com/u/751306?v=4", "gravatar_id": "", "url": "https://api.github.com/users/iplayfast", "html_url": "https://github.com/iplayfast", "followers_url": "https://api.github.com/users/iplayfast/followers", "following_url": "https://api.github.com/users/iplayfast/following{/other_user}", "gists_url": "https://api.github.com/users/iplayfast/gists{/gist_id}", "starred_url": "https://api.github.com/users/iplayfast/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/iplayfast/subscriptions", "organizations_url": "https://api.github.com/users/iplayfast/orgs", "repos_url": "https://api.github.com/users/iplayfast/repos", "events_url": "https://api.github.com/users/iplayfast/events{/privacy}", "received_events_url": "https://api.github.com/users/iplayfast/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
3
2024-09-12T15:31:38
2024-09-12T21:53:57
2024-09-12T21:53:33
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
https://huggingface.co/mistral-community/pixtral-12b-240910

Image support using Mistral tech.
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6776/reactions", "total_count": 12, "+1": 7, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 5, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6776/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4598
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4598/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4598/comments
https://api.github.com/repos/ollama/ollama/issues/4598/events
https://github.com/ollama/ollama/pull/4598
2,313,870,346
PR_kwDOJ0Z1Ps5wYe02
4,598
Tidy up developer guide a little
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-05-23T21:24:41
2024-05-23T22:14:32
2024-05-23T22:14:30
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4598", "html_url": "https://github.com/ollama/ollama/pull/4598", "diff_url": "https://github.com/ollama/ollama/pull/4598.diff", "patch_url": "https://github.com/ollama/ollama/pull/4598.patch", "merged_at": "2024-05-23T22:14:30" }
null
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4598/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4598/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/969
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/969/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/969/comments
https://api.github.com/repos/ollama/ollama/issues/969/events
https://github.com/ollama/ollama/issues/969
1,973,701,375
I_kwDOJ0Z1Ps51pEr_
969
###### problem
{ "login": "k3341095", "id": 17330375, "node_id": "MDQ6VXNlcjE3MzMwMzc1", "avatar_url": "https://avatars.githubusercontent.com/u/17330375?v=4", "gravatar_id": "", "url": "https://api.github.com/users/k3341095", "html_url": "https://github.com/k3341095", "followers_url": "https://api.github.com/users/k3341095/followers", "following_url": "https://api.github.com/users/k3341095/following{/other_user}", "gists_url": "https://api.github.com/users/k3341095/gists{/gist_id}", "starred_url": "https://api.github.com/users/k3341095/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/k3341095/subscriptions", "organizations_url": "https://api.github.com/users/k3341095/orgs", "repos_url": "https://api.github.com/users/k3341095/repos", "events_url": "https://api.github.com/users/k3341095/events{/privacy}", "received_events_url": "https://api.github.com/users/k3341095/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
30
2023-11-02T07:56:38
2024-04-17T02:15:04
2024-04-17T02:15:04
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
![image](https://github.com/jmorganca/ollama/assets/17330375/7148c0f6-47b4-4fa4-b219-436e12776f79)

The command to install Docker and run the 13b model worked fine. However, after running it and typing "hi", the only output is "####" repeated endlessly.
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/969/reactions", "total_count": 3, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 3 }
https://api.github.com/repos/ollama/ollama/issues/969/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7880
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7880/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7880/comments
https://api.github.com/repos/ollama/ollama/issues/7880/events
https://github.com/ollama/ollama/issues/7880
2,704,326,524
I_kwDOJ0Z1Ps6hML98
7,880
Add a CORS permissions model into the Ollama UI ("Allow example.com to use Ollama? [Yes] [No]")
{ "login": "blixt", "id": 158591, "node_id": "MDQ6VXNlcjE1ODU5MQ==", "avatar_url": "https://avatars.githubusercontent.com/u/158591?v=4", "gravatar_id": "", "url": "https://api.github.com/users/blixt", "html_url": "https://github.com/blixt", "followers_url": "https://api.github.com/users/blixt/followers", "following_url": "https://api.github.com/users/blixt/following{/other_user}", "gists_url": "https://api.github.com/users/blixt/gists{/gist_id}", "starred_url": "https://api.github.com/users/blixt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/blixt/subscriptions", "organizations_url": "https://api.github.com/users/blixt/orgs", "repos_url": "https://api.github.com/users/blixt/repos", "events_url": "https://api.github.com/users/blixt/events{/privacy}", "received_events_url": "https://api.github.com/users/blixt/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
1
2024-11-29T08:28:44
2024-12-12T22:41:56
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Lots of AI apps out there solve access to LLMs in a few different ways:

- Directly use a hosted model and foot the bill for the user
- Ask the user to provide their own hosted model API key (😬)
- Let the user host the app themselves, providing the API key this way
- Connect with a local model provider like Ollama, but this has several issues today[^1]

I think Ollama is in wide enough circulation that it could create a permissions standard around local model access from the browser. An initial draft of this could be very simple:

The first time a request comes in with an `Origin` value that's never been seen before, hold the request and ask the user with a system notification: "Allow example.com to use Ollama?" If the user chooses to allow, the domain gets added to an allow list, which is used to send a valid CORS header to the incoming request. If the user chooses to deny, add the domain to a deny list, which just means the CORS header will not be sent. If the user makes no choice, time out the request and ask again next time. (See the Go sketch after this record.)

This can still be combined with the existing `OLLAMA_ORIGINS` setting, so anything in there is automatically allowed (except for `*`).

[^1]: The first issue is that the user must run terminal commands to enable CORS. The second is that unless they use `*`, turning on access for one app will remove access for another (unless they know how to read and combine the list of domains). And the third is that the user will be lazy and pick `*`, and now any site in the world can use their local model.
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7880/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7880/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/848
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/848/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/848/comments
https://api.github.com/repos/ollama/ollama/issues/848/events
https://github.com/ollama/ollama/issues/848
1,953,129,666
I_kwDOJ0Z1Ps50amTC
848
Falcon models not stopping correctly?
{ "login": "coder543", "id": 726063, "node_id": "MDQ6VXNlcjcyNjA2Mw==", "avatar_url": "https://avatars.githubusercontent.com/u/726063?v=4", "gravatar_id": "", "url": "https://api.github.com/users/coder543", "html_url": "https://github.com/coder543", "followers_url": "https://api.github.com/users/coder543/followers", "following_url": "https://api.github.com/users/coder543/following{/other_user}", "gists_url": "https://api.github.com/users/coder543/gists{/gist_id}", "starred_url": "https://api.github.com/users/coder543/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/coder543/subscriptions", "organizations_url": "https://api.github.com/users/coder543/orgs", "repos_url": "https://api.github.com/users/coder543/repos", "events_url": "https://api.github.com/users/coder543/events{/privacy}", "received_events_url": "https://api.github.com/users/coder543/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
3
2023-10-19T22:06:40
2024-05-17T21:35:08
2024-05-17T21:35:08
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I'm not sure why, but the falcon models seem to continue past their end-of-text token. I'm assuming `</s>` is supposed to be an end-of-text token? ``` $ ollama run falcon:40b >>> Translate "hello world" into spanish Hola mundo ```</s> What are some other common phrases or topics that users might ask the assistant for translation?</s> Some common phrases and topics that users may ask the assistant for translation include: - How do you say [phrase] in Spanish? - Translate this sentence into Spanish. - What is the Spanish word for [object/concept]? - Can you tell me how to say [phrase/sentence] in Spanish? - What does [phrase] mean in Spanish? - How do you pronounce [word/name] in Spanish? - Can you translate this paragraph into Spanish? - How would I say [phrase/sent^C ``` (I hit ctrl-c at the end since it did not seem likely to stop any time soon, and I've repeated this a few times.)
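One workaround while the template is broken, assuming the documented `stop` option of Ollama's generate API (a sketch that trims output at the offending token, not a fix for the underlying issue):

```python
import ollama  # pip install ollama

# Ask the server to cut generation at the token the model fails to stop on.
response = ollama.generate(
    model="falcon:40b",
    prompt='Translate "hello world" into spanish',
    options={"stop": ["</s>"]},  # stop sequences are trimmed from the output
)
print(response["response"])
```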
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/848/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/848/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/6175
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6175/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6175/comments
https://api.github.com/repos/ollama/ollama/issues/6175/events
https://github.com/ollama/ollama/issues/6175
2,448,020,696
I_kwDOJ0Z1Ps6R6dTY
6,175
Fail when calling ollama.embeddings function
{ "login": "weixu-tf4", "id": 128037376, "node_id": "U_kgDOB6GyAA", "avatar_url": "https://avatars.githubusercontent.com/u/128037376?v=4", "gravatar_id": "", "url": "https://api.github.com/users/weixu-tf4", "html_url": "https://github.com/weixu-tf4", "followers_url": "https://api.github.com/users/weixu-tf4/followers", "following_url": "https://api.github.com/users/weixu-tf4/following{/other_user}", "gists_url": "https://api.github.com/users/weixu-tf4/gists{/gist_id}", "starred_url": "https://api.github.com/users/weixu-tf4/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/weixu-tf4/subscriptions", "organizations_url": "https://api.github.com/users/weixu-tf4/orgs", "repos_url": "https://api.github.com/users/weixu-tf4/repos", "events_url": "https://api.github.com/users/weixu-tf4/events{/privacy}", "received_events_url": "https://api.github.com/users/weixu-tf4/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6677367769, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCL2Q", "url": "https://api.github.com/repos/ollama/ollama/labels/needs%20more%20info", "name": "needs more info", "color": "BA8041", "default": false, "description": "More information is needed to assist" } ]
closed
false
null
[]
null
3
2024-08-05T09:07:56
2024-09-02T23:32:25
2024-09-02T23:32:25
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I was following the example from https://ollama.com/blog/embedding-models but it always fails when running: response = ollama.embeddings(model='mxbai-embed-large', prompt=d) I don't know why. ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.3.3
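For reference, a minimal version of the blog's example that usually surfaces the actual error (the model must be pulled locally first; exact error messages vary by version):

```python
import ollama

# The embedding model has to exist locally before it can be used.
ollama.pull("mxbai-embed-large")

documents = ["Llamas are members of the camelid family"]
for d in documents:
    resp = ollama.embeddings(model="mxbai-embed-large", prompt=d)
    print(len(resp["embedding"]))  # mxbai-embed-large produces 1024-dim vectors
```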
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6175/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6175/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7991
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7991/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7991/comments
https://api.github.com/repos/ollama/ollama/issues/7991/events
https://github.com/ollama/ollama/issues/7991
2,724,862,885
I_kwDOJ0Z1Ps6iahul
7,991
{"error":"json: cannot unmarshal object into Go struct field GenerateRequest.format of type string"}
{ "login": "sandersemmel", "id": 24325573, "node_id": "MDQ6VXNlcjI0MzI1NTcz", "avatar_url": "https://avatars.githubusercontent.com/u/24325573?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sandersemmel", "html_url": "https://github.com/sandersemmel", "followers_url": "https://api.github.com/users/sandersemmel/followers", "following_url": "https://api.github.com/users/sandersemmel/following{/other_user}", "gists_url": "https://api.github.com/users/sandersemmel/gists{/gist_id}", "starred_url": "https://api.github.com/users/sandersemmel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sandersemmel/subscriptions", "organizations_url": "https://api.github.com/users/sandersemmel/orgs", "repos_url": "https://api.github.com/users/sandersemmel/repos", "events_url": "https://api.github.com/users/sandersemmel/events{/privacy}", "received_events_url": "https://api.github.com/users/sandersemmel/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-12-07T23:59:01
2024-12-08T01:50:38
2024-12-08T01:50:38
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I'm trying to have the response be a specific type of JSON. It is mentioned in the documentation: https://github.com/ollama/ollama/blob/main/docs/api.md#request-structured-outputs ``` curl -X POST http://localhost:11434/api/generate -H "Content-Type: application/json" -d '{ "model": "phi3", "prompt": "Ollama is 22 years old and is busy saving the world. Respond using JSON", "stream": false, "format": { "type": "object", "properties": { "age": { "type": "integer" }, "available": { "type": "boolean" } }, "required": [ "age", "available" ] } }' ``` Output: > {"error":"json: cannot unmarshal object into Go struct field GenerateRequest.format of type string"}% I have also tried other models such as 'llama3:8b' but I get the same message. ### OS macOS ### GPU Nvidia ### CPU _No response_ ### Ollama version ollama version is 0.3.13
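For context: schema-valued `format` only landed with structured outputs in Ollama 0.5, so a 0.3.13 server can only unmarshal the literal string "json", hence the error. A sketch of the older-style call against the documented `/api/generate` endpoint (upgrading the server is the real fix):

```python
import json
import requests

# On pre-0.5 servers, "format" must be the literal string "json",
# not a JSON schema object.
r = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "phi3",
        "prompt": "Ollama is 22 years old and is busy saving the world. "
                  "Respond using JSON with keys age and available.",
        "stream": False,
        "format": "json",
    },
    timeout=120,
)
print(json.loads(r.json()["response"]))
```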
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7991/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7991/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6906
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6906/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6906/comments
https://api.github.com/repos/ollama/ollama/issues/6906/events
https://github.com/ollama/ollama/issues/6906
2,540,569,637
I_kwDOJ0Z1Ps6XbgQl
6,906
Pixtral 12b
{ "login": "mrmiket64", "id": 99057519, "node_id": "U_kgDOBed_bw", "avatar_url": "https://avatars.githubusercontent.com/u/99057519?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mrmiket64", "html_url": "https://github.com/mrmiket64", "followers_url": "https://api.github.com/users/mrmiket64/followers", "following_url": "https://api.github.com/users/mrmiket64/following{/other_user}", "gists_url": "https://api.github.com/users/mrmiket64/gists{/gist_id}", "starred_url": "https://api.github.com/users/mrmiket64/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mrmiket64/subscriptions", "organizations_url": "https://api.github.com/users/mrmiket64/orgs", "repos_url": "https://api.github.com/users/mrmiket64/repos", "events_url": "https://api.github.com/users/mrmiket64/events{/privacy}", "received_events_url": "https://api.github.com/users/mrmiket64/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
2
2024-09-22T00:34:02
2024-10-03T16:59:27
2024-10-03T16:59:27
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello dear Ollama team. Could you please consider adding Pixtral 12b to the available models? This seems to be a very capable multimodal model. ![image](https://github.com/user-attachments/assets/f97c32e8-81d1-496c-9017-bbca7567f948) Thank you All the best Miguel
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6906/reactions", "total_count": 8, "+1": 8, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6906/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5778
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5778/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5778/comments
https://api.github.com/repos/ollama/ollama/issues/5778/events
https://github.com/ollama/ollama/pull/5778
2,417,076,591
PR_kwDOJ0Z1Ps510Hmh
5,778
api: always provide content even if empty
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-07-18T18:24:11
2024-07-18T18:28:21
2024-07-18T18:28:19
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5778", "html_url": "https://github.com/ollama/ollama/pull/5778", "diff_url": "https://github.com/ollama/ollama/pull/5778.diff", "patch_url": "https://github.com/ollama/ollama/pull/5778.patch", "merged_at": "2024-07-18T18:28:19" }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5778/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5778/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/1357
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1357/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1357/comments
https://api.github.com/repos/ollama/ollama/issues/1357/events
https://github.com/ollama/ollama/pull/1357
2,022,301,050
PR_kwDOJ0Z1Ps5g-PZD
1,357
Adding Cross-Origin Support for Web Apps
{ "login": "Nols1000", "id": 2102243, "node_id": "MDQ6VXNlcjIxMDIyNDM=", "avatar_url": "https://avatars.githubusercontent.com/u/2102243?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Nols1000", "html_url": "https://github.com/Nols1000", "followers_url": "https://api.github.com/users/Nols1000/followers", "following_url": "https://api.github.com/users/Nols1000/following{/other_user}", "gists_url": "https://api.github.com/users/Nols1000/gists{/gist_id}", "starred_url": "https://api.github.com/users/Nols1000/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Nols1000/subscriptions", "organizations_url": "https://api.github.com/users/Nols1000/orgs", "repos_url": "https://api.github.com/users/Nols1000/repos", "events_url": "https://api.github.com/users/Nols1000/events{/privacy}", "received_events_url": "https://api.github.com/users/Nols1000/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
6
2023-12-03T02:49:05
2024-07-17T07:22:11
2024-07-17T07:22:11
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/1357", "html_url": "https://github.com/ollama/ollama/pull/1357", "diff_url": "https://github.com/ollama/ollama/pull/1357.diff", "patch_url": "https://github.com/ollama/ollama/pull/1357.patch", "merged_at": null }
This pull request introduces a new feature that allows users to grant cross-origin access to web applications. This will allow easier setup for web applications that would like to access the local Ollama API. Changes Made: - Added an endpoint that allows web applications to request permission to use the local Ollama API - The user is prompted for consent - Added an endpoint to view all authorizations - Added an endpoint to revoke an authorization - Updated documentation to provide clear instructions on configuring cross-origin access for web applications. Addresses: #433 #300 Screenshot: OpenSuse - Gnome (Linux): ![Screenshot from 2023-12-03 03-38-10](https://github.com/jmorganca/ollama/assets/2102243/a3b19a3b-3441-4931-9a41-3c8d7d2b561c) MacOS: <img width="954" alt="image" src="https://github.com/jmorganca/ollama/assets/2102243/910ea876-8bd5-45c9-9fcd-9ae81e90b232">
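A purely hypothetical usage sketch of what calling such endpoints could look like (the paths and payload below are illustrative assumptions, not the names this PR actually adds):

```python
import requests

BASE = "http://localhost:11434"

# List current cross-origin authorizations (hypothetical path).
print(requests.get(f"{BASE}/api/authorizations").json())

# Revoke one of them (hypothetical path and payload).
requests.delete(f"{BASE}/api/authorizations",
                json={"origin": "https://example.com"})
```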
{ "login": "Nols1000", "id": 2102243, "node_id": "MDQ6VXNlcjIxMDIyNDM=", "avatar_url": "https://avatars.githubusercontent.com/u/2102243?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Nols1000", "html_url": "https://github.com/Nols1000", "followers_url": "https://api.github.com/users/Nols1000/followers", "following_url": "https://api.github.com/users/Nols1000/following{/other_user}", "gists_url": "https://api.github.com/users/Nols1000/gists{/gist_id}", "starred_url": "https://api.github.com/users/Nols1000/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Nols1000/subscriptions", "organizations_url": "https://api.github.com/users/Nols1000/orgs", "repos_url": "https://api.github.com/users/Nols1000/repos", "events_url": "https://api.github.com/users/Nols1000/events{/privacy}", "received_events_url": "https://api.github.com/users/Nols1000/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1357/reactions", "total_count": 8, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 8, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1357/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3075
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3075/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3075/comments
https://api.github.com/repos/ollama/ollama/issues/3075/events
https://github.com/ollama/ollama/issues/3075
2,181,298,593
I_kwDOJ0Z1Ps6CA_mh
3,075
Support for Breeze-7b_instruct-v1_0 model
{ "login": "ywchiu", "id": 2088096, "node_id": "MDQ6VXNlcjIwODgwOTY=", "avatar_url": "https://avatars.githubusercontent.com/u/2088096?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ywchiu", "html_url": "https://github.com/ywchiu", "followers_url": "https://api.github.com/users/ywchiu/followers", "following_url": "https://api.github.com/users/ywchiu/following{/other_user}", "gists_url": "https://api.github.com/users/ywchiu/gists{/gist_id}", "starred_url": "https://api.github.com/users/ywchiu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ywchiu/subscriptions", "organizations_url": "https://api.github.com/users/ywchiu/orgs", "repos_url": "https://api.github.com/users/ywchiu/repos", "events_url": "https://api.github.com/users/ywchiu/events{/privacy}", "received_events_url": "https://api.github.com/users/ywchiu/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
3
2024-03-12T10:47:28
2024-03-13T01:39:28
2024-03-12T18:51:31
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
This model was developed by MediaTek in Taiwan, and it incorporates Traditional Chinese, which is essential for users in Taiwan. Please include it in Ollama. https://huggingface.co/MediaTek-Research/Breeze-7B-Instruct-v1_0
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3075/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3075/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8087
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8087/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8087/comments
https://api.github.com/repos/ollama/ollama/issues/8087/events
https://github.com/ollama/ollama/pull/8087
2,738,371,380
PR_kwDOJ0Z1Ps6FJS__
8,087
Update linux.md
{ "login": "Th3On3", "id": 1171019, "node_id": "MDQ6VXNlcjExNzEwMTk=", "avatar_url": "https://avatars.githubusercontent.com/u/1171019?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Th3On3", "html_url": "https://github.com/Th3On3", "followers_url": "https://api.github.com/users/Th3On3/followers", "following_url": "https://api.github.com/users/Th3On3/following{/other_user}", "gists_url": "https://api.github.com/users/Th3On3/gists{/gist_id}", "starred_url": "https://api.github.com/users/Th3On3/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Th3On3/subscriptions", "organizations_url": "https://api.github.com/users/Th3On3/orgs", "repos_url": "https://api.github.com/users/Th3On3/repos", "events_url": "https://api.github.com/users/Th3On3/events{/privacy}", "received_events_url": "https://api.github.com/users/Th3On3/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
0
2024-12-13T13:13:03
2025-01-21T21:17:50
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/8087", "html_url": "https://github.com/ollama/ollama/pull/8087", "diff_url": "https://github.com/ollama/ollama/pull/8087.diff", "patch_url": "https://github.com/ollama/ollama/pull/8087.patch", "merged_at": null }
Updated linux.md to cover manually removing libraries.
{ "login": "Th3On3", "id": 1171019, "node_id": "MDQ6VXNlcjExNzEwMTk=", "avatar_url": "https://avatars.githubusercontent.com/u/1171019?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Th3On3", "html_url": "https://github.com/Th3On3", "followers_url": "https://api.github.com/users/Th3On3/followers", "following_url": "https://api.github.com/users/Th3On3/following{/other_user}", "gists_url": "https://api.github.com/users/Th3On3/gists{/gist_id}", "starred_url": "https://api.github.com/users/Th3On3/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Th3On3/subscriptions", "organizations_url": "https://api.github.com/users/Th3On3/orgs", "repos_url": "https://api.github.com/users/Th3On3/repos", "events_url": "https://api.github.com/users/Th3On3/events{/privacy}", "received_events_url": "https://api.github.com/users/Th3On3/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8087/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8087/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2855
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2855/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2855/comments
https://api.github.com/repos/ollama/ollama/issues/2855/events
https://github.com/ollama/ollama/issues/2855
2,162,666,774
I_kwDOJ0Z1Ps6A560W
2,855
Kaspersky Internet Security detects false positive
{ "login": "yanniedog", "id": 25560742, "node_id": "MDQ6VXNlcjI1NTYwNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/25560742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yanniedog", "html_url": "https://github.com/yanniedog", "followers_url": "https://api.github.com/users/yanniedog/followers", "following_url": "https://api.github.com/users/yanniedog/following{/other_user}", "gists_url": "https://api.github.com/users/yanniedog/gists{/gist_id}", "starred_url": "https://api.github.com/users/yanniedog/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yanniedog/subscriptions", "organizations_url": "https://api.github.com/users/yanniedog/orgs", "repos_url": "https://api.github.com/users/yanniedog/repos", "events_url": "https://api.github.com/users/yanniedog/events{/privacy}", "received_events_url": "https://api.github.com/users/yanniedog/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
6
2024-03-01T05:57:40
2024-03-12T07:28:11
2024-03-12T07:28:11
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
<img width="996" alt="image" src="https://github.com/ollama/ollama/assets/25560742/fd2921ef-908b-4ade-b633-df0da7c89585">
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2855/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2855/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5756
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5756/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5756/comments
https://api.github.com/repos/ollama/ollama/issues/5756/events
https://github.com/ollama/ollama/issues/5756
2,414,663,192
I_kwDOJ0Z1Ps6P7NYY
5,756
Ollama seems to be limited by a single CPU thread on a multi-GPU machine with parallel processing enabled
{ "login": "traindi", "id": 26125842, "node_id": "MDQ6VXNlcjI2MTI1ODQy", "avatar_url": "https://avatars.githubusercontent.com/u/26125842?v=4", "gravatar_id": "", "url": "https://api.github.com/users/traindi", "html_url": "https://github.com/traindi", "followers_url": "https://api.github.com/users/traindi/followers", "following_url": "https://api.github.com/users/traindi/following{/other_user}", "gists_url": "https://api.github.com/users/traindi/gists{/gist_id}", "starred_url": "https://api.github.com/users/traindi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/traindi/subscriptions", "organizations_url": "https://api.github.com/users/traindi/orgs", "repos_url": "https://api.github.com/users/traindi/repos", "events_url": "https://api.github.com/users/traindi/events{/privacy}", "received_events_url": "https://api.github.com/users/traindi/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396220, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA", "url": "https://api.github.com/repos/ollama/ollama/labels/question", "name": "question", "color": "d876e3", "default": true, "description": "General questions" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
3
2024-07-17T21:53:39
2024-07-23T00:00:30
2024-07-23T00:00:14
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I have 2 GPUs and have set the OLLAMA_NUM_PARALLEL environment variable. When multiple requests come in, I can see the model being loaded into both GPUs' memory, but the GPU usage hovers around 40% for both of them. When I look at the CPU usage, only 1 thread is being used and it hits 100%. I suspect that it is being limited by a single CPU thread. How can we make the 2 concurrent requests be served by 2 separate threads? I am running ollama in docker (if that matters). ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.2.5
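A quick way to reproduce this kind of report is to fire concurrent requests from separate client threads and compare wall-clock times (a sketch; the model name and default host are assumptions):

```python
import time
from concurrent.futures import ThreadPoolExecutor

import ollama

def ask(i: int) -> float:
    t0 = time.time()
    ollama.generate(model="llama3", prompt=f"Count to 50, request {i}.")
    return time.time() - t0

# With OLLAMA_NUM_PARALLEL=2 and two GPUs, the two requests should
# overlap rather than run back to back.
with ThreadPoolExecutor(max_workers=2) as pool:
    print(list(pool.map(ask, range(2))))
```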
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5756/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5756/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2409
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2409/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2409/comments
https://api.github.com/repos/ollama/ollama/issues/2409/events
https://github.com/ollama/ollama/issues/2409
2,124,900,522
I_kwDOJ0Z1Ps5-p2iq
2,409
ollama serve stuck
{ "login": "OnixAlgo", "id": 140391442, "node_id": "U_kgDOCF40Eg", "avatar_url": "https://avatars.githubusercontent.com/u/140391442?v=4", "gravatar_id": "", "url": "https://api.github.com/users/OnixAlgo", "html_url": "https://github.com/OnixAlgo", "followers_url": "https://api.github.com/users/OnixAlgo/followers", "following_url": "https://api.github.com/users/OnixAlgo/following{/other_user}", "gists_url": "https://api.github.com/users/OnixAlgo/gists{/gist_id}", "starred_url": "https://api.github.com/users/OnixAlgo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/OnixAlgo/subscriptions", "organizations_url": "https://api.github.com/users/OnixAlgo/orgs", "repos_url": "https://api.github.com/users/OnixAlgo/repos", "events_url": "https://api.github.com/users/OnixAlgo/events{/privacy}", "received_events_url": "https://api.github.com/users/OnixAlgo/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-02-08T11:01:43
2024-08-11T12:05:05
2024-02-08T11:11:56
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
"I haven't had this issue until I installed AMD ROCM on my system; it gets stuck at this step in every version that I try. ollama serve time=2024-02-08T11:53:18.991+01:00 level=INFO source=images.go:860 msg="total blobs: 0" time=2024-02-08T11:53:18.992+01:00 level=INFO source=images.go:867 msg="total unused blobs removed: 0" time=2024-02-08T11:53:18.992+01:00 level=INFO source=routes.go:995 msg="Listening on 127.0.0.1:11434 (version 0.1.23)" time=2024-02-08T11:53:18.993+01:00 level=INFO source=payload_common.go:106 msg="Extracting dynamic libraries..." time=2024-02-08T11:53:21.432+01:00 level=INFO source=payload_common.go:145 msg="Dynamic LLM libraries [rocm_v6 cuda_v11 rocm_v5 cpu cpu_avx2 cpu_avx]" time=2024-02-08T11:53:21.432+01:00 level=INFO source=gpu.go:94 msg="Detecting GPU type" time=2024-02-08T11:53:21.432+01:00 level=INFO source=gpu.go:242 msg="Searching for GPU management library libnvidia-ml.so" time=2024-02-08T11:53:21.434+01:00 level=INFO source=gpu.go:288 msg="Discovered GPU libraries: []" time=2024-02-08T11:53:21.434+01:00 level=INFO source=gpu.go:242 msg="Searching for GPU management library librocm_smi64.so" time=2024-02-08T11:53:21.435+01:00 level=INFO source=gpu.go:288 msg="Discovered GPU libraries: [/opt/rocm/lib/librocm_smi64.so.6.0.60002 /opt/rocm-6.0.2/lib/librocm_smi64.so.6.0.60002]" time=2024-02-08T11:53:21.446+01:00 level=INFO source=gpu.go:109 msg="Radeon GPU detected" time=2024-02-08T11:53:21.446+01:00 level=INFO source=cpu_common.go:11 msg="CPU has AVX2"
{ "login": "OnixAlgo", "id": 140391442, "node_id": "U_kgDOCF40Eg", "avatar_url": "https://avatars.githubusercontent.com/u/140391442?v=4", "gravatar_id": "", "url": "https://api.github.com/users/OnixAlgo", "html_url": "https://github.com/OnixAlgo", "followers_url": "https://api.github.com/users/OnixAlgo/followers", "following_url": "https://api.github.com/users/OnixAlgo/following{/other_user}", "gists_url": "https://api.github.com/users/OnixAlgo/gists{/gist_id}", "starred_url": "https://api.github.com/users/OnixAlgo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/OnixAlgo/subscriptions", "organizations_url": "https://api.github.com/users/OnixAlgo/orgs", "repos_url": "https://api.github.com/users/OnixAlgo/repos", "events_url": "https://api.github.com/users/OnixAlgo/events{/privacy}", "received_events_url": "https://api.github.com/users/OnixAlgo/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2409/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2409/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7213
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7213/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7213/comments
https://api.github.com/repos/ollama/ollama/issues/7213/events
https://github.com/ollama/ollama/pull/7213
2,589,697,272
PR_kwDOJ0Z1Ps5-uOxO
7,213
Update README.md, Linux AMD ROCm area
{ "login": "boessu", "id": 2807976, "node_id": "MDQ6VXNlcjI4MDc5NzY=", "avatar_url": "https://avatars.githubusercontent.com/u/2807976?v=4", "gravatar_id": "", "url": "https://api.github.com/users/boessu", "html_url": "https://github.com/boessu", "followers_url": "https://api.github.com/users/boessu/followers", "following_url": "https://api.github.com/users/boessu/following{/other_user}", "gists_url": "https://api.github.com/users/boessu/gists{/gist_id}", "starred_url": "https://api.github.com/users/boessu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/boessu/subscriptions", "organizations_url": "https://api.github.com/users/boessu/orgs", "repos_url": "https://api.github.com/users/boessu/repos", "events_url": "https://api.github.com/users/boessu/events{/privacy}", "received_events_url": "https://api.github.com/users/boessu/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-10-15T19:34:18
2024-11-21T07:48:55
2024-11-21T07:48:55
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7213", "html_url": "https://github.com/ollama/ollama/pull/7213", "diff_url": "https://github.com/ollama/ollama/pull/7213.diff", "patch_url": "https://github.com/ollama/ollama/pull/7213.patch", "merged_at": "2024-11-21T07:48:55" }
My small contribution to this amazing work here
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7213/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7213/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/1220
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1220/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1220/comments
https://api.github.com/repos/ollama/ollama/issues/1220/events
https://github.com/ollama/ollama/issues/1220
2,004,118,332
I_kwDOJ0Z1Ps53dGs8
1,220
Access ollama output directly on streamlit screen
{ "login": "arnram", "id": 91866740, "node_id": "U_kgDOBXnGdA", "avatar_url": "https://avatars.githubusercontent.com/u/91866740?v=4", "gravatar_id": "", "url": "https://api.github.com/users/arnram", "html_url": "https://github.com/arnram", "followers_url": "https://api.github.com/users/arnram/followers", "following_url": "https://api.github.com/users/arnram/following{/other_user}", "gists_url": "https://api.github.com/users/arnram/gists{/gist_id}", "starred_url": "https://api.github.com/users/arnram/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/arnram/subscriptions", "organizations_url": "https://api.github.com/users/arnram/orgs", "repos_url": "https://api.github.com/users/arnram/repos", "events_url": "https://api.github.com/users/arnram/events{/privacy}", "received_events_url": "https://api.github.com/users/arnram/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
8
2023-11-21T11:58:05
2023-12-08T23:28:43
2023-12-07T19:30:45
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi, I am in the process of developing a chatbot application using the RAG (Retrieval-Augmented Generation) technique alongside Ollama and LangChain. Initially, I successfully constructed the application using LangChain and achieved accurate responses displayed on the command-line interface (CLI). Subsequently, I attempted to create a graphical user interface (GUI) for the application. However, I encountered an issue where Ollama initially displays its output on the CLI before storing the string in a variable to provide it to Streamlit. My concern pertains to accessing the direct output stream of Ollama within Streamlit, bypassing the CLI altogether. This direct access to the output stream of Ollama within the Streamlit interface would be more efficient and beneficial for my application's functionality. I would appreciate guidance on how to redirect Ollama's output stream directly to Streamlit within my application.
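A minimal sketch of streaming Ollama output straight into Streamlit without going through the CLI, assuming the `ollama` Python client and `st.write_stream` (available in recent Streamlit releases); the model name is just an example:

```python
import ollama
import streamlit as st

prompt = st.text_input("Ask something")

def token_stream(p: str):
    # stream=True yields chunks as the model produces them
    for chunk in ollama.generate(model="llama3", prompt=p, stream=True):
        yield chunk["response"]

if prompt:
    st.write_stream(token_stream(prompt))
```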
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1220/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1220/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/6454
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6454/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6454/comments
https://api.github.com/repos/ollama/ollama/issues/6454/events
https://github.com/ollama/ollama/issues/6454
2,478,163,938
I_kwDOJ0Z1Ps6Ttcfi
6,454
obtain attention matrices during inference, similar to the output_attentions=True parameter in the transformers package
{ "login": "yuhkalhic", "id": 146904269, "node_id": "U_kgDOCMGUzQ", "avatar_url": "https://avatars.githubusercontent.com/u/146904269?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yuhkalhic", "html_url": "https://github.com/yuhkalhic", "followers_url": "https://api.github.com/users/yuhkalhic/followers", "following_url": "https://api.github.com/users/yuhkalhic/following{/other_user}", "gists_url": "https://api.github.com/users/yuhkalhic/gists{/gist_id}", "starred_url": "https://api.github.com/users/yuhkalhic/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yuhkalhic/subscriptions", "organizations_url": "https://api.github.com/users/yuhkalhic/orgs", "repos_url": "https://api.github.com/users/yuhkalhic/repos", "events_url": "https://api.github.com/users/yuhkalhic/events{/privacy}", "received_events_url": "https://api.github.com/users/yuhkalhic/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
2
2024-08-21T14:17:50
2024-12-02T21:53:06
2024-12-02T21:53:06
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I'd like to propose a new feature for Ollama: the ability to access attention matrices and/or the KV-Cache during model inference. This functionality is similar to what's available in the Hugging Face Transformers library, where users can set `output_attentions=True` or access `past_key_values`.
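For comparison, the Transformers behaviour being requested looks like this (standard Hugging Face API; the model name is just an example):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tok("Hello world", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs, output_attentions=True, use_cache=True)

# One attention tensor per layer, shaped (batch, heads, seq, seq).
print(len(out.attentions), out.attentions[0].shape)
# The KV cache: one (key, value) entry per layer.
print(len(out.past_key_values))
```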
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6454/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6454/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2623
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2623/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2623/comments
https://api.github.com/repos/ollama/ollama/issues/2623/events
https://github.com/ollama/ollama/issues/2623
2,145,686,978
I_kwDOJ0Z1Ps5_5JXC
2,623
JSON without newline grammar
{ "login": "Xe", "id": 529003, "node_id": "MDQ6VXNlcjUyOTAwMw==", "avatar_url": "https://avatars.githubusercontent.com/u/529003?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Xe", "html_url": "https://github.com/Xe", "followers_url": "https://api.github.com/users/Xe/followers", "following_url": "https://api.github.com/users/Xe/following{/other_user}", "gists_url": "https://api.github.com/users/Xe/gists{/gist_id}", "starred_url": "https://api.github.com/users/Xe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Xe/subscriptions", "organizations_url": "https://api.github.com/users/Xe/orgs", "repos_url": "https://api.github.com/users/Xe/repos", "events_url": "https://api.github.com/users/Xe/events{/privacy}", "received_events_url": "https://api.github.com/users/Xe/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
7
2024-02-21T02:53:38
2024-12-05T00:50:54
2024-12-05T00:50:53
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I'd like to have a variant of the JSON grammar that forbids newlines from the output. Sometimes when you activate JSON grammar, the model will spit out an endless series of newlines. It'd be nice to have a version of the JSON grammar that forbids newlines (and probably whitespace outside of strings) to prevent this from happening.
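A sketch of what that variant could look like in llama.cpp's GBNF notation, shown here as a Python string for reference (hand-written and untested; the stock JSON grammar allows newlines in its `ws` rule, while this one restricts whitespace outside strings to single spaces):

```python
# Hand-written GBNF sketch: JSON with whitespace limited to an optional
# single space outside of strings, so the sampler can never emit runs
# of newlines.
JSON_NO_NEWLINE_GBNF = r"""
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null")
object ::= "{" ws (string ws ":" ws value (ws "," ws string ws ":" ws value)*)? ws "}"
array  ::= "[" ws (value (ws "," ws value)*)? ws "]"
string ::= "\"" ([^"\\] | "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]))* "\""
number ::= "-"? [0-9]+ ("." [0-9]+)? ([eE] [-+]? [0-9]+)?
ws     ::= " "?
"""
```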
{ "login": "ParthSareen", "id": 29360864, "node_id": "MDQ6VXNlcjI5MzYwODY0", "avatar_url": "https://avatars.githubusercontent.com/u/29360864?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ParthSareen", "html_url": "https://github.com/ParthSareen", "followers_url": "https://api.github.com/users/ParthSareen/followers", "following_url": "https://api.github.com/users/ParthSareen/following{/other_user}", "gists_url": "https://api.github.com/users/ParthSareen/gists{/gist_id}", "starred_url": "https://api.github.com/users/ParthSareen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ParthSareen/subscriptions", "organizations_url": "https://api.github.com/users/ParthSareen/orgs", "repos_url": "https://api.github.com/users/ParthSareen/repos", "events_url": "https://api.github.com/users/ParthSareen/events{/privacy}", "received_events_url": "https://api.github.com/users/ParthSareen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2623/reactions", "total_count": 8, "+1": 8, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2623/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/34
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/34/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/34/comments
https://api.github.com/repos/ollama/ollama/issues/34/events
https://github.com/ollama/ollama/issues/34
1,784,660,042
I_kwDOJ0Z1Ps5qX8BK
34
Feedback: path not added
{ "login": "technovangelist", "id": 633681, "node_id": "MDQ6VXNlcjYzMzY4MQ==", "avatar_url": "https://avatars.githubusercontent.com/u/633681?v=4", "gravatar_id": "", "url": "https://api.github.com/users/technovangelist", "html_url": "https://github.com/technovangelist", "followers_url": "https://api.github.com/users/technovangelist/followers", "following_url": "https://api.github.com/users/technovangelist/following{/other_user}", "gists_url": "https://api.github.com/users/technovangelist/gists{/gist_id}", "starred_url": "https://api.github.com/users/technovangelist/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/technovangelist/subscriptions", "organizations_url": "https://api.github.com/users/technovangelist/orgs", "repos_url": "https://api.github.com/users/technovangelist/repos", "events_url": "https://api.github.com/users/technovangelist/events{/privacy}", "received_events_url": "https://api.github.com/users/technovangelist/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-07-02T14:07:55
2023-07-08T23:44:18
2023-07-08T23:44:18
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Looks like ```%localappdata%\Packages\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\LocalCache\local-packages\Python311\Scripts``` doesn't get added to the path by `pip install ollama`, so the final pip step could print a note about adding that directory to the path.
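A small sketch of the kind of check such a print statement could be based on (a hypothetical helper, not part of any shipped installer):

```
import os
import sysconfig

# Directory where pip places console scripts for this interpreter; on the
# Windows Store Python this resolves to the LocalCache\...\Scripts path above.
scripts_dir = sysconfig.get_path("scripts")

# Case-insensitive PATH membership check, since Windows paths are case-insensitive.
path_entries = os.environ.get("PATH", "").split(os.pathsep)
if scripts_dir.lower() not in (p.lower() for p in path_entries):
    print(f"Note: {scripts_dir} is not on PATH; add it so installed scripts can be run.")
```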
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/34/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/34/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5859
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5859/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5859/comments
https://api.github.com/repos/ollama/ollama/issues/5859/events
https://github.com/ollama/ollama/pull/5859
2,423,570,266
PR_kwDOJ0Z1Ps52IRNO
5,859
Prevent partial loading on mixed GPU brands
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-07-22T18:59:27
2024-07-30T18:06:45
2024-07-30T18:06:42
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5859", "html_url": "https://github.com/ollama/ollama/pull/5859", "diff_url": "https://github.com/ollama/ollama/pull/5859.diff", "patch_url": "https://github.com/ollama/ollama/pull/5859.patch", "merged_at": "2024-07-30T18:06:42" }
In multi-brand GPU setups, if we couldn't fully load the model we would fall through the scheduler and mistakenly try to load across a mix of brands. This makes sure we find the set of GPU(s) that best fits for the partial load. Fixes #5476
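A rough Python sketch of the scheduling idea described here (my paraphrase of the behavior, not the actual Go implementation): group GPUs by brand and pick, within a single brand, a subset that fits the load, falling back to the best single-brand partial fit.

```
from itertools import combinations

def best_single_brand_fit(gpus, required_bytes):
    """gpus: list of dicts like {"brand": "nvidia", "free": <bytes>}.
    Returns a single-brand subset whose free VRAM covers the load if one
    exists, otherwise the single-brand subset with the most free VRAM for a
    partial load. The real scheduler weighs more factors; this is a sketch."""
    by_brand = {}
    for g in gpus:
        by_brand.setdefault(g["brand"], []).append(g)

    best = None
    for brand, devs in by_brand.items():
        # Exponential in GPU count, but machines rarely have more than a few.
        for r in range(1, len(devs) + 1):
            for subset in combinations(devs, r):
                free = sum(g["free"] for g in subset)
                if free >= required_bytes:
                    return list(subset)  # a full fit on one brand wins
                if best is None or free > sum(g["free"] for g in best):
                    best = list(subset)
    return best  # best partial fit, still confined to a single brand
```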
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5859/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5859/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7773
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7773/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7773/comments
https://api.github.com/repos/ollama/ollama/issues/7773/events
https://github.com/ollama/ollama/issues/7773
2,677,755,298
I_kwDOJ0Z1Ps6fm02i
7,773
Ollama unloads model after ~30 seconds per message with api/chat despite setting keep_alive=-1 and OLLAMA_KEEP_ALIVE=-1 in Windows environment variables.
{ "login": "SingularityMan", "id": 91804288, "node_id": "U_kgDOBXjSgA", "avatar_url": "https://avatars.githubusercontent.com/u/91804288?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SingularityMan", "html_url": "https://github.com/SingularityMan", "followers_url": "https://api.github.com/users/SingularityMan/followers", "following_url": "https://api.github.com/users/SingularityMan/following{/other_user}", "gists_url": "https://api.github.com/users/SingularityMan/gists{/gist_id}", "starred_url": "https://api.github.com/users/SingularityMan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SingularityMan/subscriptions", "organizations_url": "https://api.github.com/users/SingularityMan/orgs", "repos_url": "https://api.github.com/users/SingularityMan/repos", "events_url": "https://api.github.com/users/SingularityMan/events{/privacy}", "received_events_url": "https://api.github.com/users/SingularityMan/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-11-21T02:43:05
2024-11-21T12:31:13
2024-11-21T11:15:04
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Basically the title. I'm trying to stream responses from a single model periodically using this method:

```
async def generate_text_stream(
    self,
    messages: list,
    agent_messages: list,
    system_prompt: str,
    user_input: str,
    context_length: int = 32000,
    temperature: float = 0.7,
    top_p: float = 0.3,
    top_k: int = 10000
) -> Tuple[List, List, AsyncGenerator[str, None]]:
    """
    Generates a text response as a stream using ollama.chat.
    Returns an asynchronous generator yielding sentences.
    """
    messages[0] = {"role": "system", "content": system_prompt}
    messages.append({"role": "user", "content": user_input})

    async def fetch_stream():
        loop = asyncio.get_event_loop()
        buffer = ''
        complete_response = ''  # To hold the entire concatenated output

        def run_chat():
            return ollama.chat(
                model=self.language_model,
                messages=messages,
                stream=True,
                keep_alive=-1,
                options={
                    "repeat_penalty": 1.15,
                    "temperature": temperature,
                    "top_p": top_p,
                    "top_k": top_k,
                    "num_ctx": context_length
                }
            )

        # Run ollama.chat in the executor to prevent blocking the event loop
        stream = await loop.run_in_executor(None, run_chat)

        for chunk in stream:
            content = chunk.get('message', {}).get('content', '')
            if content:
                buffer += content
                sentences, buffer = split_buffer_into_sentences(buffer)
                for sentence in sentences:
                    # Clean up the sentence
                    sentence = clean_text(sentence)
                    complete_response += sentence + ' '  # Concatenate to the complete response
                    yield sentence

        # Handle any remaining buffer
        if buffer.strip():
            buffer = clean_text(buffer)
            complete_response += buffer + ' '  # Add remaining buffer to the complete response

        # Append the complete response as a single message
        complete_response = complete_response.strip()
        messages.append({"role": "assistant", "content": complete_response})
        agent_messages.append(
            f"Agent Name:{self.agent_name}, ({self.agent_gender})\nAgent Response: {complete_response}"
        )

    return messages, agent_messages, fetch_stream()
```

This function streams the chat response and returns two updated lists and an `AsyncGenerator` in the form of `fetch_stream()`. This is done sequentially between two agents running the same model in Ollama. The problem I noticed is that if I don't send a message within approximately 30 seconds, the server automatically unloads the model and then reloads it, no matter which value I set keep_alive to. I also have 48GB of VRAM, and this model, combined with other, smaller AI models outside of Ollama, collectively uses only 36GB of it; even at considerably higher context sizes there is still VRAM to spare.
I don't really know what I'm doing wrong here, but here is the server log:

```
time=2024-11-20T21:26:14.775-05:00 level=DEBUG source=sched.go:407 msg="context for request finished"
time=2024-11-20T21:26:14.779-05:00 level=DEBUG source=sched.go:339 msg="runner with non-zero duration has gone idle, adding timer" modelPath=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc duration=2562047h47m16.854775807s
time=2024-11-20T21:26:14.779-05:00 level=DEBUG source=sched.go:357 msg="after processing request finished event" modelPath=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc refCount=0
time=2024-11-20T21:26:53.535-05:00 level=DEBUG source=sched.go:575 msg="evaluating already loaded" model=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc
time=2024-11-20T21:26:53.535-05:00 level=DEBUG source=sched.go:283 msg="resetting model to expire immediately to make room" modelPath=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc refCount=0
time=2024-11-20T21:26:53.540-05:00 level=DEBUG source=sched.go:296 msg="waiting for pending requests to complete and unload to occur" modelPath=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc
time=2024-11-20T21:26:53.540-05:00 level=DEBUG source=sched.go:360 msg="runner expired event received" modelPath=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc
time=2024-11-20T21:26:53.542-05:00 level=DEBUG source=sched.go:375 msg="got lock to unload" modelPath=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc
time=2024-11-20T21:26:53.542-05:00 level=DEBUG source=gpu.go:398 msg="updating system memory data" before.total="127.9 GiB" before.free="114.1 GiB" before.free_swap="110.3 GiB" now.total="127.9 GiB" now.free="112.4 GiB" now.free_swap="87.4 GiB"
time=2024-11-20T21:26:53.563-05:00 level=DEBUG source=gpu.go:448 msg="updating cuda memory data" gpu=GPU-86c1a0d8-a857-7035-6d12-957836f9d5d6 name="Quadro RTX 8000" overhead="104.8 MiB" before.total="48.0 GiB" before.free="30.9 GiB" now.total="48.0 GiB" now.free="9.7 GiB" now.used="38.2 GiB"
releasing nvml library
time=2024-11-20T21:26:53.584-05:00 level=DEBUG source=server.go:1068 msg="stopping llama server"
time=2024-11-20T21:26:53.585-05:00 level=DEBUG source=server.go:1074 msg="waiting for llama server to exit"
time=2024-11-20T21:26:53.826-05:00 level=DEBUG source=gpu.go:398 msg="updating system memory data" before.total="127.9 GiB" before.free="112.4 GiB" before.free_swap="87.4 GiB" now.total="127.9 GiB" now.free="113.8 GiB" now.free_swap="108.7 GiB"
time=2024-11-20T21:26:53.841-05:00 level=DEBUG source=gpu.go:448 msg="updating cuda memory data" gpu=GPU-86c1a0d8-a857-7035-6d12-957836f9d5d6 name="Quadro RTX 8000" overhead="104.8 MiB" before.total="48.0 GiB" before.free="9.7 GiB" now.total="48.0 GiB" now.free="30.9 GiB" now.used="17.0 GiB"
releasing nvml library
time=2024-11-20T21:26:53.842-05:00 level=DEBUG source=sched.go:659 msg="gpu VRAM free memory converged after 0.30 seconds" model=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc
time=2024-11-20T21:26:53.845-05:00 level=DEBUG source=server.go:1078 msg="llama server stopped"
time=2024-11-20T21:26:53.845-05:00 level=DEBUG source=sched.go:380 msg="runner released" modelPath=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc
time=2024-11-20T21:26:53.845-05:00 level=DEBUG source=sched.go:384 msg="sending an unloaded event" modelPath=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc
time=2024-11-20T21:26:53.846-05:00 level=DEBUG source=sched.go:302 msg="unload completed" modelPath=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc
time=2024-11-20T21:26:53.846-05:00 level=DEBUG source=gpu.go:398 msg="updating system memory data" before.total="127.9 GiB" before.free="113.8 GiB" before.free_swap="108.7 GiB" now.total="127.9 GiB" now.free="114.1 GiB" now.free_swap="110.3 GiB"
time=2024-11-20T21:26:53.856-05:00 level=DEBUG source=gpu.go:448 msg="updating cuda memory data" gpu=GPU-86c1a0d8-a857-7035-6d12-957836f9d5d6 name="Quadro RTX 8000" overhead="104.8 MiB" before.total="48.0 GiB" before.free="30.9 GiB" now.total="48.0 GiB" now.free="30.9 GiB" now.used="17.0 GiB"
releasing nvml library
time=2024-11-20T21:26:53.900-05:00 level=DEBUG source=sched.go:224 msg="loading first model" model=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc
time=2024-11-20T21:26:53.900-05:00 level=DEBUG source=memory.go:107 msg=evaluating library=cuda gpu_count=1 available="[30.9 GiB]"
time=2024-11-20T21:26:53.902-05:00 level=INFO source=sched.go:714 msg="new model will fit in available VRAM in single GPU, loading" model=C:\Users\me\.ollama\models\blobs\sha256-d7e4b00a7d7a8d03d4eed9b0f3f61a427e9f0fc5dea6aeb414e41dee23dc8ecc gpu=GPU-86c1a0d8-a857-7035-6d12-957836f9d5d6 parallel=8 available=33200353280 required="29.1 GiB"
time=2024-11-20T21:26:53.902-05:00 level=DEBUG source=gpu.go:398 msg="updating system memory data" before.total="127.9 GiB" before.free="114.1 GiB" before.free_swap="110.3 GiB" now.total="127.9 GiB" now.free="114.1 GiB" now.free_swap="110.3 GiB"
time=2024-11-20T21:26:53.918-05:00 level=DEBUG source=gpu.go:448 msg="updating cuda memory data" gpu=GPU-86c1a0d8-a857-7035-6d12-957836f9d5d6 name="Quadro RTX 8000" overhead="104.8 MiB" before.total="48.0 GiB" before.free="30.9 GiB" now.total="48.0 GiB" now.free="30.9 GiB" now.used="17.0 GiB"
releasing nvml library
time=2024-11-20T21:26:53.919-05:00 level=INFO source=server.go:105 msg="system memory" total="127.9 GiB" free="114.1 GiB" free_swap="110.3 GiB"
time=2024-11-20T21:26:53.920-05:00 level=DEBUG source=memory.go:107 msg=evaluating library=cuda gpu_count=1 available="[30.9 GiB]"
time=2024-11-20T21:26:53.920-05:00 level=INFO source=memory.go:343 msg="offload to cuda" layers.requested=-1 layers.model=47 layers.offload=47 layers.split="" memory.available="[30.9 GiB]" memory.gpu_overhead="476.8 MiB" memory.required.full="29.1 GiB" memory.required.partial="29.1 GiB" memory.required.kv="11.5 GiB" memory.required.allocations="[29.1 GiB]" memory.weights.total="25.1 GiB" memory.weights.repeating="24.2 GiB" memory.weights.nonrepeating="922.9 MiB" memory.graph.full="2.1 GiB" memory.graph.partial="2.2 GiB"
```

Based on this, I don't know if the issue is an actual timeout or Ollama auto-unloading the model due to resource usage; if it's the latter, what can I do about it? I have plenty of VRAM left over, but Ollama keeps unloading the model for whatever reason.
I also ran `ollama ps` and it returned this:

```
NAME                        ID              SIZE     PROCESSOR    UNTIL
gemma2:27b-instruct-q4_0    53261bc9c192    31 GB    100% GPU     Forever
```

### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.4.2
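One detail worth noting from the log above (an observation, not a confirmed diagnosis): the line `resetting model to expire immediately to make room` appears the moment the next request arrives, which suggests the scheduler is reloading the runner because the incoming request asks for a different runner configuration, not because keep_alive expired. A minimal sketch of keeping the runner reusable by pinning identical options across both agents, assuming the `ollama` Python client:

```
import ollama

# Hypothetical shared settings: if every request uses the same model and the
# same options (in particular num_ctx), the scheduler can reuse the loaded
# runner instead of unloading and reloading it between agents.
SHARED_OPTIONS = {"num_ctx": 32000}

def ask(model: str, messages: list) -> str:
    resp = ollama.chat(
        model=model,
        messages=messages,
        keep_alive=-1,           # keep the runner resident indefinitely
        options=SHARED_OPTIONS,  # identical options -> no reload between agents
    )
    return resp["message"]["content"]
```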
{ "login": "SingularityMan", "id": 91804288, "node_id": "U_kgDOBXjSgA", "avatar_url": "https://avatars.githubusercontent.com/u/91804288?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SingularityMan", "html_url": "https://github.com/SingularityMan", "followers_url": "https://api.github.com/users/SingularityMan/followers", "following_url": "https://api.github.com/users/SingularityMan/following{/other_user}", "gists_url": "https://api.github.com/users/SingularityMan/gists{/gist_id}", "starred_url": "https://api.github.com/users/SingularityMan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SingularityMan/subscriptions", "organizations_url": "https://api.github.com/users/SingularityMan/orgs", "repos_url": "https://api.github.com/users/SingularityMan/repos", "events_url": "https://api.github.com/users/SingularityMan/events{/privacy}", "received_events_url": "https://api.github.com/users/SingularityMan/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7773/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7773/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3381
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3381/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3381/comments
https://api.github.com/repos/ollama/ollama/issues/3381/events
https://github.com/ollama/ollama/issues/3381
2,212,235,357
I_kwDOJ0Z1Ps6D3Ahd
3,381
Doesn't Ollama support gguf for embedding models?
{ "login": "17Reset", "id": 122418720, "node_id": "U_kgDOB0v2IA", "avatar_url": "https://avatars.githubusercontent.com/u/122418720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/17Reset", "html_url": "https://github.com/17Reset", "followers_url": "https://api.github.com/users/17Reset/followers", "following_url": "https://api.github.com/users/17Reset/following{/other_user}", "gists_url": "https://api.github.com/users/17Reset/gists{/gist_id}", "starred_url": "https://api.github.com/users/17Reset/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/17Reset/subscriptions", "organizations_url": "https://api.github.com/users/17Reset/orgs", "repos_url": "https://api.github.com/users/17Reset/repos", "events_url": "https://api.github.com/users/17Reset/events{/privacy}", "received_events_url": "https://api.github.com/users/17Reset/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-03-28T02:41:29
2024-03-28T05:17:09
2024-03-28T05:17:09
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
When I try to add an embedding model in GGUF format, I can't import it into Ollama.
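For reference, GGUF models can generally be imported through a Modelfile, provided the architecture is one Ollama supports. A sketch of the flow (file paths and the model name are placeholders):

```
import subprocess
import requests

# Hypothetical GGUF file and local model name, for illustration.
with open("Modelfile", "w") as f:
    f.write("FROM ./nomic-embed-text.gguf\n")

# Import the GGUF into Ollama under a local name.
subprocess.run(["ollama", "create", "my-embedder", "-f", "Modelfile"], check=True)

# Ask the server for an embedding.
r = requests.post(
    "http://localhost:11434/api/embeddings",
    json={"model": "my-embedder", "prompt": "hello world"},
)
print(len(r.json()["embedding"]))
```

Whether a given GGUF works depends on whether its model architecture is supported by the runner; unsupported architectures fail at import or load time.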
{ "login": "17Reset", "id": 122418720, "node_id": "U_kgDOB0v2IA", "avatar_url": "https://avatars.githubusercontent.com/u/122418720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/17Reset", "html_url": "https://github.com/17Reset", "followers_url": "https://api.github.com/users/17Reset/followers", "following_url": "https://api.github.com/users/17Reset/following{/other_user}", "gists_url": "https://api.github.com/users/17Reset/gists{/gist_id}", "starred_url": "https://api.github.com/users/17Reset/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/17Reset/subscriptions", "organizations_url": "https://api.github.com/users/17Reset/orgs", "repos_url": "https://api.github.com/users/17Reset/repos", "events_url": "https://api.github.com/users/17Reset/events{/privacy}", "received_events_url": "https://api.github.com/users/17Reset/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3381/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3381/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2539
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2539/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2539/comments
https://api.github.com/repos/ollama/ollama/issues/2539/events
https://github.com/ollama/ollama/issues/2539
2,138,446,844
I_kwDOJ0Z1Ps5_dhv8
2,539
Windows Preview v0.1.25: proxy authentication failed
{ "login": "ben0r33", "id": 72662304, "node_id": "MDQ6VXNlcjcyNjYyMzA0", "avatar_url": "https://avatars.githubusercontent.com/u/72662304?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ben0r33", "html_url": "https://github.com/ben0r33", "followers_url": "https://api.github.com/users/ben0r33/followers", "following_url": "https://api.github.com/users/ben0r33/following{/other_user}", "gists_url": "https://api.github.com/users/ben0r33/gists{/gist_id}", "starred_url": "https://api.github.com/users/ben0r33/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ben0r33/subscriptions", "organizations_url": "https://api.github.com/users/ben0r33/orgs", "repos_url": "https://api.github.com/users/ben0r33/repos", "events_url": "https://api.github.com/users/ben0r33/events{/privacy}", "received_events_url": "https://api.github.com/users/ben0r33/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
4
2024-02-16T11:51:39
2024-02-29T06:05:46
2024-02-20T03:50:28
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello, I'm stoked about the Windows preview, thanks! When pulling a model, I'm receiving a proxy authentication error. How can I either set a manual proxy configuration or add proxy authentication credentials to Ollama on Windows? Background: running on Windows 10, the proxy is pre-set by company policy. Manually pointing Ollama at a local cntlm proxy would be possible. Thanks and best regards, ben0r
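Ollama generally honors the standard HTTPS_PROXY environment variable for pulls. A sketch of pointing it at a local authenticating proxy such as cntlm (the address and port are placeholders):

```
import os
import subprocess

# Placeholder proxy settings; cntlm commonly listens on localhost:3128.
# Credentials can also be embedded as http://user:pass@host:port if the
# proxy accepts basic auth directly.
os.environ["HTTPS_PROXY"] = "http://127.0.0.1:3128"

# Note: the Ollama *server* process performs the actual download, so it is
# the server's environment that must contain this variable, not just the
# client's. Launching the pull from this environment works when the server
# was started from the same shell.
subprocess.run(["ollama", "pull", "llama2"], check=True)
```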
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2539/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2539/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4218
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4218/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4218/comments
https://api.github.com/repos/ollama/ollama/issues/4218/events
https://github.com/ollama/ollama/pull/4218
2,282,045,255
PR_kwDOJ0Z1Ps5ust1K
4,218
Enable concurrency by default
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-05-07T01:00:28
2024-07-01T15:32:32
2024-07-01T15:32:29
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4218", "html_url": "https://github.com/ollama/ollama/pull/4218", "diff_url": "https://github.com/ollama/ollama/pull/4218.diff", "patch_url": "https://github.com/ollama/ollama/pull/4218.patch", "merged_at": "2024-07-01T15:32:29" }
This adjusts our default settings to enable multiple loaded models and parallel requests to a single model. Users can still override these with the same env var settings as before. Parallelism has a direct impact on num_ctx, which in turn can significantly affect GPUs with small VRAM, so this change also refines the algorithm: when parallel is not explicitly set by the user, we try to find a reasonable default that fits the model on their GPU(s). As before, multiple models will only load concurrently if they fully fit in VRAM. Corresponding docs update to merge after this: #5364
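For reference, the overrides mentioned are the existing OLLAMA_NUM_PARALLEL and OLLAMA_MAX_LOADED_MODELS environment variables. A small client-side sketch of exercising the parallel behavior (endpoint as documented; the model name is a placeholder):

```
import concurrent.futures
import requests

def generate(prompt: str) -> str:
    r = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": "llama3", "prompt": prompt, "stream": False},
    )
    return r.json()["response"]

# With parallel request handling enabled, these run concurrently against one
# loaded model instead of queueing strictly one after another.
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
    results = list(pool.map(generate, [f"Say the number {i}" for i in range(4)]))
```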
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4218/reactions", "total_count": 8, "+1": 8, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4218/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8615
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8615/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8615/comments
https://api.github.com/repos/ollama/ollama/issues/8615/events
https://github.com/ollama/ollama/issues/8615
2,813,973,065
I_kwDOJ0Z1Ps6nudJJ
8,615
[Enhancement] New Cohere models are not validated in `config.json`
{ "login": "sealad886", "id": 155285242, "node_id": "U_kgDOCUF2-g", "avatar_url": "https://avatars.githubusercontent.com/u/155285242?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sealad886", "html_url": "https://github.com/sealad886", "followers_url": "https://api.github.com/users/sealad886/followers", "following_url": "https://api.github.com/users/sealad886/following{/other_user}", "gists_url": "https://api.github.com/users/sealad886/gists{/gist_id}", "starred_url": "https://api.github.com/users/sealad886/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sealad886/subscriptions", "organizations_url": "https://api.github.com/users/sealad886/orgs", "repos_url": "https://api.github.com/users/sealad886/repos", "events_url": "https://api.github.com/users/sealad886/events{/privacy}", "received_events_url": "https://api.github.com/users/sealad886/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2025-01-27T20:21:20
2025-01-27T20:30:57
2025-01-27T20:30:55
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Cohere [has released](https://docs.cohere.com/v2/v1/docs/models/an-overview-of-coheres-models) several updated versions of old models (i.e. Command-R, Command-Light, and Command-R Plus) plus their embedding and reranker models; they have also released their new `command-r7b`. I note that all of these are available for free via their API, and they integrate well without significant setup. Continue's `config.json` doesn't validate anything except `command-r` and `command-r-plus`, so even the updated version (`command-r-plus-08-2024`) won't validate. You can still enter these values; it just throws a validation warning. If using `AUTODETECT`, only `command-r` and `command-r-plus` will be detected. OS: MacOS GPU: Apple CPU: Apple Ollama version: 0.5.7
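For anyone who wants the newer names anyway, a sketch of the kind of `config.json` entry in question (schema as I understand Continue's models format; since the check is only a validation warning, the entry still works):

```
{
  "models": [
    {
      "title": "Command R+ (08-2024)",
      "provider": "cohere",
      "model": "command-r-plus-08-2024",
      "apiKey": "<COHERE_API_KEY>"
    }
  ]
}
```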
{ "login": "sealad886", "id": 155285242, "node_id": "U_kgDOCUF2-g", "avatar_url": "https://avatars.githubusercontent.com/u/155285242?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sealad886", "html_url": "https://github.com/sealad886", "followers_url": "https://api.github.com/users/sealad886/followers", "following_url": "https://api.github.com/users/sealad886/following{/other_user}", "gists_url": "https://api.github.com/users/sealad886/gists{/gist_id}", "starred_url": "https://api.github.com/users/sealad886/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sealad886/subscriptions", "organizations_url": "https://api.github.com/users/sealad886/orgs", "repos_url": "https://api.github.com/users/sealad886/repos", "events_url": "https://api.github.com/users/sealad886/events{/privacy}", "received_events_url": "https://api.github.com/users/sealad886/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8615/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8615/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/342
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/342/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/342/comments
https://api.github.com/repos/ollama/ollama/issues/342/events
https://github.com/ollama/ollama/issues/342
1,850,000,523
I_kwDOJ0Z1Ps5uRMSL
342
Cleaning the context
{ "login": "toto83fr", "id": 58964109, "node_id": "MDQ6VXNlcjU4OTY0MTA5", "avatar_url": "https://avatars.githubusercontent.com/u/58964109?v=4", "gravatar_id": "", "url": "https://api.github.com/users/toto83fr", "html_url": "https://github.com/toto83fr", "followers_url": "https://api.github.com/users/toto83fr/followers", "following_url": "https://api.github.com/users/toto83fr/following{/other_user}", "gists_url": "https://api.github.com/users/toto83fr/gists{/gist_id}", "starred_url": "https://api.github.com/users/toto83fr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/toto83fr/subscriptions", "organizations_url": "https://api.github.com/users/toto83fr/orgs", "repos_url": "https://api.github.com/users/toto83fr/repos", "events_url": "https://api.github.com/users/toto83fr/events{/privacy}", "received_events_url": "https://api.github.com/users/toto83fr/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
4
2023-08-14T15:19:18
2023-08-30T21:21:48
2023-08-30T21:21:47
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi, AFAIK it is impossible to clear the context while a model is running. Are there plans to implement this?
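In the meantime, the usual workaround is to simply stop carrying state forward: with `/api/generate` the conversation only persists if you pass the previous response's `context` back, and with chat-style usage you control the message history yourself (the interactive CLI also gained a `/clear` command for this). A sketch:

```
import requests

def fresh_generate(prompt: str) -> dict:
    # No "context" field is sent, so the model sees this prompt with a
    # clean slate rather than continuing the previous conversation.
    r = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": "llama2", "prompt": prompt, "stream": False},
    )
    return r.json()

first = fresh_generate("My name is Alice.")
second = fresh_generate("What is my name?")  # starts from an empty context
```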
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/342/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/342/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6888
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6888/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6888/comments
https://api.github.com/repos/ollama/ollama/issues/6888/events
https://github.com/ollama/ollama/issues/6888
2,537,645,517
I_kwDOJ0Z1Ps6XQWXN
6,888
an unknown error was encountered while running the model
{ "login": "ghost", "id": 10137, "node_id": "MDQ6VXNlcjEwMTM3", "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ghost", "html_url": "https://github.com/ghost", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "organizations_url": "https://api.github.com/users/ghost/orgs", "repos_url": "https://api.github.com/users/ghost/repos", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "received_events_url": "https://api.github.com/users/ghost/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
3
2024-09-20T01:34:55
2024-09-20T02:01:13
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? The API returns an error message that does not say what is wrong. Reproduce: install the model llava:13b-v1.5-q4_0, save this file: [req-data.txt](https://github.com/user-attachments/files/17068575/req-data.txt), then run this command: ```curl -d "`cat req-data.txt`" http://localhost:11434/api/generate``` The API responds with: {"error":"an unknown error was encountered while running the model "} ### OS Linux ### GPU Nvidia ### CPU AMD ### Ollama version 0.3.10
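A small sketch of the same request from Python so the HTTP status is visible alongside the error body (same endpoint as the curl above; the contents of req-data.txt are not reproduced here and are assumed to be JSON). Enabling debug logging on the server with `OLLAMA_DEBUG=1` usually surfaces the underlying runner error in the server log:

```
import json
import requests

# Load the attached request payload (assumed to be a JSON document).
with open("req-data.txt") as f:
    payload = json.load(f)

r = requests.post("http://localhost:11434/api/generate", json=payload)
print(r.status_code)  # the curl invocation above hides this
print(r.text)         # {"error":"an unknown error was encountered ..."}
```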
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6888/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6888/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/8398
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8398/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8398/comments
https://api.github.com/repos/ollama/ollama/issues/8398/events
https://github.com/ollama/ollama/issues/8398
2,782,972,298
I_kwDOJ0Z1Ps6l4MmK
8,398
Cancelling model loading for one request cancels it for others
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
0
2025-01-13T04:13:14
2025-01-13T04:13:14
null
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? When a request is canceled while a model is loading, all other requests are also cancelled ### OS _No response_ ### GPU _No response_ ### CPU _No response_ ### Ollama version _No response_
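A sketch of a repro along the lines described (the model name is a placeholder): start two requests while the model is still loading, cancel one, and observe whether the other also fails.

```
import concurrent.futures
import requests

URL = "http://localhost:11434/api/generate"

def ask(prompt: str, timeout: float):
    # A short timeout on the first request makes the client drop the
    # connection mid-load, which approximates a cancelled request.
    return requests.post(
        URL,
        json={"model": "llama3", "prompt": prompt, "stream": False},
        timeout=timeout,
    )

with concurrent.futures.ThreadPoolExecutor() as pool:
    doomed = pool.submit(ask, "hello", 0.5)   # cancelled (times out) mid-load
    patient = pool.submit(ask, "hello", 600)  # should survive; per this bug it may not

    for fut, name in [(doomed, "doomed"), (patient, "patient")]:
        try:
            print(name, fut.result().status_code)
        except Exception as e:
            print(name, "failed:", e)
```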
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8398/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8398/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/604
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/604/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/604/comments
https://api.github.com/repos/ollama/ollama/issues/604/events
https://github.com/ollama/ollama/pull/604
1,913,847,895
PR_kwDOJ0Z1Ps5bP9xG
604
Added ollama gui interface
{ "login": "TwanLuttik", "id": 19343894, "node_id": "MDQ6VXNlcjE5MzQzODk0", "avatar_url": "https://avatars.githubusercontent.com/u/19343894?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TwanLuttik", "html_url": "https://github.com/TwanLuttik", "followers_url": "https://api.github.com/users/TwanLuttik/followers", "following_url": "https://api.github.com/users/TwanLuttik/following{/other_user}", "gists_url": "https://api.github.com/users/TwanLuttik/gists{/gist_id}", "starred_url": "https://api.github.com/users/TwanLuttik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TwanLuttik/subscriptions", "organizations_url": "https://api.github.com/users/TwanLuttik/orgs", "repos_url": "https://api.github.com/users/TwanLuttik/repos", "events_url": "https://api.github.com/users/TwanLuttik/events{/privacy}", "received_events_url": "https://api.github.com/users/TwanLuttik/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-09-26T16:06:02
2023-09-29T02:14:25
2023-09-29T02:10:46
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/604", "html_url": "https://github.com/ollama/ollama/pull/604", "diff_url": "https://github.com/ollama/ollama/pull/604.diff", "patch_url": "https://github.com/ollama/ollama/pull/604.patch", "merged_at": null }
This was already added, but I think it didn't come through the merge after the new release.
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/604/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/604/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7908
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7908/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7908/comments
https://api.github.com/repos/ollama/ollama/issues/7908/events
https://github.com/ollama/ollama/pull/7908
2,711,242,184
PR_kwDOJ0Z1Ps6DuFhQ
7,908
[docs] [modelfile.md] fix: broken Modelfile command
{ "login": "Geometrein", "id": 65066173, "node_id": "MDQ6VXNlcjY1MDY2MTcz", "avatar_url": "https://avatars.githubusercontent.com/u/65066173?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Geometrein", "html_url": "https://github.com/Geometrein", "followers_url": "https://api.github.com/users/Geometrein/followers", "following_url": "https://api.github.com/users/Geometrein/following{/other_user}", "gists_url": "https://api.github.com/users/Geometrein/gists{/gist_id}", "starred_url": "https://api.github.com/users/Geometrein/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Geometrein/subscriptions", "organizations_url": "https://api.github.com/users/Geometrein/orgs", "repos_url": "https://api.github.com/users/Geometrein/repos", "events_url": "https://api.github.com/users/Geometrein/events{/privacy}", "received_events_url": "https://api.github.com/users/Geometrein/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-12-02T09:39:56
2024-12-02T17:28:56
2024-12-02T17:28:56
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7908", "html_url": "https://github.com/ollama/ollama/pull/7908", "diff_url": "https://github.com/ollama/ollama/pull/7908.diff", "patch_url": "https://github.com/ollama/ollama/pull/7908.patch", "merged_at": "2024-12-02T17:28:56" }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7908/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7908/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5827
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5827/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5827/comments
https://api.github.com/repos/ollama/ollama/issues/5827/events
https://github.com/ollama/ollama/pull/5827
2,421,313,037
PR_kwDOJ0Z1Ps52AkfG
5,827
Added Chrome and Firefox extension link to documentation
{ "login": "ivostoykov", "id": 889184, "node_id": "MDQ6VXNlcjg4OTE4NA==", "avatar_url": "https://avatars.githubusercontent.com/u/889184?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ivostoykov", "html_url": "https://github.com/ivostoykov", "followers_url": "https://api.github.com/users/ivostoykov/followers", "following_url": "https://api.github.com/users/ivostoykov/following{/other_user}", "gists_url": "https://api.github.com/users/ivostoykov/gists{/gist_id}", "starred_url": "https://api.github.com/users/ivostoykov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ivostoykov/subscriptions", "organizations_url": "https://api.github.com/users/ivostoykov/orgs", "repos_url": "https://api.github.com/users/ivostoykov/repos", "events_url": "https://api.github.com/users/ivostoykov/events{/privacy}", "received_events_url": "https://api.github.com/users/ivostoykov/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-07-21T09:26:18
2024-11-11T06:14:22
2024-11-11T06:14:22
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5827", "html_url": "https://github.com/ollama/ollama/pull/5827", "diff_url": "https://github.com/ollama/ollama/pull/5827.diff", "patch_url": "https://github.com/ollama/ollama/pull/5827.patch", "merged_at": "2024-11-11T06:14:22" }
Link to a browser extension that enables using Ollama for interacting with web pages.
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5827/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5827/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3475
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3475/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3475/comments
https://api.github.com/repos/ollama/ollama/issues/3475/events
https://github.com/ollama/ollama/issues/3475
2,223,062,738
I_kwDOJ0Z1Ps6EgT7S
3,475
Build fails when using OLLAMA_SKIP_CPU_GENERATE=1 on aarch64 Linux
{ "login": "remy415", "id": 105550370, "node_id": "U_kgDOBkqSIg", "avatar_url": "https://avatars.githubusercontent.com/u/105550370?v=4", "gravatar_id": "", "url": "https://api.github.com/users/remy415", "html_url": "https://github.com/remy415", "followers_url": "https://api.github.com/users/remy415/followers", "following_url": "https://api.github.com/users/remy415/following{/other_user}", "gists_url": "https://api.github.com/users/remy415/gists{/gist_id}", "starred_url": "https://api.github.com/users/remy415/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/remy415/subscriptions", "organizations_url": "https://api.github.com/users/remy415/orgs", "repos_url": "https://api.github.com/users/remy415/repos", "events_url": "https://api.github.com/users/remy415/events{/privacy}", "received_events_url": "https://api.github.com/users/remy415/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
7
2024-04-03T14:13:46
2024-04-19T07:48:04
2024-04-19T07:48:04
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? When using the OLLAMA_SKIP_CPU_GENERATE=1 flag for compiling the binary, the Ollama binary fails to compile with the error:

```
In file included from gpu_info_nvml.h:4,
                 from gpu_info_nvml.c:5:
gpu_info_nvml.c: In function 'nvml_check_vram':
gpu_info_nvml.c:158:20: warning: format '%ld' expects argument of type 'long int', but argument 4 has type 'long long unsigned int' [-Wformat=]
  158 |   LOG(h.verbose, "[%d] CUDA totalMem %ld\n", i, memInfo.total);
      |                  ^~~~~~~~~~~~~~~~~~~~~~~~~~    ~~~~~~~~~~~~~
      |                                                |
      |                                                long long unsigned int
gpu_info.h:33:23: note: in definition of macro 'LOG'
   33 |   fprintf(stderr, __VA_ARGS__); \
      |                   ^~~~~~~~~~~
gpu_info_nvml.c:158:42: note: format string is defined here
  158 |   LOG(h.verbose, "[%d] CUDA totalMem %ld\n", i, memInfo.total);
      |                                      ~~^
      |                                        |
      |                                        long int
      |                                      %lld
In file included from gpu_info_nvml.h:4,
                 from gpu_info_nvml.c:5:
gpu_info_nvml.c:159:20: warning: format '%ld' expects argument of type 'long int', but argument 4 has type 'long long unsigned int' [-Wformat=]
  159 |   LOG(h.verbose, "[%d] CUDA freeMem %ld\n", i, memInfo.free);
      |                  ^~~~~~~~~~~~~~~~~~~~~~~~~    ~~~~~~~~~~~~
      |                                               |
      |                                               long long unsigned int
gpu_info.h:33:23: note: in definition of macro 'LOG'
   33 |   fprintf(stderr, __VA_ARGS__); \
      |                   ^~~~~~~~~~~
gpu_info_nvml.c:159:41: note: format string is defined here
  159 |   LOG(h.verbose, "[%d] CUDA freeMem %ld\n", i, memInfo.free);
      |                                     ~~^
      |                                       |
      |                                       long int
      |                                     %lld
# github.com/ollama/ollama
/home/tegra/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.22.0.linux-arm64/pkg/tool/linux_arm64/link: running gcc failed: exit status 1
gcc: error: /home/tegra/ok3d/ollama-container/dev/ollama/llm/build/linux/arm64_static/libllama.a: No such file or directory
```

### What did you expect to see? It appears code to build a required static library folder was added inside the optional `OLLAMA_SKIP_CPU_GENERATE` check on line 62. Not sure if the intent was to build the `_static` folder in every build or only when building for CPU. Additionally, although I enjoy saving time by adding the OLLAMA_SKIP_CPU_GENERATE flag, should the LCD_CPU build be created regardless of the skip flag as a fallback for GPU OOM?

### Steps to reproduce Set `OLLAMA_SKIP_CPU_GENERATE=1` as an environment variable for compiling the binary.

### Are there any recent changes that introduced the issue? In file `llm/generate/gen_linux.sh`, the following code snippet exists starting on line 62:

```
if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
    if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "static" ]; then
        # Static build for linking into the Go binary
        init_vars
        CMAKE_TARGETS="--target llama --target ggml"
        CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
        BUILD_DIR="../build/linux/${ARCH}_static"
        echo "Building static library"
        build
    fi
```

### OS Linux ### Architecture arm64 ### Platform _No response_ ### Ollama version 0.1.30 ### GPU Nvidia ### GPU info NVidia Jetson Orin Nano 8Gb (Tegra ARM64v8 SOC) ### CPU Other ### Other software _No response_
{ "login": "remy415", "id": 105550370, "node_id": "U_kgDOBkqSIg", "avatar_url": "https://avatars.githubusercontent.com/u/105550370?v=4", "gravatar_id": "", "url": "https://api.github.com/users/remy415", "html_url": "https://github.com/remy415", "followers_url": "https://api.github.com/users/remy415/followers", "following_url": "https://api.github.com/users/remy415/following{/other_user}", "gists_url": "https://api.github.com/users/remy415/gists{/gist_id}", "starred_url": "https://api.github.com/users/remy415/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/remy415/subscriptions", "organizations_url": "https://api.github.com/users/remy415/orgs", "repos_url": "https://api.github.com/users/remy415/repos", "events_url": "https://api.github.com/users/remy415/events{/privacy}", "received_events_url": "https://api.github.com/users/remy415/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3475/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3475/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3658
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3658/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3658/comments
https://api.github.com/repos/ollama/ollama/issues/3658/events
https://github.com/ollama/ollama/issues/3658
2,244,460,762
I_kwDOJ0Z1Ps6Fx8Da
3,658
codegemma:instruct : forcibly closed by the remote host
{ "login": "MrBenzWorld", "id": 113277019, "node_id": "U_kgDOBsB4Ww", "avatar_url": "https://avatars.githubusercontent.com/u/113277019?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MrBenzWorld", "html_url": "https://github.com/MrBenzWorld", "followers_url": "https://api.github.com/users/MrBenzWorld/followers", "following_url": "https://api.github.com/users/MrBenzWorld/following{/other_user}", "gists_url": "https://api.github.com/users/MrBenzWorld/gists{/gist_id}", "starred_url": "https://api.github.com/users/MrBenzWorld/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MrBenzWorld/subscriptions", "organizations_url": "https://api.github.com/users/MrBenzWorld/orgs", "repos_url": "https://api.github.com/users/MrBenzWorld/repos", "events_url": "https://api.github.com/users/MrBenzWorld/events{/privacy}", "received_events_url": "https://api.github.com/users/MrBenzWorld/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-04-15T19:34:17
2024-05-25T10:57:25
2024-05-25T10:57:25
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
null
{ "login": "MrBenzWorld", "id": 113277019, "node_id": "U_kgDOBsB4Ww", "avatar_url": "https://avatars.githubusercontent.com/u/113277019?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MrBenzWorld", "html_url": "https://github.com/MrBenzWorld", "followers_url": "https://api.github.com/users/MrBenzWorld/followers", "following_url": "https://api.github.com/users/MrBenzWorld/following{/other_user}", "gists_url": "https://api.github.com/users/MrBenzWorld/gists{/gist_id}", "starred_url": "https://api.github.com/users/MrBenzWorld/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MrBenzWorld/subscriptions", "organizations_url": "https://api.github.com/users/MrBenzWorld/orgs", "repos_url": "https://api.github.com/users/MrBenzWorld/repos", "events_url": "https://api.github.com/users/MrBenzWorld/events{/privacy}", "received_events_url": "https://api.github.com/users/MrBenzWorld/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3658/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3658/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2592
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2592/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2592/comments
https://api.github.com/repos/ollama/ollama/issues/2592/events
https://github.com/ollama/ollama/pull/2592
2,142,415,095
PR_kwDOJ0Z1Ps5nSnSK
2,592
Update linux.md
{ "login": "krenax", "id": 127540387, "node_id": "U_kgDOB5ocow", "avatar_url": "https://avatars.githubusercontent.com/u/127540387?v=4", "gravatar_id": "", "url": "https://api.github.com/users/krenax", "html_url": "https://github.com/krenax", "followers_url": "https://api.github.com/users/krenax/followers", "following_url": "https://api.github.com/users/krenax/following{/other_user}", "gists_url": "https://api.github.com/users/krenax/gists{/gist_id}", "starred_url": "https://api.github.com/users/krenax/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/krenax/subscriptions", "organizations_url": "https://api.github.com/users/krenax/orgs", "repos_url": "https://api.github.com/users/krenax/repos", "events_url": "https://api.github.com/users/krenax/events{/privacy}", "received_events_url": "https://api.github.com/users/krenax/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-02-19T13:47:26
2024-02-19T13:50:35
2024-02-19T13:50:34
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2592", "html_url": "https://github.com/ollama/ollama/pull/2592", "diff_url": "https://github.com/ollama/ollama/pull/2592.diff", "patch_url": "https://github.com/ollama/ollama/pull/2592.patch", "merged_at": null }
null
{ "login": "krenax", "id": 127540387, "node_id": "U_kgDOB5ocow", "avatar_url": "https://avatars.githubusercontent.com/u/127540387?v=4", "gravatar_id": "", "url": "https://api.github.com/users/krenax", "html_url": "https://github.com/krenax", "followers_url": "https://api.github.com/users/krenax/followers", "following_url": "https://api.github.com/users/krenax/following{/other_user}", "gists_url": "https://api.github.com/users/krenax/gists{/gist_id}", "starred_url": "https://api.github.com/users/krenax/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/krenax/subscriptions", "organizations_url": "https://api.github.com/users/krenax/orgs", "repos_url": "https://api.github.com/users/krenax/repos", "events_url": "https://api.github.com/users/krenax/events{/privacy}", "received_events_url": "https://api.github.com/users/krenax/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2592/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2592/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3786
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3786/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3786/comments
https://api.github.com/repos/ollama/ollama/issues/3786/events
https://github.com/ollama/ollama/issues/3786
2,254,720,665
I_kwDOJ0Z1Ps6GZE6Z
3,786
My internet is too slow to download the model
{ "login": "skystar7", "id": 2904078, "node_id": "MDQ6VXNlcjI5MDQwNzg=", "avatar_url": "https://avatars.githubusercontent.com/u/2904078?v=4", "gravatar_id": "", "url": "https://api.github.com/users/skystar7", "html_url": "https://github.com/skystar7", "followers_url": "https://api.github.com/users/skystar7/followers", "following_url": "https://api.github.com/users/skystar7/following{/other_user}", "gists_url": "https://api.github.com/users/skystar7/gists{/gist_id}", "starred_url": "https://api.github.com/users/skystar7/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/skystar7/subscriptions", "organizations_url": "https://api.github.com/users/skystar7/orgs", "repos_url": "https://api.github.com/users/skystar7/repos", "events_url": "https://api.github.com/users/skystar7/events{/privacy}", "received_events_url": "https://api.github.com/users/skystar7/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 6677370291, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCVsw", "url": "https://api.github.com/repos/ollama/ollama/labels/networking", "name": "networking", "color": "0B5368", "default": false, "description": "Issues relating to ollama pull and push" } ]
open
false
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
7
2024-04-20T21:03:07
2025-01-28T09:34:23
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I keep getting the same error > ollama run llama3 > pulling manifest > pulling 00e1317cbf74... 1% β–• ▏ 28 MB/4.7 GB 61 KB/s 20h58m > Error: max retries exceeded: Get "https://dd20bb891979d25aebc8bec07b2b3bbc.r2.cloudflarestorage.com/ollama/docker/registry/v2/blobs/sha256/00/00e1317cbf74d901080d7100f57580ba8dd8de57203072dc6f668324ba545f29/data?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=66040c77ac1b787c3af820529859349a%!F(MISSING)20240420%!F(MISSING)auto%!F(MISSING)s3%!F(MISSING)aws4_request&X-Amz-Date=20240420T204533Z&X-Amz-Expires=1200&X-Amz-SignedHeaders=host&X-Amz-Signature=8e4efa642799d32a9e502990baa74226f9f0f740b2cdb7ed0bd6b6741e0106df": net/http: TLS handshake timeout Is there a way to download llama3 externally and then use it with ollama? Many thanks ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version 0.1.32
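One common workaround (not an official fix) is to retry in a loop: `ollama pull` resumes partially downloaded layers, so each retry makes progress even on a slow or flaky connection:

```
# retry until the pull succeeds; interrupted layers resume where they left off
until ollama pull llama3; do
  echo "pull interrupted, retrying in 10s..." >&2
  sleep 10
done
```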
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3786/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3786/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/1333
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1333/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1333/comments
https://api.github.com/repos/ollama/ollama/issues/1333/events
https://github.com/ollama/ollama/issues/1333
2,018,932,762
I_kwDOJ0Z1Ps54Vnga
1,333
ImportError: cannot import name 'llama2' from 'langchain'
{ "login": "illiyaz", "id": 13364723, "node_id": "MDQ6VXNlcjEzMzY0NzIz", "avatar_url": "https://avatars.githubusercontent.com/u/13364723?v=4", "gravatar_id": "", "url": "https://api.github.com/users/illiyaz", "html_url": "https://github.com/illiyaz", "followers_url": "https://api.github.com/users/illiyaz/followers", "following_url": "https://api.github.com/users/illiyaz/following{/other_user}", "gists_url": "https://api.github.com/users/illiyaz/gists{/gist_id}", "starred_url": "https://api.github.com/users/illiyaz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/illiyaz/subscriptions", "organizations_url": "https://api.github.com/users/illiyaz/orgs", "repos_url": "https://api.github.com/users/illiyaz/repos", "events_url": "https://api.github.com/users/illiyaz/events{/privacy}", "received_events_url": "https://api.github.com/users/illiyaz/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-11-30T15:37:50
2023-11-30T15:39:35
2023-11-30T15:39:35
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I have been trying to run some sample code with Ollama on a Mac but was not able to run it successfully. I was getting an error on this line: 'from langchain.llms import Ollama'. The version of langchain was 0.0.236.
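The `Ollama` LLM wrapper appears to have landed in LangChain releases later than 0.0.236, so upgrading is the usual fix; in recent releases the integration lives in the `langchain-community` package. A sketch (the package layout may differ between versions):

```
pip install -U langchain langchain-community
python -c "from langchain_community.llms import Ollama; print('import ok')"
```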
{ "login": "illiyaz", "id": 13364723, "node_id": "MDQ6VXNlcjEzMzY0NzIz", "avatar_url": "https://avatars.githubusercontent.com/u/13364723?v=4", "gravatar_id": "", "url": "https://api.github.com/users/illiyaz", "html_url": "https://github.com/illiyaz", "followers_url": "https://api.github.com/users/illiyaz/followers", "following_url": "https://api.github.com/users/illiyaz/following{/other_user}", "gists_url": "https://api.github.com/users/illiyaz/gists{/gist_id}", "starred_url": "https://api.github.com/users/illiyaz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/illiyaz/subscriptions", "organizations_url": "https://api.github.com/users/illiyaz/orgs", "repos_url": "https://api.github.com/users/illiyaz/repos", "events_url": "https://api.github.com/users/illiyaz/events{/privacy}", "received_events_url": "https://api.github.com/users/illiyaz/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1333/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1333/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7529
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7529/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7529/comments
https://api.github.com/repos/ollama/ollama/issues/7529/events
https://github.com/ollama/ollama/pull/7529
2,638,658,955
PR_kwDOJ0Z1Ps6BFUL2
7,529
win: remove preview title from installer
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-11-06T16:44:24
2024-11-07T22:26:50
2024-11-07T22:26:47
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7529", "html_url": "https://github.com/ollama/ollama/pull/7529", "diff_url": "https://github.com/ollama/ollama/pull/7529.diff", "patch_url": "https://github.com/ollama/ollama/pull/7529.patch", "merged_at": "2024-11-07T22:26:47" }
This should have been in #7347 but was overlooked.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7529/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7529/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2156
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2156/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2156/comments
https://api.github.com/repos/ollama/ollama/issues/2156/events
https://github.com/ollama/ollama/issues/2156
2,095,717,132
I_kwDOJ0Z1Ps586hsM
2,156
I want to run Ollama on a limited number of GPUs and CPUs
{ "login": "sfarzi", "id": 133970229, "node_id": "U_kgDOB_w5NQ", "avatar_url": "https://avatars.githubusercontent.com/u/133970229?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sfarzi", "html_url": "https://github.com/sfarzi", "followers_url": "https://api.github.com/users/sfarzi/followers", "following_url": "https://api.github.com/users/sfarzi/following{/other_user}", "gists_url": "https://api.github.com/users/sfarzi/gists{/gist_id}", "starred_url": "https://api.github.com/users/sfarzi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sfarzi/subscriptions", "organizations_url": "https://api.github.com/users/sfarzi/orgs", "repos_url": "https://api.github.com/users/sfarzi/repos", "events_url": "https://api.github.com/users/sfarzi/events{/privacy}", "received_events_url": "https://api.github.com/users/sfarzi/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
3
2024-01-23T10:09:11
2024-03-12T18:38:18
2024-03-12T18:36:47
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I have a machine with 4 GPUs and 16 CPUs, but I want to run Ollama on just one GPU and 8 CPUs. How can I do this?
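One way to do this is with standard OS/CUDA mechanisms rather than an Ollama-specific setting; a sketch, where the GPU index and core list are examples to adapt:

```
# expose only GPU 0 to Ollama and pin the server (and the runners it
# spawns) to CPU cores 0-7
CUDA_VISIBLE_DEVICES=0 taskset -c 0-7 ollama serve
```

The per-model `num_thread` parameter (e.g. `PARAMETER num_thread 8` in a Modelfile) can additionally cap the threads the runner uses.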
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2156/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2156/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4310
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4310/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4310/comments
https://api.github.com/repos/ollama/ollama/issues/4310/events
https://github.com/ollama/ollama/issues/4310
2,289,261,287
I_kwDOJ0Z1Ps6Ic1rn
4,310
updating from v0.1.33 to v0.1.34 with models from a Modelfile based on llama3:70b-instruct-q4_0 with 23k SYSTEM data yields >100s processing and a 504 response
{ "login": "aiboogie", "id": 11837666, "node_id": "MDQ6VXNlcjExODM3NjY2", "avatar_url": "https://avatars.githubusercontent.com/u/11837666?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aiboogie", "html_url": "https://github.com/aiboogie", "followers_url": "https://api.github.com/users/aiboogie/followers", "following_url": "https://api.github.com/users/aiboogie/following{/other_user}", "gists_url": "https://api.github.com/users/aiboogie/gists{/gist_id}", "starred_url": "https://api.github.com/users/aiboogie/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aiboogie/subscriptions", "organizations_url": "https://api.github.com/users/aiboogie/orgs", "repos_url": "https://api.github.com/users/aiboogie/repos", "events_url": "https://api.github.com/users/aiboogie/events{/privacy}", "received_events_url": "https://api.github.com/users/aiboogie/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
1
2024-05-10T08:27:32
2024-05-11T02:24:56
2024-05-11T02:24:56
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I was using ollama v0.1.33 with models built from a Modelfile based on llama3:70b-instruct-q4_0 with 23k of SYSTEM data. Each prompt took no more than 20s (except the initial model-load request) on an M1 Ultra with 128GB RAM and 48 GPU cores. I updated today to v0.1.34 and, using the same generated model, every request has a huge processing time; when it reaches 100s, execution is disconnected with a 504 response code. Downgrading back to v0.1.33 solves the problem, so it must be something in v0.1.34. Please advise. Thanks! ### OS macOS ### GPU Apple ### CPU Apple ### Ollama version 0.1.34
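To make the regression easier to compare, the same request can be timed against both versions; a sketch, where `mymodel` is a placeholder for the Modelfile-built model from the report:

```
# run once on v0.1.33 and once on v0.1.34, then compare wall-clock time
time curl -s http://localhost:11434/api/generate \
  -d '{"model": "mymodel", "prompt": "hello", "stream": false}'
```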
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4310/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4310/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4329
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4329/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4329/comments
https://api.github.com/repos/ollama/ollama/issues/4329/events
https://github.com/ollama/ollama/pull/4329
2,290,472,338
PR_kwDOJ0Z1Ps5vI2kO
4,329
Fall back to CPU runner with zero layers
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-05-10T22:10:08
2024-05-10T22:23:19
2024-05-10T22:23:16
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4329", "html_url": "https://github.com/ollama/ollama/pull/4329", "diff_url": "https://github.com/ollama/ollama/pull/4329.diff", "patch_url": "https://github.com/ollama/ollama/pull/4329.patch", "merged_at": "2024-05-10T22:23:16" }
null
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4329/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4329/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2761
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2761/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2761/comments
https://api.github.com/repos/ollama/ollama/issues/2761/events
https://github.com/ollama/ollama/pull/2761
2,153,557,868
PR_kwDOJ0Z1Ps5n4ebs
2,761
docs: update development.md add docker build desc
{ "login": "zvrr", "id": 194304, "node_id": "MDQ6VXNlcjE5NDMwNA==", "avatar_url": "https://avatars.githubusercontent.com/u/194304?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zvrr", "html_url": "https://github.com/zvrr", "followers_url": "https://api.github.com/users/zvrr/followers", "following_url": "https://api.github.com/users/zvrr/following{/other_user}", "gists_url": "https://api.github.com/users/zvrr/gists{/gist_id}", "starred_url": "https://api.github.com/users/zvrr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zvrr/subscriptions", "organizations_url": "https://api.github.com/users/zvrr/orgs", "repos_url": "https://api.github.com/users/zvrr/repos", "events_url": "https://api.github.com/users/zvrr/events{/privacy}", "received_events_url": "https://api.github.com/users/zvrr/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-02-26T08:24:02
2024-03-18T03:00:18
2024-03-18T03:00:18
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2761", "html_url": "https://github.com/ollama/ollama/pull/2761", "diff_url": "https://github.com/ollama/ollama/pull/2761.diff", "patch_url": "https://github.com/ollama/ollama/pull/2761.patch", "merged_at": null }
Update development.md: add a description of how to build a Docker image.
{ "login": "zvrr", "id": 194304, "node_id": "MDQ6VXNlcjE5NDMwNA==", "avatar_url": "https://avatars.githubusercontent.com/u/194304?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zvrr", "html_url": "https://github.com/zvrr", "followers_url": "https://api.github.com/users/zvrr/followers", "following_url": "https://api.github.com/users/zvrr/following{/other_user}", "gists_url": "https://api.github.com/users/zvrr/gists{/gist_id}", "starred_url": "https://api.github.com/users/zvrr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zvrr/subscriptions", "organizations_url": "https://api.github.com/users/zvrr/orgs", "repos_url": "https://api.github.com/users/zvrr/repos", "events_url": "https://api.github.com/users/zvrr/events{/privacy}", "received_events_url": "https://api.github.com/users/zvrr/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2761/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2761/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4536
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4536/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4536/comments
https://api.github.com/repos/ollama/ollama/issues/4536/events
https://github.com/ollama/ollama/pull/4536
2,305,906,153
PR_kwDOJ0Z1Ps5v9H9P
4,536
chore: fix typo in docs
{ "login": "alwqx", "id": 9915368, "node_id": "MDQ6VXNlcjk5MTUzNjg=", "avatar_url": "https://avatars.githubusercontent.com/u/9915368?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alwqx", "html_url": "https://github.com/alwqx", "followers_url": "https://api.github.com/users/alwqx/followers", "following_url": "https://api.github.com/users/alwqx/following{/other_user}", "gists_url": "https://api.github.com/users/alwqx/gists{/gist_id}", "starred_url": "https://api.github.com/users/alwqx/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alwqx/subscriptions", "organizations_url": "https://api.github.com/users/alwqx/orgs", "repos_url": "https://api.github.com/users/alwqx/repos", "events_url": "https://api.github.com/users/alwqx/events{/privacy}", "received_events_url": "https://api.github.com/users/alwqx/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-05-20T12:36:29
2024-05-20T21:19:03
2024-05-20T21:19:03
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4536", "html_url": "https://github.com/ollama/ollama/pull/4536", "diff_url": "https://github.com/ollama/ollama/pull/4536.diff", "patch_url": "https://github.com/ollama/ollama/pull/4536.patch", "merged_at": "2024-05-20T21:19:03" }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4536/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4536/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3883
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3883/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3883/comments
https://api.github.com/repos/ollama/ollama/issues/3883/events
https://github.com/ollama/ollama/issues/3883
2,261,740,761
I_kwDOJ0Z1Ps6Gz2zZ
3,883
Support for Snowflake Arctic
{ "login": "djsavvy", "id": 26914352, "node_id": "MDQ6VXNlcjI2OTE0MzUy", "avatar_url": "https://avatars.githubusercontent.com/u/26914352?v=4", "gravatar_id": "", "url": "https://api.github.com/users/djsavvy", "html_url": "https://github.com/djsavvy", "followers_url": "https://api.github.com/users/djsavvy/followers", "following_url": "https://api.github.com/users/djsavvy/following{/other_user}", "gists_url": "https://api.github.com/users/djsavvy/gists{/gist_id}", "starred_url": "https://api.github.com/users/djsavvy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/djsavvy/subscriptions", "organizations_url": "https://api.github.com/users/djsavvy/orgs", "repos_url": "https://api.github.com/users/djsavvy/repos", "events_url": "https://api.github.com/users/djsavvy/events{/privacy}", "received_events_url": "https://api.github.com/users/djsavvy/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
3
2024-04-24T16:45:50
2024-12-20T00:26:46
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Snowflake just released new models: https://huggingface.co/Snowflake/snowflake-arctic-instruct https://huggingface.co/Snowflake/snowflake-arctic-base
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3883/reactions", "total_count": 36, "+1": 36, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3883/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/8595
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8595/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8595/comments
https://api.github.com/repos/ollama/ollama/issues/8595/events
https://github.com/ollama/ollama/issues/8595
2,811,649,642
I_kwDOJ0Z1Ps6nll5q
8,595
Train Ollama models using custom data
{ "login": "samrudha01codespace", "id": 144599345, "node_id": "U_kgDOCJ5pMQ", "avatar_url": "https://avatars.githubusercontent.com/u/144599345?v=4", "gravatar_id": "", "url": "https://api.github.com/users/samrudha01codespace", "html_url": "https://github.com/samrudha01codespace", "followers_url": "https://api.github.com/users/samrudha01codespace/followers", "following_url": "https://api.github.com/users/samrudha01codespace/following{/other_user}", "gists_url": "https://api.github.com/users/samrudha01codespace/gists{/gist_id}", "starred_url": "https://api.github.com/users/samrudha01codespace/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/samrudha01codespace/subscriptions", "organizations_url": "https://api.github.com/users/samrudha01codespace/orgs", "repos_url": "https://api.github.com/users/samrudha01codespace/repos", "events_url": "https://api.github.com/users/samrudha01codespace/events{/privacy}", "received_events_url": "https://api.github.com/users/samrudha01codespace/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
2
2025-01-26T16:24:59
2025-01-28T21:32:55
2025-01-28T21:32:55
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Can users train the small Ollama models using their own datasets?
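Ollama itself performs inference, not training: fine-tuning has to happen in an external framework, after which the result can be imported. The closest built-in customization is a Modelfile; a sketch, where the base model and prompt are illustrative:

```
cat > Modelfile <<'EOF'
FROM llama3.2
SYSTEM You answer questions using my project's terminology.
EOF
ollama create my-assistant -f Modelfile
ollama run my-assistant
```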
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8595/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8595/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/6341
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6341/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6341/comments
https://api.github.com/repos/ollama/ollama/issues/6341/events
https://github.com/ollama/ollama/issues/6341
2,463,827,701
I_kwDOJ0Z1Ps6S2wb1
6,341
Llama 3.1 70B high-quality HQQ quantized model - 99%+ quality of fp16
{ "login": "gileneusz", "id": 34601970, "node_id": "MDQ6VXNlcjM0NjAxOTcw", "avatar_url": "https://avatars.githubusercontent.com/u/34601970?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gileneusz", "html_url": "https://github.com/gileneusz", "followers_url": "https://api.github.com/users/gileneusz/followers", "following_url": "https://api.github.com/users/gileneusz/following{/other_user}", "gists_url": "https://api.github.com/users/gileneusz/gists{/gist_id}", "starred_url": "https://api.github.com/users/gileneusz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gileneusz/subscriptions", "organizations_url": "https://api.github.com/users/gileneusz/orgs", "repos_url": "https://api.github.com/users/gileneusz/repos", "events_url": "https://api.github.com/users/gileneusz/events{/privacy}", "received_events_url": "https://api.github.com/users/gileneusz/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
2
2024-08-13T17:08:12
2024-08-21T12:18:59
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I'm not really sure if this is possible, but adding it to Ollama could really improve the 4-bit quant option: 99%+ relative performance to FP16 in all lm-eval benchmarks, with inference speed similar to FP16. url: https://huggingface.co/mobiuslabsgmbh/Llama-3.1-70b-instruct_4bitgs64_hqq <img width="604" alt="Screenshot 2024-08-13 at 19 03 57" src="https://github.com/user-attachments/assets/64cd0427-b7c7-4fb8-b846-15f172669248"> also this: https://huggingface.co/ModelCloud/Meta-Llama-3.1-70B-Instruct-gptq-4bit <img width="597" alt="Screenshot 2024-08-13 at 19 07 18" src="https://github.com/user-attachments/assets/c3518ffe-323d-42f0-9162-d188179797fb">
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6341/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6341/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/8571
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8571/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8571/comments
https://api.github.com/repos/ollama/ollama/issues/8571/events
https://github.com/ollama/ollama/issues/8571
2,810,620,860
I_kwDOJ0Z1Ps6nhqu8
8,571
running deepseek r1 671b on 64GB / 128GB ram mac gives `Error: llama runner process has terminated: signal: killed`
{ "login": "duttaoindril", "id": 4969854, "node_id": "MDQ6VXNlcjQ5Njk4NTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/4969854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/duttaoindril", "html_url": "https://github.com/duttaoindril", "followers_url": "https://api.github.com/users/duttaoindril/followers", "following_url": "https://api.github.com/users/duttaoindril/following{/other_user}", "gists_url": "https://api.github.com/users/duttaoindril/gists{/gist_id}", "starred_url": "https://api.github.com/users/duttaoindril/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/duttaoindril/subscriptions", "organizations_url": "https://api.github.com/users/duttaoindril/orgs", "repos_url": "https://api.github.com/users/duttaoindril/repos", "events_url": "https://api.github.com/users/duttaoindril/events{/privacy}", "received_events_url": "https://api.github.com/users/duttaoindril/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
33
2025-01-25T00:07:48
2025-01-29T17:28:53
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? After waiting all day for the model to download, `ollama run deepseek-r1:671b` fails to run with the error `Error: llama runner process has terminated: signal: killed`. I can run the deepseek-r1:70b llama model just fine. I'm running a MacBook M3 Pro with 64GB RAM, so I'm assuming it's failing due to lack of memory? - How do I know the real memory requirements for a model? I don't think it's obvious on the ollama page. - Is there any way to fix this at all? I tried it on my 128GB M1 Ultra Mac Studio and got the same error. I'd really love to run this locally, so I would appreciate any help! ### OS macOS ### GPU Apple ### CPU Apple ### Ollama version 0.5.7
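As a rough rule of thumb, the on-disk weight size is a lower bound on the RAM/VRAM a model needs, plus headroom for the KV cache; the 4-bit weights of deepseek-r1:671b alone are roughly 400GB, far beyond 64GB or 128GB, so the OS kills the runner. A quick sanity check, sketched:

```
ollama list | grep deepseek-r1   # SIZE column: on-disk weight size
sysctl -n hw.memsize             # total physical memory on macOS, in bytes
```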
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8571/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8571/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/2122
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2122/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2122/comments
https://api.github.com/repos/ollama/ollama/issues/2122/events
https://github.com/ollama/ollama/issues/2122
2,092,523,401
I_kwDOJ0Z1Ps58uV-J
2,122
Cannot run ollama on my server using the docker image, error 132
{ "login": "GuiPoM", "id": 11942518, "node_id": "MDQ6VXNlcjExOTQyNTE4", "avatar_url": "https://avatars.githubusercontent.com/u/11942518?v=4", "gravatar_id": "", "url": "https://api.github.com/users/GuiPoM", "html_url": "https://github.com/GuiPoM", "followers_url": "https://api.github.com/users/GuiPoM/followers", "following_url": "https://api.github.com/users/GuiPoM/following{/other_user}", "gists_url": "https://api.github.com/users/GuiPoM/gists{/gist_id}", "starred_url": "https://api.github.com/users/GuiPoM/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/GuiPoM/subscriptions", "organizations_url": "https://api.github.com/users/GuiPoM/orgs", "repos_url": "https://api.github.com/users/GuiPoM/repos", "events_url": "https://api.github.com/users/GuiPoM/events{/privacy}", "received_events_url": "https://api.github.com/users/GuiPoM/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
5
2024-01-21T10:29:09
2024-01-28T19:34:33
2024-01-28T19:34:33
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hello, this is the first time I am facing such an issue: I cannot run the container at all; it crashes right when it is deployed. I don't know which information would be useful for debugging. My host is a Debian 12 server with Docker 25 CE. I was first deploying using a compose file, but I switched back to the docker command line to double-check: `docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama` It creates a volume, but the container crashes with error code 132: ``` State Dead false Error ExitCode 132 FinishedAt 2024-01-21T10:24:09.726297577Z OOMKilled false Paused false Pid 0 Restarting false Running false StartedAt 2024-01-21T10:24:09.724212624Z Status exited ``` I have no clue what is going on; I was not able to find a reference to error 132 in the source code that could help me do further checks. Maybe you will have some ideas! Thanks!
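Exit code 132 is 128 + 4, i.e. the process died from SIGILL (illegal instruction), which for Ollama typically means the CPU lacks an instruction set, commonly AVX, that the binary was built with. A diagnostic sketch:

```
# list the AVX variants this CPU advertises; no output suggests no AVX
grep -o -m1 'avx[^ ]*' /proc/cpuinfo || echo "no AVX support detected"
# the container's own logs often show which instruction set was required
docker logs ollama 2>&1 | tail -n 50
```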
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2122/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2122/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/1481
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1481/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1481/comments
https://api.github.com/repos/ollama/ollama/issues/1481/events
https://github.com/ollama/ollama/issues/1481
2,037,919,879
I_kwDOJ0Z1Ps55eDCH
1,481
/api/chat 404 not found version v0.1.14 linux
{ "login": "dishantsingla", "id": 135211326, "node_id": "U_kgDOCA8pPg", "avatar_url": "https://avatars.githubusercontent.com/u/135211326?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dishantsingla", "html_url": "https://github.com/dishantsingla", "followers_url": "https://api.github.com/users/dishantsingla/followers", "following_url": "https://api.github.com/users/dishantsingla/following{/other_user}", "gists_url": "https://api.github.com/users/dishantsingla/gists{/gist_id}", "starred_url": "https://api.github.com/users/dishantsingla/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dishantsingla/subscriptions", "organizations_url": "https://api.github.com/users/dishantsingla/orgs", "repos_url": "https://api.github.com/users/dishantsingla/repos", "events_url": "https://api.github.com/users/dishantsingla/events{/privacy}", "received_events_url": "https://api.github.com/users/dishantsingla/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-12-12T14:48:41
2023-12-12T17:30:47
2023-12-12T17:30:24
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I have updated ollama to the latest version, v0.1.14, running on AMD Linux; however, it is showing that the /api/chat endpoint is not available. Is this endpoint yet to be released for Linux? <img width="441" alt="image" src="https://github.com/jmorganca/ollama/assets/135211326/9340deb3-9e56-4d18-8985-51f2f3b7fe42"> <img width="547" alt="image" src="https://github.com/jmorganca/ollama/assets/135211326/afb55d5b-cb5d-4645-a892-869cfe942b5b">
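`/api/chat` shipped with the 0.1.14 server, so a 404 often means an older server process is still answering after the upgrade. A restart-and-retest sketch for the Linux service install:

```
sudo systemctl restart ollama
ollama -v   # should report 0.1.14
curl http://localhost:11434/api/chat -d '{
  "model": "llama2",
  "messages": [{"role": "user", "content": "hello"}]
}'
```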
{ "login": "dishantsingla", "id": 135211326, "node_id": "U_kgDOCA8pPg", "avatar_url": "https://avatars.githubusercontent.com/u/135211326?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dishantsingla", "html_url": "https://github.com/dishantsingla", "followers_url": "https://api.github.com/users/dishantsingla/followers", "following_url": "https://api.github.com/users/dishantsingla/following{/other_user}", "gists_url": "https://api.github.com/users/dishantsingla/gists{/gist_id}", "starred_url": "https://api.github.com/users/dishantsingla/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dishantsingla/subscriptions", "organizations_url": "https://api.github.com/users/dishantsingla/orgs", "repos_url": "https://api.github.com/users/dishantsingla/repos", "events_url": "https://api.github.com/users/dishantsingla/events{/privacy}", "received_events_url": "https://api.github.com/users/dishantsingla/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1481/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1481/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2507
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2507/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2507/comments
https://api.github.com/repos/ollama/ollama/issues/2507/events
https://github.com/ollama/ollama/issues/2507
2,135,524,120
I_kwDOJ0Z1Ps5_SYMY
2,507
Running Ollama on a local network
{ "login": "Jimmys-Code", "id": 115545051, "node_id": "U_kgDOBuMT2w", "avatar_url": "https://avatars.githubusercontent.com/u/115545051?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Jimmys-Code", "html_url": "https://github.com/Jimmys-Code", "followers_url": "https://api.github.com/users/Jimmys-Code/followers", "following_url": "https://api.github.com/users/Jimmys-Code/following{/other_user}", "gists_url": "https://api.github.com/users/Jimmys-Code/gists{/gist_id}", "starred_url": "https://api.github.com/users/Jimmys-Code/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Jimmys-Code/subscriptions", "organizations_url": "https://api.github.com/users/Jimmys-Code/orgs", "repos_url": "https://api.github.com/users/Jimmys-Code/repos", "events_url": "https://api.github.com/users/Jimmys-Code/events{/privacy}", "received_events_url": "https://api.github.com/users/Jimmys-Code/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396220, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2afA", "url": "https://api.github.com/repos/ollama/ollama/labels/question", "name": "question", "color": "d876e3", "default": true, "description": "General questions" } ]
closed
false
null
[]
null
3
2024-02-15T02:33:36
2024-03-11T18:50:25
2024-03-11T18:50:25
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
I am building a Python AI project inside a Docker container on my Windows PC. I was wondering if I could run the Ollama server on my Mac and connect to it from the PC, from inside that Docker container, and how to actually achieve this. I'm still new to Python and programming, so any help would be much appreciated. Thanks.
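A sketch of the usual setup, where the IP address stands in for the Mac's LAN address:

```
# on the Mac: bind to all interfaces so other machines can reach the server
OLLAMA_HOST=0.0.0.0 ollama serve

# from inside the Docker container on the PC (replace 192.168.1.50 with
# the Mac's LAN IP)
curl http://192.168.1.50:11434/api/generate \
  -d '{"model": "llama2", "prompt": "hello", "stream": false}'
```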
{ "login": "hoyyeva", "id": 63033505, "node_id": "MDQ6VXNlcjYzMDMzNTA1", "avatar_url": "https://avatars.githubusercontent.com/u/63033505?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hoyyeva", "html_url": "https://github.com/hoyyeva", "followers_url": "https://api.github.com/users/hoyyeva/followers", "following_url": "https://api.github.com/users/hoyyeva/following{/other_user}", "gists_url": "https://api.github.com/users/hoyyeva/gists{/gist_id}", "starred_url": "https://api.github.com/users/hoyyeva/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hoyyeva/subscriptions", "organizations_url": "https://api.github.com/users/hoyyeva/orgs", "repos_url": "https://api.github.com/users/hoyyeva/repos", "events_url": "https://api.github.com/users/hoyyeva/events{/privacy}", "received_events_url": "https://api.github.com/users/hoyyeva/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2507/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2507/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8610
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8610/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8610/comments
https://api.github.com/repos/ollama/ollama/issues/8610/events
https://github.com/ollama/ollama/issues/8610
2,813,512,486
I_kwDOJ0Z1Ps6nsssm
8,610
Add the ability to import from gguf directly without a Modelfile
{ "login": "LeC-D", "id": 17554693, "node_id": "MDQ6VXNlcjE3NTU0Njkz", "avatar_url": "https://avatars.githubusercontent.com/u/17554693?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LeC-D", "html_url": "https://github.com/LeC-D", "followers_url": "https://api.github.com/users/LeC-D/followers", "following_url": "https://api.github.com/users/LeC-D/following{/other_user}", "gists_url": "https://api.github.com/users/LeC-D/gists{/gist_id}", "starred_url": "https://api.github.com/users/LeC-D/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LeC-D/subscriptions", "organizations_url": "https://api.github.com/users/LeC-D/orgs", "repos_url": "https://api.github.com/users/LeC-D/repos", "events_url": "https://api.github.com/users/LeC-D/events{/privacy}", "received_events_url": "https://api.github.com/users/LeC-D/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
closed
false
null
[]
null
0
2025-01-27T16:42:27
2025-01-27T17:11:11
2025-01-27T17:11:11
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
null
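For context, the flow this request wants to shortcut is the documented two-step import: write a one-line Modelfile pointing at the GGUF, then run `ollama create`. A sketch of that flow (the file paths and model name are placeholders):

```python
# Sketch of the existing GGUF import flow that goes through a Modelfile.
import pathlib
import subprocess

gguf = pathlib.Path("downloads/model.gguf")  # placeholder path
pathlib.Path("Modelfile").write_text(f"FROM {gguf}\n")
subprocess.run(["ollama", "create", "my-model", "-f", "Modelfile"], check=True)
subprocess.run(["ollama", "run", "my-model", "Hello"], check=True)
```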
{ "login": "LeC-D", "id": 17554693, "node_id": "MDQ6VXNlcjE3NTU0Njkz", "avatar_url": "https://avatars.githubusercontent.com/u/17554693?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LeC-D", "html_url": "https://github.com/LeC-D", "followers_url": "https://api.github.com/users/LeC-D/followers", "following_url": "https://api.github.com/users/LeC-D/following{/other_user}", "gists_url": "https://api.github.com/users/LeC-D/gists{/gist_id}", "starred_url": "https://api.github.com/users/LeC-D/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LeC-D/subscriptions", "organizations_url": "https://api.github.com/users/LeC-D/orgs", "repos_url": "https://api.github.com/users/LeC-D/repos", "events_url": "https://api.github.com/users/LeC-D/events{/privacy}", "received_events_url": "https://api.github.com/users/LeC-D/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8610/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8610/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5463
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5463/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5463/comments
https://api.github.com/repos/ollama/ollama/issues/5463/events
https://github.com/ollama/ollama/issues/5463
2,388,916,789
I_kwDOJ0Z1Ps6OY_o1
5,463
InternLM2.5 - 7 billion parameters with 1M context length
{ "login": "Qualzz", "id": 35169816, "node_id": "MDQ6VXNlcjM1MTY5ODE2", "avatar_url": "https://avatars.githubusercontent.com/u/35169816?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Qualzz", "html_url": "https://github.com/Qualzz", "followers_url": "https://api.github.com/users/Qualzz/followers", "following_url": "https://api.github.com/users/Qualzz/following{/other_user}", "gists_url": "https://api.github.com/users/Qualzz/gists{/gist_id}", "starred_url": "https://api.github.com/users/Qualzz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Qualzz/subscriptions", "organizations_url": "https://api.github.com/users/Qualzz/orgs", "repos_url": "https://api.github.com/users/Qualzz/repos", "events_url": "https://api.github.com/users/Qualzz/events{/privacy}", "received_events_url": "https://api.github.com/users/Qualzz/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
3
2024-07-03T15:26:34
2024-07-03T23:16:23
2024-07-03T23:16:23
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
[Link to the collection](https://huggingface.co/collections/internlm/internlm25-66853f32717072d17581bc13) Introduction InternLM2.5 has open-sourced a 7-billion-parameter base model and a chat model tailored for practical scenarios. The model has the following characteristics: Outstanding reasoning capability: state-of-the-art performance on math reasoning, surpassing models like Llama3 and Gemma2-9B. 1M context window: nearly perfect at finding needles in the haystack with a 1M-long context, with leading performance on long-context tasks like LongBench. Try it with [LMDeploy](https://huggingface.co/internlm/internlm2_5-7b-chat-1m/blob/main/chat/lmdeploy.md) for 1M-context inference. Stronger tool use: InternLM2.5 supports gathering information from more than 100 web pages; the corresponding implementation will be released in [Lagent](https://github.com/InternLM/lagent/tree/main) soon. InternLM2.5 has better tool-use capabilities in instruction following, tool selection, and reflection. See [examples](https://huggingface.co/internlm/internlm2_5-7b-chat-1m/blob/main/agent/).
{ "login": "Qualzz", "id": 35169816, "node_id": "MDQ6VXNlcjM1MTY5ODE2", "avatar_url": "https://avatars.githubusercontent.com/u/35169816?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Qualzz", "html_url": "https://github.com/Qualzz", "followers_url": "https://api.github.com/users/Qualzz/followers", "following_url": "https://api.github.com/users/Qualzz/following{/other_user}", "gists_url": "https://api.github.com/users/Qualzz/gists{/gist_id}", "starred_url": "https://api.github.com/users/Qualzz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Qualzz/subscriptions", "organizations_url": "https://api.github.com/users/Qualzz/orgs", "repos_url": "https://api.github.com/users/Qualzz/repos", "events_url": "https://api.github.com/users/Qualzz/events{/privacy}", "received_events_url": "https://api.github.com/users/Qualzz/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5463/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5463/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/7932
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7932/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7932/comments
https://api.github.com/repos/ollama/ollama/issues/7932/events
https://github.com/ollama/ollama/pull/7932
2,718,376,460
PR_kwDOJ0Z1Ps6EEvme
7,932
fix unmarshaling merges
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-12-04T17:25:24
2024-12-05T01:10:38
2024-12-04T18:04:52
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7932", "html_url": "https://github.com/ollama/ollama/pull/7932", "diff_url": "https://github.com/ollama/ollama/pull/7932.diff", "patch_url": "https://github.com/ollama/ollama/pull/7932.patch", "merged_at": "2024-12-04T18:04:52" }
some models use `[][]string` as the `.model.merges` value rather than `[]string`
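The fix itself lives in the Go converter; purely as an illustration of the shape problem (a sketch, not the PR's code), both encodings can be normalized to the flat form:

```python
# Illustrative only: "merges" may arrive as ["t h", "h e"] or as
# [["t", "h"], ["h", "e"]]; normalize both to the flat string form.
import json

def normalize_merges(raw):
    return [m if isinstance(m, str) else " ".join(m) for m in raw]

flat = json.loads('["t h", "h e"]')
nested = json.loads('[["t", "h"], ["h", "e"]]')
assert normalize_merges(flat) == normalize_merges(nested) == ["t h", "h e"]
```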
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7932/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7932/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/8582
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8582/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8582/comments
https://api.github.com/repos/ollama/ollama/issues/8582/events
https://github.com/ollama/ollama/issues/8582
2,811,053,112
I_kwDOJ0Z1Ps6njUQ4
8,582
ollama version is 0.5.7-0-ga420a45-dirty
{ "login": "Mario4272", "id": 19327923, "node_id": "MDQ6VXNlcjE5MzI3OTIz", "avatar_url": "https://avatars.githubusercontent.com/u/19327923?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Mario4272", "html_url": "https://github.com/Mario4272", "followers_url": "https://api.github.com/users/Mario4272/followers", "following_url": "https://api.github.com/users/Mario4272/following{/other_user}", "gists_url": "https://api.github.com/users/Mario4272/gists{/gist_id}", "starred_url": "https://api.github.com/users/Mario4272/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mario4272/subscriptions", "organizations_url": "https://api.github.com/users/Mario4272/orgs", "repos_url": "https://api.github.com/users/Mario4272/repos", "events_url": "https://api.github.com/users/Mario4272/events{/privacy}", "received_events_url": "https://api.github.com/users/Mario4272/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
0
2025-01-25T16:02:44
2025-01-25T16:02:44
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? Can we talk about how to fix this? ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.5.7
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8582/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8582/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/4314
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4314/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4314/comments
https://api.github.com/repos/ollama/ollama/issues/4314/events
https://github.com/ollama/ollama/issues/4314
2,290,014,338
I_kwDOJ0Z1Ps6IftiC
4,314
The AI model download is not working
{ "login": "Tochage143", "id": 108989114, "node_id": "U_kgDOBn8Kug", "avatar_url": "https://avatars.githubusercontent.com/u/108989114?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Tochage143", "html_url": "https://github.com/Tochage143", "followers_url": "https://api.github.com/users/Tochage143/followers", "following_url": "https://api.github.com/users/Tochage143/following{/other_user}", "gists_url": "https://api.github.com/users/Tochage143/gists{/gist_id}", "starred_url": "https://api.github.com/users/Tochage143/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Tochage143/subscriptions", "organizations_url": "https://api.github.com/users/Tochage143/orgs", "repos_url": "https://api.github.com/users/Tochage143/repos", "events_url": "https://api.github.com/users/Tochage143/events{/privacy}", "received_events_url": "https://api.github.com/users/Tochage143/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
6
2024-05-10T16:12:09
2025-01-25T07:58:29
2024-05-24T06:32:04
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? ollama run mistral:text pulling manifest Error: Head "https://dd20bb891979d25aebc8bec07b2b3bbc.r2.cloudflarestorage.com/ollama/docker/registry/v2/blobs/sha256/5b/5b5c2a563a287aa9bf9be7499fe7e0630add02089be3f50ee494087f67683fbb/data?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=66040c77ac1b787c3af820529859349a%!F(MISSING)20240510%!F(MISSING)auto%!F(MISSING)s3%!F(MISSING)aws4_request&X-Amz-Date=20240510T160627Z&X-Amz-Expires=1200&X-Amz-SignedHeaders=host&X-Amz-Signature=21ab36ad0c30857906a5f710d6d26dd63c905bd84ba24e580f43dff8b8b96133": dial tcp: lookup dd20bb891979d25aebc8bec07b2b3bbc.r2.cloudflarestorage.com: no such host ### OS Windows ### GPU Nvidia ### CPU Intel ### Ollama version v0.1.34
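The `no such host` at the end of that error is a DNS failure on the client machine rather than a registry problem, so a quick resolution check (a sketch; the hostname is copied from the error above) helps narrow it down:

```python
# Check whether the blob host from the pull error resolves at all.
import socket

host = "dd20bb891979d25aebc8bec07b2b3bbc.r2.cloudflarestorage.com"
try:
    addr = socket.getaddrinfo(host, 443)[0][4][0]
    print("resolves to", addr)
except socket.gaierror as exc:
    print("DNS lookup failed:", exc)
```

If the lookup fails here too, the fix is on the DNS/VPN/firewall side, not in Ollama.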
{ "login": "Tochage143", "id": 108989114, "node_id": "U_kgDOBn8Kug", "avatar_url": "https://avatars.githubusercontent.com/u/108989114?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Tochage143", "html_url": "https://github.com/Tochage143", "followers_url": "https://api.github.com/users/Tochage143/followers", "following_url": "https://api.github.com/users/Tochage143/following{/other_user}", "gists_url": "https://api.github.com/users/Tochage143/gists{/gist_id}", "starred_url": "https://api.github.com/users/Tochage143/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Tochage143/subscriptions", "organizations_url": "https://api.github.com/users/Tochage143/orgs", "repos_url": "https://api.github.com/users/Tochage143/repos", "events_url": "https://api.github.com/users/Tochage143/events{/privacy}", "received_events_url": "https://api.github.com/users/Tochage143/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4314/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4314/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3167
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3167/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3167/comments
https://api.github.com/repos/ollama/ollama/issues/3167/events
https://github.com/ollama/ollama/issues/3167
2,188,233,292
I_kwDOJ0Z1Ps6CbcpM
3,167
No welcoming window & pulling manifest Error: pull model manifest: file does not exist
{ "login": "HWiwoiiii", "id": 103039908, "node_id": "U_kgDOBiRDpA", "avatar_url": "https://avatars.githubusercontent.com/u/103039908?v=4", "gravatar_id": "", "url": "https://api.github.com/users/HWiwoiiii", "html_url": "https://github.com/HWiwoiiii", "followers_url": "https://api.github.com/users/HWiwoiiii/followers", "following_url": "https://api.github.com/users/HWiwoiiii/following{/other_user}", "gists_url": "https://api.github.com/users/HWiwoiiii/gists{/gist_id}", "starred_url": "https://api.github.com/users/HWiwoiiii/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/HWiwoiiii/subscriptions", "organizations_url": "https://api.github.com/users/HWiwoiiii/orgs", "repos_url": "https://api.github.com/users/HWiwoiiii/repos", "events_url": "https://api.github.com/users/HWiwoiiii/events{/privacy}", "received_events_url": "https://api.github.com/users/HWiwoiiii/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
2024-03-15T10:49:58
2024-03-18T08:22:58
2024-03-18T08:22:58
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? When I first installed Ollama, I didn't see the welcome window pop up, just a Windows notification saying "click here to start". Clicking that notification gives me a PowerShell window as shown, and when I tried to run the command to get started it gave me the error below: pulling manifest Error: pull model manifest: file does not exist ![image](https://github.com/ollama/ollama/assets/103039908/66196abc-46c4-49f7-902f-6ba304650ccd) ### What did you expect to see? There should be a welcome window for me to get started ### Steps to reproduce Reinstall ### Are there any recent changes that introduced the issue? no change ### OS Windows ### Architecture Other ### Platform WSL, WSL2 ### Ollama version 0.1.29 ### GPU Nvidia ### GPU info w ### CPU Intel ### Other software Open WebUI
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3167/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3167/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8670
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8670/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8670/comments
https://api.github.com/repos/ollama/ollama/issues/8670/events
https://github.com/ollama/ollama/issues/8670
2,819,110,416
I_kwDOJ0Z1Ps6oCDYQ
8,670
Ollama official website API for fetching the models and their information
{ "login": "ALAWIII", "id": 60029291, "node_id": "MDQ6VXNlcjYwMDI5Mjkx", "avatar_url": "https://avatars.githubusercontent.com/u/60029291?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ALAWIII", "html_url": "https://github.com/ALAWIII", "followers_url": "https://api.github.com/users/ALAWIII/followers", "following_url": "https://api.github.com/users/ALAWIII/following{/other_user}", "gists_url": "https://api.github.com/users/ALAWIII/gists{/gist_id}", "starred_url": "https://api.github.com/users/ALAWIII/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ALAWIII/subscriptions", "organizations_url": "https://api.github.com/users/ALAWIII/orgs", "repos_url": "https://api.github.com/users/ALAWIII/repos", "events_url": "https://api.github.com/users/ALAWIII/events{/privacy}", "received_events_url": "https://api.github.com/users/ALAWIII/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 6573197867, "node_id": "LA_kwDOJ0Z1Ps8AAAABh8sKKw", "url": "https://api.github.com/repos/ollama/ollama/labels/ollama.com", "name": "ollama.com", "color": "ffffff", "default": false, "description": "" } ]
open
false
null
[]
null
0
2025-01-29T19:40:32
2025-01-29T22:31:42
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
We need an API for the ollama website, where all the models and their description details are stored, so that we can automatically fetch that data and embed it in our various apps!
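No such endpoint exists today, so purely as a sketch of the requested shape (the URL and the response fields below are invented for illustration, not a real ollama.com API):

```python
# Hypothetical catalog API for ollama.com -- everything here is invented.
import requests

resp = requests.get("https://ollama.com/api/models")  # does not exist today
resp.raise_for_status()
for model in resp.json():
    print(f'{model["name"]}: {model["description"]}')
```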
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8670/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 1, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8670/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/1779
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1779/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1779/comments
https://api.github.com/repos/ollama/ollama/issues/1779/events
https://github.com/ollama/ollama/pull/1779
2,064,772,178
PR_kwDOJ0Z1Ps5jLXYa
1,779
Improve maintainability of Radeon card list
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-01-03T23:18:04
2024-01-04T00:19:00
2024-01-04T00:18:57
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/1779", "html_url": "https://github.com/ollama/ollama/pull/1779", "diff_url": "https://github.com/ollama/ollama/pull/1779.diff", "patch_url": "https://github.com/ollama/ollama/pull/1779.patch", "merged_at": "2024-01-04T00:18:57" }
This moves the list of supported AMD GPUs into an easier-to-maintain form, which should simplify updates over time.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1779/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1779/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5789
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5789/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5789/comments
https://api.github.com/repos/ollama/ollama/issues/5789/events
https://github.com/ollama/ollama/issues/5789
2,418,305,371
I_kwDOJ0Z1Ps6QJGlb
5,789
(linux) Snap support for Linux installation; this will solve many issues
{ "login": "olumolu", "id": 162728301, "node_id": "U_kgDOCbMJbQ", "avatar_url": "https://avatars.githubusercontent.com/u/162728301?v=4", "gravatar_id": "", "url": "https://api.github.com/users/olumolu", "html_url": "https://github.com/olumolu", "followers_url": "https://api.github.com/users/olumolu/followers", "following_url": "https://api.github.com/users/olumolu/following{/other_user}", "gists_url": "https://api.github.com/users/olumolu/gists{/gist_id}", "starred_url": "https://api.github.com/users/olumolu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/olumolu/subscriptions", "organizations_url": "https://api.github.com/users/olumolu/orgs", "repos_url": "https://api.github.com/users/olumolu/repos", "events_url": "https://api.github.com/users/olumolu/events{/privacy}", "received_events_url": "https://api.github.com/users/olumolu/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" } ]
open
false
null
[]
null
0
2024-07-19T07:59:05
2024-07-19T08:16:45
null
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
This will give the app a sandboxed approach to installation, and it also brings other security benefits: https://snapcraft.io/ * Simplified installation: snaps eliminate the need to manually resolve dependency issues, making software installation easier for users. * Isolation: they isolate applications from the system's core components and other applications, enhancing security and stability. * Faster updates: they enable faster and more frequent application updates without affecting the system. Updates are pushed automatically, so the app can be kept up to date very easily.
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5789/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5789/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/6999
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6999/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6999/comments
https://api.github.com/repos/ollama/ollama/issues/6999/events
https://github.com/ollama/ollama/issues/6999
2,552,378,331
I_kwDOJ0Z1Ps6YIjPb
6,999
Jetson CUDA error
{ "login": "jarek7777", "id": 72649794, "node_id": "MDQ6VXNlcjcyNjQ5Nzk0", "avatar_url": "https://avatars.githubusercontent.com/u/72649794?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jarek7777", "html_url": "https://github.com/jarek7777", "followers_url": "https://api.github.com/users/jarek7777/followers", "following_url": "https://api.github.com/users/jarek7777/following{/other_user}", "gists_url": "https://api.github.com/users/jarek7777/gists{/gist_id}", "starred_url": "https://api.github.com/users/jarek7777/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jarek7777/subscriptions", "organizations_url": "https://api.github.com/users/jarek7777/orgs", "repos_url": "https://api.github.com/users/jarek7777/repos", "events_url": "https://api.github.com/users/jarek7777/events{/privacy}", "received_events_url": "https://api.github.com/users/jarek7777/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 5755339642, "node_id": "LA_kwDOJ0Z1Ps8AAAABVwuDeg", "url": "https://api.github.com/repos/ollama/ollama/labels/linux", "name": "linux", "color": "516E70", "default": false, "description": "" }, { "id": 6430601766, "node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg", "url": "https://api.github.com/repos/ollama/ollama/labels/nvidia", "name": "nvidia", "color": "8CDB00", "default": false, "description": "Issues relating to Nvidia GPUs and CUDA" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
1
2024-09-27T08:50:47
2024-11-12T18:31:53
2024-11-12T18:31:53
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I've built Ollama from source to run with CUDA, but unfortunately it doesn't start any more; I think it has been the same error since version 0.3.10. Before 0.3.10 everything was fine: I was able to build Ollama with CUDA and run any model, but now even the smallest 1B model crashes. I have the same JetPack 6.0 with CUDA 12.2.140. Error: llama runner process has terminated: CUDA error: the resource allocation failed current device: 0, in function cublas_handle at ollama/llm/llama.cpp/ggml/src/ggml-cuda/common.cuh:644 cublasCreate_v2(&cublas_handles[device]) ollama/llm/llama.cpp/ggml/src/ggml-cuda.cu:102: CUDA error ### OS Linux ### GPU Nvidia, Other ### CPU Other ### Ollama version 0.3.12
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6999/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6999/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5513
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5513/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5513/comments
https://api.github.com/repos/ollama/ollama/issues/5513/events
https://github.com/ollama/ollama/pull/5513
2,393,351,374
PR_kwDOJ0Z1Ps50k-rf
5,513
llm: add `COMMON_DARWIN_DEFS` to arm static build
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-07-06T02:42:26
2024-07-06T02:42:43
2024-07-06T02:42:42
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5513", "html_url": "https://github.com/ollama/ollama/pull/5513", "diff_url": "https://github.com/ollama/ollama/pull/5513.diff", "patch_url": "https://github.com/ollama/ollama/pull/5513.patch", "merged_at": "2024-07-06T02:42:42" }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5513/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5513/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7040
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7040/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7040/comments
https://api.github.com/repos/ollama/ollama/issues/7040/events
https://github.com/ollama/ollama/issues/7040
2,555,760,452
I_kwDOJ0Z1Ps6YVc9E
7,040
llama 3.2
{ "login": "olumolu", "id": 162728301, "node_id": "U_kgDOCbMJbQ", "avatar_url": "https://avatars.githubusercontent.com/u/162728301?v=4", "gravatar_id": "", "url": "https://api.github.com/users/olumolu", "html_url": "https://github.com/olumolu", "followers_url": "https://api.github.com/users/olumolu/followers", "following_url": "https://api.github.com/users/olumolu/following{/other_user}", "gists_url": "https://api.github.com/users/olumolu/gists{/gist_id}", "starred_url": "https://api.github.com/users/olumolu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/olumolu/subscriptions", "organizations_url": "https://api.github.com/users/olumolu/orgs", "repos_url": "https://api.github.com/users/olumolu/repos", "events_url": "https://api.github.com/users/olumolu/events{/privacy}", "received_events_url": "https://api.github.com/users/olumolu/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
2
2024-09-30T07:09:17
2024-09-30T21:05:54
2024-09-30T21:05:54
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
The 11b and 90b variants are not available in Ollama; can we have support for these?
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7040/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7040/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8246
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8246/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8246/comments
https://api.github.com/repos/ollama/ollama/issues/8246/events
https://github.com/ollama/ollama/issues/8246
2,759,431,080
I_kwDOJ0Z1Ps6keZOo
8,246
Significant Performance Differences of ARM64 Installation Package Across Different CPU Architectures
{ "login": "lin12058", "id": 37627813, "node_id": "MDQ6VXNlcjM3NjI3ODEz", "avatar_url": "https://avatars.githubusercontent.com/u/37627813?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lin12058", "html_url": "https://github.com/lin12058", "followers_url": "https://api.github.com/users/lin12058/followers", "following_url": "https://api.github.com/users/lin12058/following{/other_user}", "gists_url": "https://api.github.com/users/lin12058/gists{/gist_id}", "starred_url": "https://api.github.com/users/lin12058/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lin12058/subscriptions", "organizations_url": "https://api.github.com/users/lin12058/orgs", "repos_url": "https://api.github.com/users/lin12058/repos", "events_url": "https://api.github.com/users/lin12058/events{/privacy}", "received_events_url": "https://api.github.com/users/lin12058/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
8
2024-12-26T07:57:42
2025-01-13T01:46:47
2025-01-13T01:46:47
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? When using an ARM64 installation package for a large language model with 1.5B parameters, we have observed significant differences in performance across different CPU architectures. Specifically, on the MediaTek Dimensity 8100 processor, the inference speed is very fast, capable of processing multiple tokens per second; however, on the Qualcomm Snapdragon 855 processor, the same task is extremely slow, typically taking 5 to 10 seconds to process a single token. > In traditional CPU benchmarks the Dimensity 8100 does score higher than the Snapdragon 855, but for this workload the 8100's effective throughput is roughly 100 times that of the 855. Here is a comparison between the CPU instruction sets of the MediaTek Dimensity 8100 and the Qualcomm Snapdragon 855: ### Qualcomm Snapdragon 855: > The Snapdragon 855 utilizes a Kryo 485 CPU, which is a customized design based on ARM Cortex technology, specifically derived from the Cortex-A76 and Cortex-A55 architectures. These cores are also based on the ARMv8-A instruction set architecture, but may be more specifically rooted in ARMv8.2-A or even ARMv8.4-A, as the A76 architecture brought new capabilities such as enhanced memory subsystem performance and improved branch prediction. ### Dimensity 8100: > The CPU cores of the Dimensity 8100 are based on the Cortex-A78 and Cortex-A55 architectures, both of which are part of the ARMv8-A family. ARMv8-A introduced support for 64-bit computing (AArch64 state) while also maintaining compatibility with existing 32-bit code (AArch32 state). The Cortex-A78 and Cortex-A55 are part of the ARMv8.2-A specification, indicating they also support additional features like FP16 (half-precision floating-point) computations, which are beneficial for machine learning tasks. # Based on my understanding, both instruction sets should support the necessary features, and therefore there should not be such a significant performance gap.
## Qualcomm Snapdragon 855 ``` Architecture: aarch64 CPU op-mode(s): 32-bit, 64-bit Byte Order: Little Endian CPU(s): 8 On-line CPU(s) list: 0-7 Thread(s) per core: 1 Core(s) per socket: 2 Socket(s): 3 Vendor ID: Qualcomm Model: 14 Stepping: 0xd CPU max MHz: 2841.6001 CPU min MHz: 300.0000 BogoMIPS: 38.40 Vulnerability Itlb multihit: Not affected Vulnerability L1tf: Not affected Vulnerability Mds: Not affected Vulnerability Meltdown: Mitigation; PTI Vulnerability Spec store bypass: Vulnerable Vulnerability Spectre v1: Mitigation; __user pointer sanitization Vulnerability Spectre v2: Mitigation; Branch predictor hardening Vulnerability Tsx async abort: Not affected Flags: fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp asimdhp cpuid asimdrdm lrcpc dcpop asimddp ``` ## MediaTek Dimensity 8100 ``` Architecture: aarch64 CPU op-mode(s): 32-bit, 64-bit Byte Order: Little Endian CPU(s): 8 On-line CPU(s) list: 0-7 Thread(s) per core: 1 Core(s) per socket: 2 Socket(s): 3 Vendor ID: ARM Model: 0 Model name: Cortex-A55 Stepping: r2p0 CPU max MHz: 2850.0000 CPU min MHz: 200.0000 BogoMIPS: 26.00 Vulnerability Gather data sampling: Not affected Vulnerability Itlb multihit: Not affected Vulnerability L1tf: Not affected Vulnerability Mds: Not affected Vulnerability Meltdown: Not affected Vulnerability Mmio stale data: Not affected Vulnerability Retbleed: Not affected Vulnerability Spec rstack overflow: Not affected Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl Vulnerability Spectre v1: Mitigation; __user pointer sanitization Vulnerability Spectre v2: Vulnerable: Unprivileged eBPF enabled Vulnerability Srbds: Not affected Vulnerability Tsx async abort: Not affected Flags: fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp asimdhp cpuid asimdrdm lrcpc dcpop asimddp ``` ### OS android - aidlux : The virtual environment https://community.aidlux.com/aidluxdownload ### GPU No GPU ### CPU Qualcomm Snapdragon 855 MediaTek Dimensity 8100 ### Ollama version ollama version is 0.5.4
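One diagnostic worth running on both phones (a sketch, not a confirmed root cause): check which of the ARM features that llama.cpp's CPU kernels exploit are actually advertised, since a missing extension such as `i8mm` or scheduling onto the little cores can dwarf any benchmark-score difference.

```python
# Sketch: report which performance-relevant ARM features the kernel advertises.
wanted = {"fphp", "asimdhp", "asimddp", "i8mm", "sve"}
flags = set()
with open("/proc/cpuinfo") as f:
    for line in f:
        if line.lower().startswith("features"):
            flags |= set(line.split(":", 1)[1].split())
print("present:", sorted(wanted & flags))
print("missing:", sorted(wanted - flags))
```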
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8246/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8246/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3890
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3890/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3890/comments
https://api.github.com/repos/ollama/ollama/issues/3890/events
https://github.com/ollama/ollama/issues/3890
2,262,142,683
I_kwDOJ0Z1Ps6G1Y7b
3,890
settings>chat hanging when selected
{ "login": "arjunkrishna", "id": 5271912, "node_id": "MDQ6VXNlcjUyNzE5MTI=", "avatar_url": "https://avatars.githubusercontent.com/u/5271912?v=4", "gravatar_id": "", "url": "https://api.github.com/users/arjunkrishna", "html_url": "https://github.com/arjunkrishna", "followers_url": "https://api.github.com/users/arjunkrishna/followers", "following_url": "https://api.github.com/users/arjunkrishna/following{/other_user}", "gists_url": "https://api.github.com/users/arjunkrishna/gists{/gist_id}", "starred_url": "https://api.github.com/users/arjunkrishna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/arjunkrishna/subscriptions", "organizations_url": "https://api.github.com/users/arjunkrishna/orgs", "repos_url": "https://api.github.com/users/arjunkrishna/repos", "events_url": "https://api.github.com/users/arjunkrishna/events{/privacy}", "received_events_url": "https://api.github.com/users/arjunkrishna/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
0
2024-04-24T20:37:04
2024-04-24T20:43:11
2024-04-24T20:43:11
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? The Settings > Chat link makes the whole UI hang and requires a page refresh. Command used to host the image: ```powershell docker run -d -p 3000:8081 -e OLLAMA_BASE_URL=http://host.docker.internal:11434 -e PORT=8081 -e MODEL_FILTER_ENABLED=True -e MODEL_FILTER_LIST="mistral:latest;llava:latest;llama3:latest" -e USER_PERMISSIONS_CHAT_DELETION=True -e DEFAULT_USER_ROLE=user -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main ``` ### OS Windows, Docker, WSL2 ### GPU Nvidia ### CPU Intel ### Ollama version v0.1.121
{ "login": "arjunkrishna", "id": 5271912, "node_id": "MDQ6VXNlcjUyNzE5MTI=", "avatar_url": "https://avatars.githubusercontent.com/u/5271912?v=4", "gravatar_id": "", "url": "https://api.github.com/users/arjunkrishna", "html_url": "https://github.com/arjunkrishna", "followers_url": "https://api.github.com/users/arjunkrishna/followers", "following_url": "https://api.github.com/users/arjunkrishna/following{/other_user}", "gists_url": "https://api.github.com/users/arjunkrishna/gists{/gist_id}", "starred_url": "https://api.github.com/users/arjunkrishna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/arjunkrishna/subscriptions", "organizations_url": "https://api.github.com/users/arjunkrishna/orgs", "repos_url": "https://api.github.com/users/arjunkrishna/repos", "events_url": "https://api.github.com/users/arjunkrishna/events{/privacy}", "received_events_url": "https://api.github.com/users/arjunkrishna/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3890/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3890/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3691
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3691/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3691/comments
https://api.github.com/repos/ollama/ollama/issues/3691/events
https://github.com/ollama/ollama/issues/3691
2,247,350,100
I_kwDOJ0Z1Ps6F89dU
3,691
why does wizard 8*22b run on CPU?
{ "login": "taozhiyuai", "id": 146583103, "node_id": "U_kgDOCLyuPw", "avatar_url": "https://avatars.githubusercontent.com/u/146583103?v=4", "gravatar_id": "", "url": "https://api.github.com/users/taozhiyuai", "html_url": "https://github.com/taozhiyuai", "followers_url": "https://api.github.com/users/taozhiyuai/followers", "following_url": "https://api.github.com/users/taozhiyuai/following{/other_user}", "gists_url": "https://api.github.com/users/taozhiyuai/gists{/gist_id}", "starred_url": "https://api.github.com/users/taozhiyuai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/taozhiyuai/subscriptions", "organizations_url": "https://api.github.com/users/taozhiyuai/orgs", "repos_url": "https://api.github.com/users/taozhiyuai/repos", "events_url": "https://api.github.com/users/taozhiyuai/events{/privacy}", "received_events_url": "https://api.github.com/users/taozhiyuai/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
4
2024-04-17T04:33:15
2024-05-02T20:49:53
2024-04-17T20:45:14
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? <img width="1148" alt="Screenshot 2024-04-17 12 23 02" src="https://github.com/ollama/ollama/assets/146583103/4121905b-51b5-4435-a323-ea773e2015a8"> As shown in the picture, it runs on the CPU. My laptop is a Mac M3 Max with 128GB. LM Studio cannot run wizardlm-2-8*22b-Q8 (it OOMs), but Ollama can run it, though apparently on the CPU. Is it possible to run it on the GPU? ### What did you expect to see? _No response_ ### Steps to reproduce _No response_ ### Are there any recent changes that introduced the issue? _No response_ ### OS macOS ### Architecture _No response_ ### Platform _No response_ ### Ollama version up to date ### GPU Apple ### GPU info _No response_ ### CPU Apple ### Other software _No response_
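One thing to try (a sketch, not a confirmed fix): explicitly request full Metal offload via the `num_gpu` option when calling the local API. The model tag below assumes the library's `wizardlm2:8x22b` naming; substitute whatever `ollama list` shows.

```python
# Sketch: ask Ollama to offload as many layers as possible to the GPU.
import requests

resp = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "wizardlm2:8x22b",  # assumed tag; adjust to `ollama list`
        "prompt": "Say hi",
        "stream": False,
        "options": {"num_gpu": 999},  # request maximum GPU offload
    },
    timeout=600,
)
print(resp.json().get("response"))
```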
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3691/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3691/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/4007
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4007/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4007/comments
https://api.github.com/repos/ollama/ollama/issues/4007/events
https://github.com/ollama/ollama/pull/4007
2,267,682,586
PR_kwDOJ0Z1Ps5t8PQB
4,007
Fix typos in README.md
{ "login": "arpitjain099", "id": 3242828, "node_id": "MDQ6VXNlcjMyNDI4Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/3242828?v=4", "gravatar_id": "", "url": "https://api.github.com/users/arpitjain099", "html_url": "https://github.com/arpitjain099", "followers_url": "https://api.github.com/users/arpitjain099/followers", "following_url": "https://api.github.com/users/arpitjain099/following{/other_user}", "gists_url": "https://api.github.com/users/arpitjain099/gists{/gist_id}", "starred_url": "https://api.github.com/users/arpitjain099/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/arpitjain099/subscriptions", "organizations_url": "https://api.github.com/users/arpitjain099/orgs", "repos_url": "https://api.github.com/users/arpitjain099/repos", "events_url": "https://api.github.com/users/arpitjain099/events{/privacy}", "received_events_url": "https://api.github.com/users/arpitjain099/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-04-28T16:58:10
2024-05-01T17:39:53
2024-05-01T17:39:38
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4007", "html_url": "https://github.com/ollama/ollama/pull/4007", "diff_url": "https://github.com/ollama/ollama/pull/4007.diff", "patch_url": "https://github.com/ollama/ollama/pull/4007.patch", "merged_at": "2024-05-01T17:39:38" }
- There was a trailing comma instead of a period - Some other minor changes
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4007/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4007/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5115
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5115/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5115/comments
https://api.github.com/repos/ollama/ollama/issues/5115/events
https://github.com/ollama/ollama/issues/5115
2,359,736,579
I_kwDOJ0Z1Ps6MprkD
5,115
How to produce an embeddings model file? No examples
{ "login": "qdrddr", "id": 564658, "node_id": "MDQ6VXNlcjU2NDY1OA==", "avatar_url": "https://avatars.githubusercontent.com/u/564658?v=4", "gravatar_id": "", "url": "https://api.github.com/users/qdrddr", "html_url": "https://github.com/qdrddr", "followers_url": "https://api.github.com/users/qdrddr/followers", "following_url": "https://api.github.com/users/qdrddr/following{/other_user}", "gists_url": "https://api.github.com/users/qdrddr/gists{/gist_id}", "starred_url": "https://api.github.com/users/qdrddr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/qdrddr/subscriptions", "organizations_url": "https://api.github.com/users/qdrddr/orgs", "repos_url": "https://api.github.com/users/qdrddr/repos", "events_url": "https://api.github.com/users/qdrddr/events{/privacy}", "received_events_url": "https://api.github.com/users/qdrddr/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
0
2024-06-18T12:23:48
2024-06-18T12:23:48
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi, how do I produce an embeddings model file? The Modelfile example on GitHub doesn't mention embeddings: https://github.com/ollama/ollama/blob/main/docs/modelfile.md

For example, for these popular leading models:
https://huggingface.co/GritLM/GritLM-7B
https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct

Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5115/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5115/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/1044
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1044/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1044/comments
https://api.github.com/repos/ollama/ollama/issues/1044/events
https://github.com/ollama/ollama/pull/1044
1,983,962,515
PR_kwDOJ0Z1Ps5e8kzm
1,044
Added Ollama4j (Java library) to community integrations
{ "login": "amithkoujalgi", "id": 1876165, "node_id": "MDQ6VXNlcjE4NzYxNjU=", "avatar_url": "https://avatars.githubusercontent.com/u/1876165?v=4", "gravatar_id": "", "url": "https://api.github.com/users/amithkoujalgi", "html_url": "https://github.com/amithkoujalgi", "followers_url": "https://api.github.com/users/amithkoujalgi/followers", "following_url": "https://api.github.com/users/amithkoujalgi/following{/other_user}", "gists_url": "https://api.github.com/users/amithkoujalgi/gists{/gist_id}", "starred_url": "https://api.github.com/users/amithkoujalgi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/amithkoujalgi/subscriptions", "organizations_url": "https://api.github.com/users/amithkoujalgi/orgs", "repos_url": "https://api.github.com/users/amithkoujalgi/repos", "events_url": "https://api.github.com/users/amithkoujalgi/events{/privacy}", "received_events_url": "https://api.github.com/users/amithkoujalgi/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2023-11-08T16:18:12
2023-11-08T19:04:33
2023-11-08T19:04:33
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/1044", "html_url": "https://github.com/ollama/ollama/pull/1044", "diff_url": "https://github.com/ollama/ollama/pull/1044.diff", "patch_url": "https://github.com/ollama/ollama/pull/1044.patch", "merged_at": "2023-11-08T19:04:32" }
Hi @jmorganca, thank you so much for your efforts in building and constantly updating such a cool piece of software. I really like Ollama, have been experimenting with it regularly, and plan to integrate it into many more Java applications. I noticed there isn't a Java library listed under community integrations, so I created one for interacting with Ollama's REST APIs and published it as the Ollama4j package, which I plan to keep improving. This way, others who work with Java can easily interact with the Ollama API. Thanks again, and keep up the great work!
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1044/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1044/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4988
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4988/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4988/comments
https://api.github.com/repos/ollama/ollama/issues/4988/events
https://github.com/ollama/ollama/issues/4988
2,347,611,501
I_kwDOJ0Z1Ps6L7bVt
4,988
[Model Request] Add dolphin-qwen2
{ "login": "mak448a", "id": 94062293, "node_id": "U_kgDOBZtG1Q", "avatar_url": "https://avatars.githubusercontent.com/u/94062293?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mak448a", "html_url": "https://github.com/mak448a", "followers_url": "https://api.github.com/users/mak448a/followers", "following_url": "https://api.github.com/users/mak448a/following{/other_user}", "gists_url": "https://api.github.com/users/mak448a/gists{/gist_id}", "starred_url": "https://api.github.com/users/mak448a/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mak448a/subscriptions", "organizations_url": "https://api.github.com/users/mak448a/orgs", "repos_url": "https://api.github.com/users/mak448a/repos", "events_url": "https://api.github.com/users/mak448a/events{/privacy}", "received_events_url": "https://api.github.com/users/mak448a/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
open
false
null
[]
null
2
2024-06-12T01:48:20
2024-06-12T17:07:24
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Could you add dolphin-qwen2? Thanks.
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4988/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4988/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/5579
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5579/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5579/comments
https://api.github.com/repos/ollama/ollama/issues/5579/events
https://github.com/ollama/ollama/pull/5579
2,398,862,311
PR_kwDOJ0Z1Ps503mwf
5,579
Statically link C++ and thread lib on Windows
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-07-09T18:20:50
2024-07-09T19:21:16
2024-07-09T19:21:13
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5579", "html_url": "https://github.com/ollama/ollama/pull/5579", "diff_url": "https://github.com/ollama/ollama/pull/5579.diff", "patch_url": "https://github.com/ollama/ollama/pull/5579.patch", "merged_at": "2024-07-09T19:21:13" }
This makes sure we statically link the C++ and thread library on Windows to avoid unnecessary runtime dependencies on non-standard DLLs.

On my dev box, I have these `libpthread.dll.a` libraries and my local builds were leaking the dependency, e.g.:

```
> gci -path C:\msys64\ -r -fi 'libpthread.dll.a'

    Directory: C:\msys64\ucrt64\lib

Mode  LastWriteTime       Length Name
----  -------------       ------ ----
-a--- 4/27/2024 12:18 PM   94672 libpthread.dll.a
```

Before this change (my local builds):

```
> dumpbin /dependents ./ollama.exe
Microsoft (R) COFF/PE Dumper Version 14.40.33811.0
Copyright (C) Microsoft Corporation. All rights reserved.

Dump of file .\ollama.exe

File Type: EXECUTABLE IMAGE

  Image has the following dependencies:

    libstdc++-6.dll
    KERNEL32.dll
    msvcrt.dll
    libwinpthread-1.dll
```

After this change:

```
> dumpbin /dependents .\ollama.exe
Microsoft (R) COFF/PE Dumper Version 14.29.30154.0
Copyright (C) Microsoft Corporation. All rights reserved.

Dump of file .\ollama.exe

File Type: EXECUTABLE IMAGE

  Image has the following dependencies:

    KERNEL32.dll
    msvcrt.dll
```
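For readers wondering how a cgo-based Go project can request this kind of static linking, here is a minimal sketch; the `#cgo` flags below are an assumption for illustration, not the actual change in this PR (which adjusts the build scripts):

```go
// Package llm: a minimal cgo sketch. On Windows, ask the GNU toolchain to
// link the C++ runtime and winpthread statically so the resulting binary
// does not depend on libstdc++-6.dll or libwinpthread-1.dll at runtime.
// The exact flags are an assumption for illustration.
package llm

/*
#cgo windows LDFLAGS: -static-libstdc++ -static-libgcc -static
*/
import "C"
```

Either way, `dumpbin /dependents` is how to verify the result, as shown above.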
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5579/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5579/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/6926
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6926/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6926/comments
https://api.github.com/repos/ollama/ollama/issues/6926/events
https://github.com/ollama/ollama/issues/6926
2,544,194,257
I_kwDOJ0Z1Ps6XpVLR
6,926
Unable to use multiple GPUs
{ "login": "bluebirdlinlin", "id": 127192008, "node_id": "U_kgDOB5TLyA", "avatar_url": "https://avatars.githubusercontent.com/u/127192008?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bluebirdlinlin", "html_url": "https://github.com/bluebirdlinlin", "followers_url": "https://api.github.com/users/bluebirdlinlin/followers", "following_url": "https://api.github.com/users/bluebirdlinlin/following{/other_user}", "gists_url": "https://api.github.com/users/bluebirdlinlin/gists{/gist_id}", "starred_url": "https://api.github.com/users/bluebirdlinlin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bluebirdlinlin/subscriptions", "organizations_url": "https://api.github.com/users/bluebirdlinlin/orgs", "repos_url": "https://api.github.com/users/bluebirdlinlin/repos", "events_url": "https://api.github.com/users/bluebirdlinlin/events{/privacy}", "received_events_url": "https://api.github.com/users/bluebirdlinlin/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 5755339642, "node_id": "LA_kwDOJ0Z1Ps8AAAABVwuDeg", "url": "https://api.github.com/repos/ollama/ollama/labels/linux", "name": "linux", "color": "516E70", "default": false, "description": "" }, { "id": 6430601766, "node_id": "LA_kwDOJ0Z1Ps8AAAABf0syJg", "url": "https://api.github.com/repos/ollama/ollama/labels/nvidia", "name": "nvidia", "color": "8CDB00", "default": false, "description": "Issues relating to Nvidia GPUs and CUDA" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
8
2024-09-24T03:35:39
2024-09-25T00:10:17
2024-09-25T00:09:48
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue?

My GPU info: six A10s. But when I run Ollama (qwen:32b) it goes OOM, and I can see that only one GPU is working; the others can't be used. Here are the logs. Please help me with this issue, thanks.

llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = qwen2
llama_model_loader: - kv 1: general.name str = Qwen2-beta-7B-Chat
llama_model_loader: - kv 2: qwen2.block_count u32 = 32
llama_model_loader: - kv 3: qwen2.context_length u32 = 32768
llama_model_loader: - kv 4: qwen2.embedding_length u32 = 4096
llama_model_loader: - kv 5: qwen2.feed_forward_length u32 = 11008
llama_model_loader: - kv 6: qwen2.attention.head_count u32 = 32
llama_model_loader: - kv 7: qwen2.attention.head_count_kv u32 = 32
llama_model_loader: - kv 8: qwen2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 9: qwen2.use_parallel_residual bool = true
llama_model_loader: - kv 10: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 11: tokenizer.ggml.tokens arr[str,151936] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 12: tokenizer.ggml.token_type arr[i32,151936] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
time=2024-09-24T03:32:27.691Z level=INFO source=server.go:625 msg="waiting for server to become available" status="llm server loading model"
llama_model_loader: - kv 13: tokenizer.ggml.merges arr[str,151387] = ["Δ  Δ ", "Δ Δ  Δ Δ ", "i n", "Δ  t",...
llama_model_loader: - kv 14: tokenizer.ggml.eos_token_id u32 = 151643
llama_model_loader: - kv 15: tokenizer.ggml.padding_token_id u32 = 151643
llama_model_loader: - kv 16: tokenizer.ggml.bos_token_id u32 = 151643
llama_model_loader: - kv 17: tokenizer.chat_template str = {% for message in messages %}{{'<|im_...
llama_model_loader: - kv 18: general.quantization_version u32 = 2
llama_model_loader: - kv 19: general.file_type u32 = 2
llama_model_loader: - type f32: 161 tensors
llama_model_loader: - type q4_0: 225 tensors
llama_model_loader: - type q6_K: 1 tensors
llm_load_vocab: missing or unrecognized pre-tokenizer type, using: 'default'
llm_load_vocab: special tokens cache size = 293
llm_load_vocab: token to piece cache size = 0.9338 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = qwen2
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 151936
llm_load_print_meta: n_merges = 151387
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 32768
llm_load_print_meta: n_embd = 4096
llm_load_print_meta: n_layer = 32
llm_load_print_meta: n_head = 32
llm_load_print_meta: n_head_kv = 32
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: n_embd_k_gqa = 4096
llm_load_print_meta: n_embd_v_gqa = 4096
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 11008
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 32768
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: model type = 7B
llm_load_print_meta: model ftype = Q4_0
llm_load_print_meta: model params = 7.72 B
llm_load_print_meta: model size = 4.20 GiB (4.67 BPW)
llm_load_print_meta: general.name = Qwen2-beta-7B-Chat
llm_load_print_meta: BOS token = 151643 '<|endoftext|>'
llm_load_print_meta: EOS token = 151643 '<|endoftext|>'
llm_load_print_meta: PAD token = 151643 '<|endoftext|>'
llm_load_print_meta: LF token = 148848 'Γ„Δ¬'
llm_load_print_meta: EOT token = 151645 '<|im_end|>'
llm_load_print_meta: max token length = 256
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
  Device 0: NVIDIA A10, compute capability 8.6, VMM: yes
llm_load_tensors: ggml ctx size = 0.34 MiB
time=2024-09-24T03:32:29.148Z level=INFO source=server.go:625 msg="waiting for server to become available" status="llm server not responding"
llm_load_tensors: offloading 32 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 33/33 layers to GPU
llm_load_tensors: CPU buffer size = 333.84 MiB
llm_load_tensors: CUDA0 buffer size = 3963.38 MiB
time=2024-09-24T03:32:29.400Z level=INFO source=server.go:625 msg="waiting for server to become available" status="llm server loading model"
llama_new_context_with_model: n_ctx = 8192
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA0 KV buffer size = 4096.00 MiB
llama_new_context_with_model: KV self size = 4096.00 MiB, K (f16): 2048.00 MiB, V (f16): 2048.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 2.38 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 560.00 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 24.01 MiB
llama_new_context_with_model: graph nodes = 1126
llama_new_context_with_model: graph splits = 2
INFO [main] model loaded | tid="140002756288512" timestamp=1727148752
time=2024-09-24T03:32:32.416Z level=INFO source=server.go:630 msg="llama runner started in 4.98 seconds"
[GIN] 2024/09/24 - 03:32:32 | 200 | 6.624226742s | 127.0.0.1 | POST "/api/chat"
[GIN] 2024/09/24 - 03:32:43 | 200 | 1.193765361s | 127.0.0.1 | POST "/api/chat"
[GIN] 2024/09/24 - 03:32:53 | 200 | 947.380979ms | 127.0.0.1 | POST "/api/chat"
CUDA error: an illegal memory access was encountered
  current device: 0, in function ggml_backend_cuda_synchronize at /go/src/github.com/ollama/ollama/llm/llama.cpp/ggml/src/ggml-cuda.cu:2416
  cudaStreamSynchronize(cuda_ctx->stream())
/go/src/github.com/ollama/ollama/llm/llama.cpp/ggml/src/ggml-cuda.cu:101: CUDA error
free(): corrupted unsorted chunks

### OS

Linux, Docker

### GPU

Nvidia

### CPU

_No response_

### Ollama version

latest
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6926/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6926/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3586
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3586/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3586/comments
https://api.github.com/repos/ollama/ollama/issues/3586/events
https://github.com/ollama/ollama/pull/3586
2,236,613,763
PR_kwDOJ0Z1Ps5sTRAP
3,586
types/model: remove MarshalText/UnmarshalText from Digest
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-04-10T23:52:03
2024-04-10T23:52:50
2024-04-10T23:52:49
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3586", "html_url": "https://github.com/ollama/ollama/pull/3586", "diff_url": "https://github.com/ollama/ollama/pull/3586.diff", "patch_url": "https://github.com/ollama/ollama/pull/3586.patch", "merged_at": "2024-04-10T23:52:49" }
null
{ "login": "bmizerany", "id": 46, "node_id": "MDQ6VXNlcjQ2", "avatar_url": "https://avatars.githubusercontent.com/u/46?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bmizerany", "html_url": "https://github.com/bmizerany", "followers_url": "https://api.github.com/users/bmizerany/followers", "following_url": "https://api.github.com/users/bmizerany/following{/other_user}", "gists_url": "https://api.github.com/users/bmizerany/gists{/gist_id}", "starred_url": "https://api.github.com/users/bmizerany/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bmizerany/subscriptions", "organizations_url": "https://api.github.com/users/bmizerany/orgs", "repos_url": "https://api.github.com/users/bmizerany/repos", "events_url": "https://api.github.com/users/bmizerany/events{/privacy}", "received_events_url": "https://api.github.com/users/bmizerany/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3586/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3586/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7430
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7430/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7430/comments
https://api.github.com/repos/ollama/ollama/issues/7430/events
https://github.com/ollama/ollama/pull/7430
2,625,393,604
PR_kwDOJ0Z1Ps6Ac3AS
7,430
feat: allow setting KV cache type
{ "login": "sammcj", "id": 862951, "node_id": "MDQ6VXNlcjg2Mjk1MQ==", "avatar_url": "https://avatars.githubusercontent.com/u/862951?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sammcj", "html_url": "https://github.com/sammcj", "followers_url": "https://api.github.com/users/sammcj/followers", "following_url": "https://api.github.com/users/sammcj/following{/other_user}", "gists_url": "https://api.github.com/users/sammcj/gists{/gist_id}", "starred_url": "https://api.github.com/users/sammcj/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sammcj/subscriptions", "organizations_url": "https://api.github.com/users/sammcj/orgs", "repos_url": "https://api.github.com/users/sammcj/repos", "events_url": "https://api.github.com/users/sammcj/events{/privacy}", "received_events_url": "https://api.github.com/users/sammcj/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-10-30T21:33:06
2024-12-06T22:50:56
2024-11-18T06:34:43
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
true
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/7430", "html_url": "https://github.com/ollama/ollama/pull/7430", "diff_url": "https://github.com/ollama/ollama/pull/7430.diff", "patch_url": "https://github.com/ollama/ollama/pull/7430.patch", "merged_at": null }
null
{ "login": "sammcj", "id": 862951, "node_id": "MDQ6VXNlcjg2Mjk1MQ==", "avatar_url": "https://avatars.githubusercontent.com/u/862951?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sammcj", "html_url": "https://github.com/sammcj", "followers_url": "https://api.github.com/users/sammcj/followers", "following_url": "https://api.github.com/users/sammcj/following{/other_user}", "gists_url": "https://api.github.com/users/sammcj/gists{/gist_id}", "starred_url": "https://api.github.com/users/sammcj/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sammcj/subscriptions", "organizations_url": "https://api.github.com/users/sammcj/orgs", "repos_url": "https://api.github.com/users/sammcj/repos", "events_url": "https://api.github.com/users/sammcj/events{/privacy}", "received_events_url": "https://api.github.com/users/sammcj/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7430/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/7430/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/2599
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2599/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2599/comments
https://api.github.com/repos/ollama/ollama/issues/2599/events
https://github.com/ollama/ollama/pull/2599
2,143,117,889
PR_kwDOJ0Z1Ps5nVAvQ
2,599
Explicitly disable AVX2 on GPU builds
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-02-19T20:43:31
2024-02-19T21:13:08
2024-02-19T21:13:05
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/2599", "html_url": "https://github.com/ollama/ollama/pull/2599", "diff_url": "https://github.com/ollama/ollama/pull/2599.diff", "patch_url": "https://github.com/ollama/ollama/pull/2599.patch", "merged_at": "2024-02-19T21:13:05" }
Even though we weren't setting it to on, somewhere in the cmake config it was getting toggled on. By explicitly setting it to off, we get `/arch:AVX` as intended. fixes #2527
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2599/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2599/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3783
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3783/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3783/comments
https://api.github.com/repos/ollama/ollama/issues/3783/events
https://github.com/ollama/ollama/pull/3783
2,254,684,766
PR_kwDOJ0Z1Ps5tQTlb
3,783
Allow any kind of JSON including arrays or straight literals, not just objects
{ "login": "hughescr", "id": 46348, "node_id": "MDQ6VXNlcjQ2MzQ4", "avatar_url": "https://avatars.githubusercontent.com/u/46348?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hughescr", "html_url": "https://github.com/hughescr", "followers_url": "https://api.github.com/users/hughescr/followers", "following_url": "https://api.github.com/users/hughescr/following{/other_user}", "gists_url": "https://api.github.com/users/hughescr/gists{/gist_id}", "starred_url": "https://api.github.com/users/hughescr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hughescr/subscriptions", "organizations_url": "https://api.github.com/users/hughescr/orgs", "repos_url": "https://api.github.com/users/hughescr/repos", "events_url": "https://api.github.com/users/hughescr/events{/privacy}", "received_events_url": "https://api.github.com/users/hughescr/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
2
2024-04-20T19:22:27
2024-11-24T00:47:27
2024-11-24T00:47:26
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3783", "html_url": "https://github.com/ollama/ollama/pull/3783", "diff_url": "https://github.com/ollama/ollama/pull/3783.diff", "patch_url": "https://github.com/ollama/ollama/pull/3783.patch", "merged_at": null }
This is a simple PR which allows the `format=json` constraint to generate any valid JSON, not just JSON with an object at the top level. This allows models to generate arrays, strings, numbers, or literals without an object wrapper around them, reducing the number of tokens necessary for many use cases. Remember that every token means more CO2 emitted into the atmosphere and more $$$ transferred from everyone's pockets into Nvidia's treasury. Tokens matter!
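A minimal sketch of what this enables from a client's point of view, assuming a local server on the default port; the model name and prompt are placeholders. Since the top level may now be any JSON value, the client decodes into `any` rather than assuming an object:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Request JSON-constrained output from /api/generate.
	reqBody, _ := json.Marshal(map[string]any{
		"model":  "llama3", // placeholder model name
		"prompt": "List three colors as a JSON array of strings.",
		"format": "json",
		"stream": false,
	})

	resp, err := http.Post("http://localhost:11434/api/generate",
		"application/json", bytes.NewReader(reqBody))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The generated text arrives in the "response" field.
	var out struct {
		Response string `json:"response"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}

	// With this PR, the top level can be []any, string, float64,
	// bool, or nil -- not only map[string]any.
	var v any
	if err := json.Unmarshal([]byte(out.Response), &v); err != nil {
		panic(err)
	}
	fmt.Printf("top-level JSON value: %#v\n", v)
}
```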
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3783/reactions", "total_count": 2, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 1, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3783/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/6537
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/6537/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/6537/comments
https://api.github.com/repos/ollama/ollama/issues/6537/events
https://github.com/ollama/ollama/pull/6537
2,490,557,255
PR_kwDOJ0Z1Ps55opJT
6,537
Add metrics endpoint and basic OTel-based request metrics
{ "login": "amila-ku", "id": 12775690, "node_id": "MDQ6VXNlcjEyNzc1Njkw", "avatar_url": "https://avatars.githubusercontent.com/u/12775690?v=4", "gravatar_id": "", "url": "https://api.github.com/users/amila-ku", "html_url": "https://github.com/amila-ku", "followers_url": "https://api.github.com/users/amila-ku/followers", "following_url": "https://api.github.com/users/amila-ku/following{/other_user}", "gists_url": "https://api.github.com/users/amila-ku/gists{/gist_id}", "starred_url": "https://api.github.com/users/amila-ku/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/amila-ku/subscriptions", "organizations_url": "https://api.github.com/users/amila-ku/orgs", "repos_url": "https://api.github.com/users/amila-ku/repos", "events_url": "https://api.github.com/users/amila-ku/events{/privacy}", "received_events_url": "https://api.github.com/users/amila-ku/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
open
false
null
[]
null
8
2024-08-27T23:54:05
2025-01-14T02:50:06
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/6537", "html_url": "https://github.com/ollama/ollama/pull/6537", "diff_url": "https://github.com/ollama/ollama/pull/6537.diff", "patch_url": "https://github.com/ollama/ollama/pull/6537.patch", "merged_at": null }
Resolves https://github.com/ollama/ollama/issues/3144

This pull request adds a /metrics endpoint and HTTP metrics as a starting point. It uses the OTel metrics library and exposes metrics in Prometheus format. This PR does not try to cover all metrics, to keep it simple. If this looks good, I could add a few more that will be useful.

How to test: once the Ollama server is running, pull a model and run list (or any other Ollama actions), then:

```
curl http://127.0.0.1:11434/metrics
```

Example of custom metrics (not all are shown since I tried only a few commands):

Ollama request metrics:

```
% curl http://localhost:11434/metrics | grep -i ollama
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  5724    0  5724    0     0  1092k      0 --:--:-- --:--:-- --:--:-- 1117k
model_actions_total{action="list",otel_scope_name="ollama",otel_scope_version="",status="OK",status_code="200"} 1
otel_scope_info{otel_scope_name="ollama",otel_scope_version=""} 1
requests_total{action="all",otel_scope_name="ollama",otel_scope_version="",status="OK",status_code="200"} 2
target_info{service_name="unknown_service:ollama",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="1.27.0"} 1
```

All metrics:

```
% curl http://localhost:11434/metrics
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 2.2875e-05
go_gc_duration_seconds{quantile="0.25"} 2.2875e-05
go_gc_duration_seconds{quantile="0.5"} 3.0375e-05
go_gc_duration_seconds{quantile="0.75"} 3.0375e-05
go_gc_duration_seconds{quantile="1"} 3.0375e-05
go_gc_duration_seconds_sum 5.325e-05
go_gc_duration_seconds_count 2
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 10
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.22.0"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 3.247952e+06
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 5.271112e+06
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 11524
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 21294
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 3.159336e+06
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 3.247952e+06
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 1.998848e+06
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 5.7344e+06
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 18185
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 1.88416e+06
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 7.733248e+06
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.724793883369717e+09
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 0
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 39479
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 9600
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 15600
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 137120
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 146880
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 5.69504e+06
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 1.404724e+06
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 655360
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 655360
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 1.3126672e+07
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 11
# HELP list_requests_total The total number of model list requests that have been attempted.
# TYPE list_requests_total counter
list_requests_total{action="",otel_scope_name="ollama",otel_scope_version="",status="",status_code="0"} 4
# HELP otel_scope_info Instrumentation Scope metadata
# TYPE otel_scope_info gauge
otel_scope_info{otel_scope_name="ollama",otel_scope_version=""} 1
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 1
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP requests_total The total number of requests on all endpoints.
# TYPE requests_total counter
requests_total{action="all",otel_scope_name="ollama",otel_scope_version="",status="OK",status_code="200"} 3
# HELP target_info Target metadata
# TYPE target_info gauge
target_info{service_name="unknown_service:ollama",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="1.27.0"} 1
```
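For reference, a minimal self-contained sketch of the pattern this PR describes: an OTel counter exported in Prometheus format on /metrics. Metric and attribute names mirror the example output above, but everything else here is an assumption rather than the PR's actual code:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/prometheus"
	api "go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Bridge OTel metrics into the default Prometheus registry.
	exporter, err := prometheus.New()
	if err != nil {
		log.Fatal(err)
	}
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter))
	meter := provider.Meter("ollama")

	requests, err := meter.Int64Counter("requests_total",
		api.WithDescription("The total number of requests on all endpoints."))
	if err != nil {
		log.Fatal(err)
	}

	// Count one request per hit on /, then expose metrics on /metrics.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		requests.Add(r.Context(), 1, api.WithAttributes(
			attribute.String("action", "all"),
			attribute.String("status_code", "200")))
		w.WriteHeader(http.StatusOK)
	})
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":11434", nil))
}
```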
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/6537/reactions", "total_count": 9, "+1": 9, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/6537/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/525
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/525/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/525/comments
https://api.github.com/repos/ollama/ollama/issues/525/events
https://github.com/ollama/ollama/pull/525
1,895,311,859
PR_kwDOJ0Z1Ps5aRvI3
525
fix: add falcon.go
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2023-09-13T21:48:00
2023-09-13T22:15:34
2023-09-13T22:08:47
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/525", "html_url": "https://github.com/ollama/ollama/pull/525", "diff_url": "https://github.com/ollama/ollama/pull/525.diff", "patch_url": "https://github.com/ollama/ollama/pull/525.patch", "merged_at": "2023-09-13T22:08:47" }
missed this file in #519
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/525/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/525/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4143
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4143/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4143/comments
https://api.github.com/repos/ollama/ollama/issues/4143/events
https://github.com/ollama/ollama/pull/4143
2,278,567,972
PR_kwDOJ0Z1Ps5uhUu-
4,143
omit prompt and generate settings from final response
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-05-03T23:22:38
2024-05-04T00:39:50
2024-05-04T00:39:49
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/4143", "html_url": "https://github.com/ollama/ollama/pull/4143", "diff_url": "https://github.com/ollama/ollama/pull/4143.diff", "patch_url": "https://github.com/ollama/ollama/pull/4143.patch", "merged_at": "2024-05-04T00:39:49" }
If the input is large, it might overrun the response buffer. There's no need to return the prompt since the caller already has it.
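As a hedged illustration of the mechanism (the field names here are hypothetical, not Ollama's real API types), Go's `omitempty` tag is one way a final response can drop the prompt and generation settings when they are left unset:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// GenerateResponse is an illustrative type only. With `omitempty`, the
// final streamed response can leave the prompt and options out entirely,
// keeping the JSON small even when the input prompt was very large.
type GenerateResponse struct {
	Model    string         `json:"model"`
	Response string         `json:"response"`
	Done     bool           `json:"done"`
	Prompt   string         `json:"prompt,omitempty"`
	Options  map[string]any `json:"options,omitempty"`
}

func main() {
	final := GenerateResponse{Model: "llama3", Done: true}
	b, _ := json.Marshal(final) // prompt/options omitted from the payload
	fmt.Println(string(b))
}
```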
{ "login": "mxyng", "id": 2372640, "node_id": "MDQ6VXNlcjIzNzI2NDA=", "avatar_url": "https://avatars.githubusercontent.com/u/2372640?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxyng", "html_url": "https://github.com/mxyng", "followers_url": "https://api.github.com/users/mxyng/followers", "following_url": "https://api.github.com/users/mxyng/following{/other_user}", "gists_url": "https://api.github.com/users/mxyng/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxyng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxyng/subscriptions", "organizations_url": "https://api.github.com/users/mxyng/orgs", "repos_url": "https://api.github.com/users/mxyng/repos", "events_url": "https://api.github.com/users/mxyng/events{/privacy}", "received_events_url": "https://api.github.com/users/mxyng/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4143/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4143/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/4700
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/4700/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/4700/comments
https://api.github.com/repos/ollama/ollama/issues/4700/events
https://github.com/ollama/ollama/issues/4700
2,322,994,450
I_kwDOJ0Z1Ps6KdhUS
4,700
please support minicpmv2.5
{ "login": "chaoqunxie", "id": 44899524, "node_id": "MDQ6VXNlcjQ0ODk5NTI0", "avatar_url": "https://avatars.githubusercontent.com/u/44899524?v=4", "gravatar_id": "", "url": "https://api.github.com/users/chaoqunxie", "html_url": "https://github.com/chaoqunxie", "followers_url": "https://api.github.com/users/chaoqunxie/followers", "following_url": "https://api.github.com/users/chaoqunxie/following{/other_user}", "gists_url": "https://api.github.com/users/chaoqunxie/gists{/gist_id}", "starred_url": "https://api.github.com/users/chaoqunxie/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chaoqunxie/subscriptions", "organizations_url": "https://api.github.com/users/chaoqunxie/orgs", "repos_url": "https://api.github.com/users/chaoqunxie/repos", "events_url": "https://api.github.com/users/chaoqunxie/events{/privacy}", "received_events_url": "https://api.github.com/users/chaoqunxie/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5789807732, "node_id": "LA_kwDOJ0Z1Ps8AAAABWRl0dA", "url": "https://api.github.com/repos/ollama/ollama/labels/model%20request", "name": "model request", "color": "1E5DE6", "default": false, "description": "Model requests" } ]
closed
false
null
[]
null
1
2024-05-29T10:43:54
2024-08-28T21:51:32
2024-08-28T21:51:32
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
This is the GitHub address: https://github.com/OpenBMB/ollama
{ "login": "pdevine", "id": 75239, "node_id": "MDQ6VXNlcjc1MjM5", "avatar_url": "https://avatars.githubusercontent.com/u/75239?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pdevine", "html_url": "https://github.com/pdevine", "followers_url": "https://api.github.com/users/pdevine/followers", "following_url": "https://api.github.com/users/pdevine/following{/other_user}", "gists_url": "https://api.github.com/users/pdevine/gists{/gist_id}", "starred_url": "https://api.github.com/users/pdevine/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdevine/subscriptions", "organizations_url": "https://api.github.com/users/pdevine/orgs", "repos_url": "https://api.github.com/users/pdevine/repos", "events_url": "https://api.github.com/users/pdevine/events{/privacy}", "received_events_url": "https://api.github.com/users/pdevine/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/4700/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/4700/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/3717
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3717/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3717/comments
https://api.github.com/repos/ollama/ollama/issues/3717/events
https://github.com/ollama/ollama/pull/3717
2,249,492,119
PR_kwDOJ0Z1Ps5s_K_I
3,717
prompt to display and add local ollama keys to account
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-04-18T00:22:41
2024-04-30T18:02:09
2024-04-30T18:02:08
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3717", "html_url": "https://github.com/ollama/ollama/pull/3717", "diff_url": "https://github.com/ollama/ollama/pull/3717.diff", "patch_url": "https://github.com/ollama/ollama/pull/3717.patch", "merged_at": "2024-04-30T18:02:08" }
- Return descriptive error messages when unauthorized to create a blob or push a model - Display the local public key associated with the request that was denied
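A hedged aside on where that key lives: on default per-user installs the client keypair sits under the home directory (assumption: the Linux systemd service may instead keep it under /usr/share/ollama/.ollama).

```bash
# Print the local public key that the new error messages refer to
# (assumed default path for per-user installs).
cat ~/.ollama/id_ed25519.pub
```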
{ "login": "BruceMacD", "id": 5853428, "node_id": "MDQ6VXNlcjU4NTM0Mjg=", "avatar_url": "https://avatars.githubusercontent.com/u/5853428?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BruceMacD", "html_url": "https://github.com/BruceMacD", "followers_url": "https://api.github.com/users/BruceMacD/followers", "following_url": "https://api.github.com/users/BruceMacD/following{/other_user}", "gists_url": "https://api.github.com/users/BruceMacD/gists{/gist_id}", "starred_url": "https://api.github.com/users/BruceMacD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BruceMacD/subscriptions", "organizations_url": "https://api.github.com/users/BruceMacD/orgs", "repos_url": "https://api.github.com/users/BruceMacD/repos", "events_url": "https://api.github.com/users/BruceMacD/events{/privacy}", "received_events_url": "https://api.github.com/users/BruceMacD/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3717/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3717/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/3701
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3701/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3701/comments
https://api.github.com/repos/ollama/ollama/issues/3701/events
https://github.com/ollama/ollama/issues/3701
2,248,596,294
I_kwDOJ0Z1Ps6GBttG
3,701
llama runner process no longer running: -1 CUDA error: CUBLAS_STATUS_EXECUTION_FAILED
{ "login": "holycrypto", "id": 30022286, "node_id": "MDQ6VXNlcjMwMDIyMjg2", "avatar_url": "https://avatars.githubusercontent.com/u/30022286?v=4", "gravatar_id": "", "url": "https://api.github.com/users/holycrypto", "html_url": "https://github.com/holycrypto", "followers_url": "https://api.github.com/users/holycrypto/followers", "following_url": "https://api.github.com/users/holycrypto/following{/other_user}", "gists_url": "https://api.github.com/users/holycrypto/gists{/gist_id}", "starred_url": "https://api.github.com/users/holycrypto/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/holycrypto/subscriptions", "organizations_url": "https://api.github.com/users/holycrypto/orgs", "repos_url": "https://api.github.com/users/holycrypto/repos", "events_url": "https://api.github.com/users/holycrypto/events{/privacy}", "received_events_url": "https://api.github.com/users/holycrypto/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
7
2024-04-17T15:35:56
2024-04-30T12:40:57
2024-04-30T12:40:57
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I followed the manual: https://github.com/ollama/ollama/blob/9df6c85c3a51ce00d6a65be9dd8a06af07b24af5/docs/tutorials/nvidia-jetson.md but running the model produces an error: ```bash ollama run mistral-jetson ``` Errors: ```bash Error: llama runner process no longer running: -1 CUDA error: CUBLAS_STATUS_EXECUTION_FAILED current device: 0, in function ggml_cuda_mul_mat_batched_cublas at /go/src/github.com/ollama/ollama/llm/llama.cpp/ggml-cuda.cu:1848 cublasGemmBatchedEx(ctx.cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, alpha, (const void **) (ptrs_src.get() + 0*ne23), CUDA_R_16F, nb01/nb00, (const void **) (ptrs_src.get() + 1*ne23), CUDA_R_16F, nb11/nb10, beta, ( void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne01, ne23, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP) GGML_ASSERT: /go/src/github.com/ollama/ollama/llm/llama.cpp/ggml-cuda.cu:60: !"CUDA error" ``` ### What did you expect to see? _No response_ ### Steps to reproduce _No response_ ### Are there any recent changes that introduced the issue? _No response_ ### OS Linux ### Architecture arm64 ### Platform _No response_ ### Ollama version 0.1.32 ### GPU Nvidia ### GPU info +---------------------------------------------------------------------------------------+ | NVIDIA-SMI 540.2.0 Driver Version: N/A CUDA Version: 12.2 | |-----------------------------------------+----------------------+----------------------+ | GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |=========================================+======================+======================| | 0 Orin (nvgpu) N/A | N/A N/A | N/A | | N/A N/A N/A N/A / N/A | Not Supported | N/A N/A | | | | N/A | +-----------------------------------------+----------------------+----------------------+ +---------------------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=======================================================================================| | No running processes found | +---------------------------------------------------------------------------------------+ ### CPU _No response_ ### Other software _No response_
{ "login": "holycrypto", "id": 30022286, "node_id": "MDQ6VXNlcjMwMDIyMjg2", "avatar_url": "https://avatars.githubusercontent.com/u/30022286?v=4", "gravatar_id": "", "url": "https://api.github.com/users/holycrypto", "html_url": "https://github.com/holycrypto", "followers_url": "https://api.github.com/users/holycrypto/followers", "following_url": "https://api.github.com/users/holycrypto/following{/other_user}", "gists_url": "https://api.github.com/users/holycrypto/gists{/gist_id}", "starred_url": "https://api.github.com/users/holycrypto/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/holycrypto/subscriptions", "organizations_url": "https://api.github.com/users/holycrypto/orgs", "repos_url": "https://api.github.com/users/holycrypto/repos", "events_url": "https://api.github.com/users/holycrypto/events{/privacy}", "received_events_url": "https://api.github.com/users/holycrypto/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3701/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3701/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/5804
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5804/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5804/comments
https://api.github.com/repos/ollama/ollama/issues/5804/events
https://github.com/ollama/ollama/pull/5804
2,420,456,080
PR_kwDOJ0Z1Ps519tU_
5,804
Fix generate test flakiness
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-07-20T01:43:13
2024-07-20T02:11:27
2024-07-20T02:11:25
MEMBER
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5804", "html_url": "https://github.com/ollama/ollama/pull/5804", "diff_url": "https://github.com/ollama/ollama/pull/5804.diff", "patch_url": "https://github.com/ollama/ollama/pull/5804.patch", "merged_at": "2024-07-20T02:11:25" }
null
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5804/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5804/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/5465
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/5465/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/5465/comments
https://api.github.com/repos/ollama/ollama/issues/5465/events
https://github.com/ollama/ollama/pull/5465
2,389,139,548
PR_kwDOJ0Z1Ps50Wq42
5,465
Better nvidia GPU discovery logging
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
0
2024-07-03T17:38:42
2024-07-03T20:12:25
2024-07-03T20:12:22
COLLABORATOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/5465", "html_url": "https://github.com/ollama/ollama/pull/5465", "diff_url": "https://github.com/ollama/ollama/pull/5465.diff", "patch_url": "https://github.com/ollama/ollama/pull/5465.patch", "merged_at": "2024-07-03T20:12:22" }
Refine the way we log GPU discovery to improve the non-debug output, and report more actionable log messages when possible to help users troubleshoot on their own.
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/5465/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/5465/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/1902
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1902/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1902/comments
https://api.github.com/repos/ollama/ollama/issues/1902/events
https://github.com/ollama/ollama/issues/1902
2,074,744,059
I_kwDOJ0Z1Ps57qhT7
1,902
Extremely slow memory allocation in WSL2 container
{ "login": "otavio-silva", "id": 22914610, "node_id": "MDQ6VXNlcjIyOTE0NjEw", "avatar_url": "https://avatars.githubusercontent.com/u/22914610?v=4", "gravatar_id": "", "url": "https://api.github.com/users/otavio-silva", "html_url": "https://github.com/otavio-silva", "followers_url": "https://api.github.com/users/otavio-silva/followers", "following_url": "https://api.github.com/users/otavio-silva/following{/other_user}", "gists_url": "https://api.github.com/users/otavio-silva/gists{/gist_id}", "starred_url": "https://api.github.com/users/otavio-silva/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/otavio-silva/subscriptions", "organizations_url": "https://api.github.com/users/otavio-silva/orgs", "repos_url": "https://api.github.com/users/otavio-silva/repos", "events_url": "https://api.github.com/users/otavio-silva/events{/privacy}", "received_events_url": "https://api.github.com/users/otavio-silva/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 5860134234, "node_id": "LA_kwDOJ0Z1Ps8AAAABXUqNWg", "url": "https://api.github.com/repos/ollama/ollama/labels/windows", "name": "windows", "color": "0052CC", "default": false, "description": "" } ]
closed
false
{ "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "login": "dhiltgen", "id": 4033016, "node_id": "MDQ6VXNlcjQwMzMwMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4033016?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dhiltgen", "html_url": "https://github.com/dhiltgen", "followers_url": "https://api.github.com/users/dhiltgen/followers", "following_url": "https://api.github.com/users/dhiltgen/following{/other_user}", "gists_url": "https://api.github.com/users/dhiltgen/gists{/gist_id}", "starred_url": "https://api.github.com/users/dhiltgen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhiltgen/subscriptions", "organizations_url": "https://api.github.com/users/dhiltgen/orgs", "repos_url": "https://api.github.com/users/dhiltgen/repos", "events_url": "https://api.github.com/users/dhiltgen/events{/privacy}", "received_events_url": "https://api.github.com/users/dhiltgen/received_events", "type": "User", "user_view_type": "public", "site_admin": false } ]
null
7
2024-01-10T16:33:52
2024-05-10T00:57:39
2024-05-10T00:57:39
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
# Description When trying to run ollama inside a container, memory allocation is extremely slow, something like 50 MB/s max. When in chat with the model, the container releases memory after some time if idle, and if I run a prompt, it allocates it all over again. # Steps to reproduce 1. Run the command `podman run --device nvidia.com/gpu=all --security-opt label=disable --detach --volume .ollama:/root/.ollama -p 11434:11434 --name ollama-19 ollama/ollama:0.1.19` 2. Run the command `podman exec -it ollama-19 ollama run dolphin-mixtral` 3. Wait for several minutes # System info ``` Host Name: GE76RAIDER OS Name: Microsoft Windows 11 Pro OS Version: 10.0.22631 N/A Build 22631 OS Manufacturer: Microsoft Corporation OS Configuration: Standalone Workstation OS Build Type: Multiprocessor Free Registered Owner: otavioasilva@hotmail.com Registered Organization: N/A Product ID: 00330-80000-00000-AA520 Original Install Date: 02/08/2023, 14:30:14 System Boot Time: 10/01/2024, 12:32:44 System Manufacturer: Micro-Star International Co., Ltd. System Model: Raider GE76 12UHS System Type: x64-based PC Processor(s): 1 Processor(s) Installed. [01]: Intel64 Family 6 Model 154 Stepping 3 GenuineIntel ~2900 Mhz BIOS Version: American Megatrends International, LLC. E17K4IMS.20D, 26/06/2023 Windows Directory: C:\WINDOWS System Directory: C:\WINDOWS\system32 Boot Device: \Device\HarddiskVolume1 System Locale: pt-br;Portuguese (Brazil) Input Locale: en-us;English (United States) Time Zone: (UTC-03:00) Brasília Total Physical Memory: 65,237 MB Available Physical Memory: 46,571 MB Virtual Memory: Max Size: 74,965 MB Virtual Memory: Available: 50,991 MB Virtual Memory: In Use: 23,974 MB Page File Location(s): C:\pagefile.sys Domain: WORKGROUP Logon Server: \\GE76RAIDER Hotfix(s): 4 Hotfix(s) Installed. [01]: KB5033920 [02]: KB5027397 [03]: KB5034123 [04]: KB5032393 Network Card(s): 3 NIC(s) Installed. [01]: Killer E3100G 2.5 Gigabit Ethernet Controller Connection Name: Ethernet Status: Media disconnected [02]: Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW) Connection Name: Wi-Fi DHCP Enabled: Yes DHCP Server: 192.168.1.1 IP address(es) [01]: 192.168.1.26 [03]: TAP-Windows Adapter V9 Connection Name: TAP-Windows Status: Media disconnected Hyper-V Requirements: A hypervisor has been detected. Features required for Hyper-V will not be displayed. ```
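One thing worth ruling out, offered as an assumption rather than a confirmed diagnosis: WSL2's experimental memory-reclaim feature can page an idle distro's memory back to Windows, which matches the "releases memory when idle, reallocates slowly on the next prompt" pattern described above.

```bash
# From inside WSL2, inspect the Windows-side WSL config (hypothetical
# username in the path). If [experimental] autoMemoryReclaim is set to
# gradual or dropcache, try removing it and then run `wsl --shutdown`.
cat "/mnt/c/Users/<you>/.wslconfig"
```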
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1902/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/ollama/ollama/issues/1902/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/8616
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/8616/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/8616/comments
https://api.github.com/repos/ollama/ollama/issues/8616/events
https://github.com/ollama/ollama/issues/8616
2,813,973,892
I_kwDOJ0Z1Ps6nudWE
8,616
Ollama: torch.OutOfMemoryError: CUDA out of memory
{ "login": "kennethwork101", "id": 147571330, "node_id": "U_kgDOCMvCgg", "avatar_url": "https://avatars.githubusercontent.com/u/147571330?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kennethwork101", "html_url": "https://github.com/kennethwork101", "followers_url": "https://api.github.com/users/kennethwork101/followers", "following_url": "https://api.github.com/users/kennethwork101/following{/other_user}", "gists_url": "https://api.github.com/users/kennethwork101/gists{/gist_id}", "starred_url": "https://api.github.com/users/kennethwork101/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kennethwork101/subscriptions", "organizations_url": "https://api.github.com/users/kennethwork101/orgs", "repos_url": "https://api.github.com/users/kennethwork101/repos", "events_url": "https://api.github.com/users/kennethwork101/events{/privacy}", "received_events_url": "https://api.github.com/users/kennethwork101/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
0
2025-01-27T20:21:48
2025-01-27T20:21:48
null
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I am running some tests using pytest with the following 6 models. What I find is that if I run all tests with each model before going on to the next model, the tests mostly work fine: 123/126 passed. But if I run each test against all 6 models sequentially and then go on to the next test, I see hangs or out-of-memory errors. Is this a known issue? I expect the order of running tests with Ollama should not matter. ollama version is 0.5.7 pytest 8.3.4 | NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | | 0 NVIDIA GeForce RTX 4070 Ti Off | 00000000:01:00.0 On | N/A | Linux kennethpc 6.8.0-51-generic #52-Ubuntu SMP PREEMPT_DYNAMIC Thu Dec 5 13:09:44 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux qwen2:latest qwen2.5:latest mistral:latest llama3-groq-tool-use:latest llama3.2:latest llama3.2:latest Here are some examples of the errors; sometimes I simply see hangs: FAILED tests/_1_misc_test.py::test_t6_func[mistral:latest] - assert None is not None FAILED tests/_1_misc_test.py::test_t6_func[llama3-groq-tool-use:latest] - assert None is not None FAILED tests/_2_rag_test.py::test_t7_func[qwen2:latest-chroma] - assert 768 == 384 FAILED tests/_2_rag_test.py::test_t7_func[qwen2.5:latest-chroma] - torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 0 has a total capacity of 11.72 GiB of which 41.50 MiB is free. Process 263255 has 3.40 GiB memory in use. Process 263532 has 6.09 GiB memory in use. In... FAILED tests/_2_rag_test.py::test_t7_func[qwen2.5:latest-huggingface] - torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 0 has a total capacity of 11.72 GiB of which 41.50 MiB is free. Process 263255 has 3.40 GiB memory in use. Process 263532 has 6.09 GiB memory in use. In... ### OS Linux ### GPU Nvidia ### CPU Intel ### Ollama version 0.5.7
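A hedged mitigation sketch rather than a confirmed fix: when tests cycle through six models quickly, more than one can stay resident in VRAM at once, and explicitly unloading each model between parameterizations keeps memory free. The model tag below is just one of the six from the report.

```bash
# Ask the server to unload the model as soon as the call completes:
# keep_alive=0 tells it not to keep the weights resident in VRAM.
curl http://localhost:11434/api/generate -d '{
  "model": "qwen2.5:latest",
  "keep_alive": 0
}'
```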
null
{ "url": "https://api.github.com/repos/ollama/ollama/issues/8616/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/8616/timeline
null
null
false
https://api.github.com/repos/ollama/ollama/issues/3847
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/3847/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/3847/comments
https://api.github.com/repos/ollama/ollama/issues/3847/events
https://github.com/ollama/ollama/pull/3847
2,259,435,609
PR_kwDOJ0Z1Ps5tgWCg
3,847
Update linux.md
{ "login": "moresearch", "id": 111041768, "node_id": "U_kgDOBp5c6A", "avatar_url": "https://avatars.githubusercontent.com/u/111041768?v=4", "gravatar_id": "", "url": "https://api.github.com/users/moresearch", "html_url": "https://github.com/moresearch", "followers_url": "https://api.github.com/users/moresearch/followers", "following_url": "https://api.github.com/users/moresearch/following{/other_user}", "gists_url": "https://api.github.com/users/moresearch/gists{/gist_id}", "starred_url": "https://api.github.com/users/moresearch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/moresearch/subscriptions", "organizations_url": "https://api.github.com/users/moresearch/orgs", "repos_url": "https://api.github.com/users/moresearch/repos", "events_url": "https://api.github.com/users/moresearch/events{/privacy}", "received_events_url": "https://api.github.com/users/moresearch/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-04-23T17:49:12
2024-05-06T22:02:29
2024-05-06T22:02:25
CONTRIBUTOR
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/3847", "html_url": "https://github.com/ollama/ollama/pull/3847", "diff_url": "https://github.com/ollama/ollama/pull/3847.diff", "patch_url": "https://github.com/ollama/ollama/pull/3847.patch", "merged_at": "2024-05-06T22:02:25" }
Add -e to the log viewing command so it jumps to the end of the ollama logs
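For context, a sketch of the command this change likely documents (assumption: the Linux docs view the service logs with journalctl):

```bash
# -u selects the ollama systemd unit; -e jumps to the end of the log.
journalctl -e -u ollama
```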
{ "login": "jmorganca", "id": 251292, "node_id": "MDQ6VXNlcjI1MTI5Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/251292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmorganca", "html_url": "https://github.com/jmorganca", "followers_url": "https://api.github.com/users/jmorganca/followers", "following_url": "https://api.github.com/users/jmorganca/following{/other_user}", "gists_url": "https://api.github.com/users/jmorganca/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmorganca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmorganca/subscriptions", "organizations_url": "https://api.github.com/users/jmorganca/orgs", "repos_url": "https://api.github.com/users/jmorganca/repos", "events_url": "https://api.github.com/users/jmorganca/events{/privacy}", "received_events_url": "https://api.github.com/users/jmorganca/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/3847/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/3847/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/7996
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/7996/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/7996/comments
https://api.github.com/repos/ollama/ollama/issues/7996/events
https://github.com/ollama/ollama/issues/7996
2,724,965,068
I_kwDOJ0Z1Ps6ia6rM
7,996
Less available memory than expected
{ "login": "tie-pilot-qxw", "id": 113431004, "node_id": "U_kgDOBsLR3A", "avatar_url": "https://avatars.githubusercontent.com/u/113431004?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tie-pilot-qxw", "html_url": "https://github.com/tie-pilot-qxw", "followers_url": "https://api.github.com/users/tie-pilot-qxw/followers", "following_url": "https://api.github.com/users/tie-pilot-qxw/following{/other_user}", "gists_url": "https://api.github.com/users/tie-pilot-qxw/gists{/gist_id}", "starred_url": "https://api.github.com/users/tie-pilot-qxw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tie-pilot-qxw/subscriptions", "organizations_url": "https://api.github.com/users/tie-pilot-qxw/orgs", "repos_url": "https://api.github.com/users/tie-pilot-qxw/repos", "events_url": "https://api.github.com/users/tie-pilot-qxw/events{/privacy}", "received_events_url": "https://api.github.com/users/tie-pilot-qxw/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396184, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aWA", "url": "https://api.github.com/repos/ollama/ollama/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 6677367769, "node_id": "LA_kwDOJ0Z1Ps8AAAABjgCL2Q", "url": "https://api.github.com/repos/ollama/ollama/labels/needs%20more%20info", "name": "needs more info", "color": "BA8041", "default": false, "description": "More information is needed to assist" } ]
closed
false
null
[]
null
5
2024-12-08T05:31:08
2024-12-23T08:07:38
2024-12-23T08:07:38
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
### What is the issue? I'm using a 4060 laptop GPU, which has 8GB of memory. However, the log shows `library=cuda variant=v12 compute=8.9 driver=12.7 name="NVIDIA GeForce RTX 4060 Laptop GPU" total="8.0 GiB" available="6.9 GiB"` That's quite strange, because `nvidia-smi` shows otherwise: ![image](https://github.com/user-attachments/assets/f7e61eb4-ee66-4927-aa71-295a095c5bb0) In fact, when I tried to run llama3.2 3b fp16, ollama offloaded 10% of the layers to the CPU, while `ollama ps` reported the needed memory as 7.9GB, which should fit in GPU memory. ### OS WSL2 ### GPU Nvidia ### CPU Intel ### Ollama version 0.4.7
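A hedged workaround sketch, not a fix for the discovery math itself: the layer-offload count can be forced through the `num_gpu` option, at the risk of a CUDA out-of-memory error if the server's estimate was right after all. The model tag is assumed from "llama3.2 3b fp16" in the report.

```bash
# Force all layers onto the GPU; num_gpu is the number of layers to
# offload, and a large value like 99 effectively means "all of them".
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2:3b-instruct-fp16",
  "prompt": "hello",
  "options": { "num_gpu": 99 }
}'
```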
{ "login": "rick-github", "id": 14946854, "node_id": "MDQ6VXNlcjE0OTQ2ODU0", "avatar_url": "https://avatars.githubusercontent.com/u/14946854?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rick-github", "html_url": "https://github.com/rick-github", "followers_url": "https://api.github.com/users/rick-github/followers", "following_url": "https://api.github.com/users/rick-github/following{/other_user}", "gists_url": "https://api.github.com/users/rick-github/gists{/gist_id}", "starred_url": "https://api.github.com/users/rick-github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rick-github/subscriptions", "organizations_url": "https://api.github.com/users/rick-github/orgs", "repos_url": "https://api.github.com/users/rick-github/repos", "events_url": "https://api.github.com/users/rick-github/events{/privacy}", "received_events_url": "https://api.github.com/users/rick-github/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/7996/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/7996/timeline
null
not_planned
false
https://api.github.com/repos/ollama/ollama/issues/1959
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1959/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1959/comments
https://api.github.com/repos/ollama/ollama/issues/1959/events
https://github.com/ollama/ollama/pull/1959
2,079,489,803
PR_kwDOJ0Z1Ps5j9ckG
1,959
Add ollama sync command
{ "login": "puffo", "id": 4732941, "node_id": "MDQ6VXNlcjQ3MzI5NDE=", "avatar_url": "https://avatars.githubusercontent.com/u/4732941?v=4", "gravatar_id": "", "url": "https://api.github.com/users/puffo", "html_url": "https://github.com/puffo", "followers_url": "https://api.github.com/users/puffo/followers", "following_url": "https://api.github.com/users/puffo/following{/other_user}", "gists_url": "https://api.github.com/users/puffo/gists{/gist_id}", "starred_url": "https://api.github.com/users/puffo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/puffo/subscriptions", "organizations_url": "https://api.github.com/users/puffo/orgs", "repos_url": "https://api.github.com/users/puffo/repos", "events_url": "https://api.github.com/users/puffo/events{/privacy}", "received_events_url": "https://api.github.com/users/puffo/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
5
2024-01-12T18:51:03
2024-01-25T18:21:20
2024-01-25T18:20:50
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
false
{ "url": "https://api.github.com/repos/ollama/ollama/pulls/1959", "html_url": "https://github.com/ollama/ollama/pull/1959", "diff_url": "https://github.com/ollama/ollama/pull/1959.diff", "patch_url": "https://github.com/ollama/ollama/pull/1959.patch", "merged_at": null }
I frequently need to pull the latest version of models I've already downloaded. Taking inspiration from the comments and suggestions in https://github.com/jmorganca/ollama/issues/1890, I've implemented a basic `sync` command to streamline this process. ```bash ollama sync ```
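Until something like this lands, roughly the same effect can be had with a shell loop over the installed models (a sketch, assuming `ollama list` keeps its tabular output with the model tag in the first column):

```bash
# Re-pull every locally installed model; NR>1 skips the header row.
ollama list | awk 'NR>1 {print $1}' | while read -r model; do
  ollama pull "$model"
done
```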
{ "login": "puffo", "id": 4732941, "node_id": "MDQ6VXNlcjQ3MzI5NDE=", "avatar_url": "https://avatars.githubusercontent.com/u/4732941?v=4", "gravatar_id": "", "url": "https://api.github.com/users/puffo", "html_url": "https://github.com/puffo", "followers_url": "https://api.github.com/users/puffo/followers", "following_url": "https://api.github.com/users/puffo/following{/other_user}", "gists_url": "https://api.github.com/users/puffo/gists{/gist_id}", "starred_url": "https://api.github.com/users/puffo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/puffo/subscriptions", "organizations_url": "https://api.github.com/users/puffo/orgs", "repos_url": "https://api.github.com/users/puffo/repos", "events_url": "https://api.github.com/users/puffo/events{/privacy}", "received_events_url": "https://api.github.com/users/puffo/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1959/reactions", "total_count": 5, "+1": 5, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1959/timeline
null
null
true
https://api.github.com/repos/ollama/ollama/issues/1945
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/1945/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/1945/comments
https://api.github.com/repos/ollama/ollama/issues/1945/events
https://github.com/ollama/ollama/issues/1945
2,078,206,380
I_kwDOJ0Z1Ps573ums
1,945
Donation
{ "login": "reddec", "id": 6597086, "node_id": "MDQ6VXNlcjY1OTcwODY=", "avatar_url": "https://avatars.githubusercontent.com/u/6597086?v=4", "gravatar_id": "", "url": "https://api.github.com/users/reddec", "html_url": "https://github.com/reddec", "followers_url": "https://api.github.com/users/reddec/followers", "following_url": "https://api.github.com/users/reddec/following{/other_user}", "gists_url": "https://api.github.com/users/reddec/gists{/gist_id}", "starred_url": "https://api.github.com/users/reddec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/reddec/subscriptions", "organizations_url": "https://api.github.com/users/reddec/orgs", "repos_url": "https://api.github.com/users/reddec/repos", "events_url": "https://api.github.com/users/reddec/events{/privacy}", "received_events_url": "https://api.github.com/users/reddec/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[]
closed
false
null
[]
null
1
2024-01-12T07:08:11
2024-01-25T23:09:03
2024-01-25T23:09:03
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
Hi there! I really, REALLY love the product. Ollama made LLM usage as simple as Docker did for containers years ago. Maybe it's time for a donation button? I can't promise much, but at least I could show my appreciation.
{ "login": "mchiang0610", "id": 3325447, "node_id": "MDQ6VXNlcjMzMjU0NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/3325447?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mchiang0610", "html_url": "https://github.com/mchiang0610", "followers_url": "https://api.github.com/users/mchiang0610/followers", "following_url": "https://api.github.com/users/mchiang0610/following{/other_user}", "gists_url": "https://api.github.com/users/mchiang0610/gists{/gist_id}", "starred_url": "https://api.github.com/users/mchiang0610/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mchiang0610/subscriptions", "organizations_url": "https://api.github.com/users/mchiang0610/orgs", "repos_url": "https://api.github.com/users/mchiang0610/repos", "events_url": "https://api.github.com/users/mchiang0610/events{/privacy}", "received_events_url": "https://api.github.com/users/mchiang0610/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/1945/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/1945/timeline
null
completed
false
https://api.github.com/repos/ollama/ollama/issues/2597
https://api.github.com/repos/ollama/ollama
https://api.github.com/repos/ollama/ollama/issues/2597/labels{/name}
https://api.github.com/repos/ollama/ollama/issues/2597/comments
https://api.github.com/repos/ollama/ollama/issues/2597/events
https://github.com/ollama/ollama/issues/2597
2,142,884,059
I_kwDOJ0Z1Ps5_udDb
2,597
Suggestion - Custom Model Actions
{ "login": "Subie1", "id": 133152722, "node_id": "U_kgDOB--_0g", "avatar_url": "https://avatars.githubusercontent.com/u/133152722?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Subie1", "html_url": "https://github.com/Subie1", "followers_url": "https://api.github.com/users/Subie1/followers", "following_url": "https://api.github.com/users/Subie1/following{/other_user}", "gists_url": "https://api.github.com/users/Subie1/gists{/gist_id}", "starred_url": "https://api.github.com/users/Subie1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Subie1/subscriptions", "organizations_url": "https://api.github.com/users/Subie1/orgs", "repos_url": "https://api.github.com/users/Subie1/repos", "events_url": "https://api.github.com/users/Subie1/events{/privacy}", "received_events_url": "https://api.github.com/users/Subie1/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
[ { "id": 5667396200, "node_id": "LA_kwDOJ0Z1Ps8AAAABUc2aaA", "url": "https://api.github.com/repos/ollama/ollama/labels/feature%20request", "name": "feature request", "color": "a2eeef", "default": false, "description": "New feature or request" }, { "id": 7706482389, "node_id": "LA_kwDOJ0Z1Ps8AAAABy1eW1Q", "url": "https://api.github.com/repos/ollama/ollama/labels/api", "name": "api", "color": "bfdadc", "default": false, "description": "" } ]
closed
false
null
[]
null
1
2024-02-19T17:42:42
2024-12-10T18:11:03
2024-12-10T18:11:03
NONE
{ "total": 0, "completed": 0, "percent_completed": 0 }
null
null
null
# Providing an LLM with actions! - Putting it simply: giving an LLM the power to use actions, which the developer programs themselves. ### The request: - The request that is sent, which gives the LLM the knowledge that it has actions it can use. ```shell curl http://localhost:11434/api/generate -d '{ "model": "llama2", "system": "You are a librarian", "prompt": "How many books do you have?", "actions": [{ "name": "get_books", "description": "Get the amount of books you currently have" }] }' ``` ### The response: - The simple response, where the client scans the response text for an action string ```json { "model": "llama2", "created_at": "2023-08-04T08:52:19.385406455-07:00", "response": "Sure! Here is how many books I have {\"action\":\"get_books\"}" } ``` - The less simple but far more intuitive response, where the actions it tried to use are sent in an array and `%0` in the response text refers to the element of the array at index `0`, here `"get_books"` ```json { "model": "llama2", "created_at": "2023-08-04T08:52:19.385406455-07:00", "response": "Sure! Here is how many books I have %0", "actions": ["get_books"] } ``` ### Creating actions - The format for an action is a simple JSON object that explains everything to the LLM: ```js { "name": "get_books", // [REQUIRED] This field is what the LLM will send when it wants to use the action. "description": "Get the amount of books you currently have", // [REQUIRED] This field will explain to the LLM what the action does when used. "type": "response" // [OPTIONAL] This field explains to the LLM if the response from the action will be added to the response text (for example, via the %0 placeholder) or if it'll be an action that doesn't give out a response. (default "response") (values "response"|"silent") } ``` #### Actions could also be created in a Modelfile: ``` FROM llama2 SYSTEM """ You are a librarian """ ACTIONS """ [ { "name": "get_books", // [REQUIRED] This field is what the LLM will send when it wants to use the action. "description": "Get the amount of books you currently have", // [REQUIRED] This field will explain to the LLM what the action does when used. "type": "response" // [OPTIONAL] This field explains to the LLM if the response from the action will be added to the response text (for example, via the %0 placeholder) or if it'll be an action that doesn't give out a response. (default "response") (values "response"|"silent") } ] """ ```
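A hypothetical client-side dispatch sketch for this proposal (nothing here exists in Ollama today; the protocol, the `get_books` action, and the handler output are all taken from the suggestion above):

```bash
# Detect an embedded {"action":"..."} object in a response and run a
# matching handler; requires jq for the small bit of JSON parsing.
response='Sure! Here is how many books I have {"action":"get_books"}'
action=$(printf '%s' "$response" | grep -o '{"action":"[a-z_]*"}' | jq -r '.action')
case "$action" in
  get_books) echo "handler result: 1,234 books" ;;
  *) echo "no action requested" ;;
esac
```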
{ "login": "Subie1", "id": 133152722, "node_id": "U_kgDOB--_0g", "avatar_url": "https://avatars.githubusercontent.com/u/133152722?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Subie1", "html_url": "https://github.com/Subie1", "followers_url": "https://api.github.com/users/Subie1/followers", "following_url": "https://api.github.com/users/Subie1/following{/other_user}", "gists_url": "https://api.github.com/users/Subie1/gists{/gist_id}", "starred_url": "https://api.github.com/users/Subie1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Subie1/subscriptions", "organizations_url": "https://api.github.com/users/Subie1/orgs", "repos_url": "https://api.github.com/users/Subie1/repos", "events_url": "https://api.github.com/users/Subie1/events{/privacy}", "received_events_url": "https://api.github.com/users/Subie1/received_events", "type": "User", "user_view_type": "public", "site_admin": false }
{ "url": "https://api.github.com/repos/ollama/ollama/issues/2597/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/ollama/ollama/issues/2597/timeline
null
completed
false