# NOTE: removed non-Python residue from a git-blame/web export (byte-size line,
# commit-hash gutter, line-number gutter) that would be a SyntaxError here.
import streamlit as st
import pandas as pd
import requests
import os
import time
from datetime import datetime
import humanize
# Page title shown at the top of the Streamlit app.
st.header("List Ollama model downloads")

# Base URL of the search API; overridable via the HOST environment variable,
# defaulting to the hosted Hugging Face Space.
HOST = os.getenv("HOST", "https://1tsnakers-ollamasearchapi.hf.space")
def is_api_up() -> bool:
    """Return True if the backend API answers its /ping endpoint with HTTP 200.

    Any network-level failure (DNS error, connection refused, timeout) is
    treated as "down" and returns False.
    """
    try:
        # Timeout prevents the Streamlit app from hanging forever if the
        # API host accepts the connection but never responds.
        r = requests.get(f"{HOST}/ping", timeout=5)
        return r.status_code == 200
    except requests.RequestException:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        return False
# Namespace whose models should be listed (e.g. "library" for official models).
namespace = st.text_input(
    "Namespace",
    value="library"
)

if st.button("Make list"):
    start_time = time.time()
    if not is_api_up():
        st.error("API is not reachable")
    else:
        if not namespace:
            st.warning("Namespace cannot be empty!")
            st.stop()
        # Timeout so one slow request cannot freeze the UI indefinitely.
        r = requests.get(f"{HOST}/{namespace}", timeout=30)
        # Parse the JSON body once (the original called r.json() twice).
        payload = r.json()
        library = payload["results"]
        models = [model["model_base_name"] for model in library]
        pull_count = [int(model["pull_count"]) for model in library]
        date = [datetime.fromisoformat(model["last_updated_iso"]) for model in library]
        total_time = round(time.time() - start_time, 3)

        # BUG FIX: the original computed `datetime.fromisoformat(...) - time.time()`,
        # i.e. datetime minus float — a TypeError at runtime. Report the cache's
        # age as a timedelta instead. The API's cache TTL is not exposed here,
        # so an exact "expires in" cannot be computed — TODO confirm whether the
        # API reports a TTL alongside "cached_at".
        cached_at = datetime.fromisoformat(payload["cached_at"])
        # datetime.now(cached_at.tzinfo) stays naive/aware-consistent with
        # whatever the API returns, avoiding a naive-vs-aware subtraction error.
        cache_age = datetime.now(cached_at.tzinfo) - cached_at

        st.write(f"{len(models)} models counted in {total_time} seconds")
        st.write(f"Cache last refreshed {humanize.naturaltime(cache_age)}")

        df = pd.DataFrame({
            "model": models,
            "pull_count": pull_count,
            "date": date
        })
        st.dataframe(
            df,
            hide_index=True,
            column_config={
                "model": "AI Model",
                # BUG FIX: column_config keys must match DataFrame column
                # names; the original used the display label "Pulls" as the
                # key, so the NumberColumn config was silently ignored.
                "pull_count": st.column_config.NumberColumn("Pulls"),
                "date": st.column_config.DateColumn("Last Updated")
            }
        )