File size: 5,740 Bytes
2a88707
 
34d0739
f68537f
a05fede
34d0739
a05fede
34d0739
 
92d7276
2a88707
34d0739
 
 
2a88707
442574e
34d0739
442574e
 
 
 
 
 
 
2a88707
 
442574e
2a88707
f068886
0a61355
92d7276
2a88707
34d0739
77ae47f
e88412a
 
77ae47f
2a88707
34d0739
 
 
 
 
e88412a
2a88707
 
 
e88412a
 
77ae47f
 
e88412a
77ae47f
 
 
 
 
a05fede
77ae47f
 
 
 
f68537f
77ae47f
 
 
 
 
 
 
 
 
 
 
 
 
2a88707
34d0739
2a88707
77ae47f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2a88707
 
77ae47f
 
34d0739
7d158e2
77ae47f
2a88707
77ae47f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import streamlit as st
import cohere
import os
import base64

# Basic page configuration and static media asset paths.
st.set_page_config(page_title="Cohere Chat", layout="wide")

AI_PFP = "media/pfps/cohere-pfp.png"   # assistant avatar
USER_PFP = "media/pfps/user-pfp.jpg"   # user avatar
BANNER = "media/banner.png"            # sidebar banner image

# Fail fast if any required asset is missing. The original only checked the
# two profile pictures, but BANNER is also rendered unconditionally in the
# sidebar and would crash st.image() later if absent.
_missing = [p for p in (AI_PFP, USER_PFP, BANNER) if not os.path.exists(p)]
if _missing:
    st.error(f"Missing media files: {', '.join(_missing)}")
    st.stop()


# Catalogue of selectable Cohere models shown in the sidebar.
# Each entry: human-readable description plus context-window and max-output
# sizes (display strings, uppercase "K" convention throughout).
model_info = {
    "c4ai-aya-expanse-8b": {"description": "Aya Expanse is a highly performant 8B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.", "context": "4K", "output": "4K"},
    "c4ai-aya-expanse-32b": {"description": "Aya Expanse is a highly performant 32B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.", "context": "128K", "output": "4K"},
    "c4ai-aya-vision-8b": {"description": "Aya Vision is a state-of-the-art multimodal model excelling at a variety of critical benchmarks for language, text, and image capabilities. This 8 billion parameter variant is focused on low latency and best-in-class performance.", "context": "16K", "output": "4K"},
    # FIX: context was "16k" (lowercase), inconsistent with every other entry.
    "c4ai-aya-vision-32b": {"description": "Aya Vision is a state-of-the-art multimodal model excelling at a variety of critical benchmarks for language, text, and image capabilities. Serves 23 languages. This 32 billion parameter variant is focused on state-of-art multilingual performance.", "context": "16K", "output": "4K"},
    "command-a-03-2025": {"description": "Command A is our most performant model to date, excelling at tool use, agents, retrieval augmented generation (RAG), and multilingual use cases. Command A has a context length of 256K, only requires two GPUs to run, and has 150% higher throughput compared to Command R+ 08-2024.", "context": "256K", "output": "8K"},
    "command-r7b-12-2024": {"description": "command-r7b-12-2024 is a small, fast update delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning and multiple steps.", "context": "128K", "output": "4K"},
    "command-r-plus-04-2024": {"description": "Command R+ is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models. It is best suited for complex RAG workflows and multi-step tool use.", "context": "128K", "output": "4K"},
}


def _reset_chat():
    """Clear Chat callback: wipe the history and re-show the greeting."""
    st.session_state.messages = []
    st.session_state.first_message_sent = False

# Sidebar: branding, API credentials, model picker, and a model info card.
with st.sidebar:
    st.image(BANNER, use_container_width=True)
    st.markdown("Hugging Face πŸ€— Community UI (Vision Model support coming soon)")
    st.title("Settings")
    api_key = st.text_input("Cohere API Key", type="password")
    selected_model = st.selectbox("Model", options=list(model_info))
    st.button("Clear Chat", on_click=_reset_chat)
    st.divider()
    # Info card for the model currently selected above.
    chosen = model_info[selected_model]
    st.image(AI_PFP, width=60)
    st.subheader(selected_model)
    st.markdown(chosen["description"])
    st.caption(f"Context: {chosen['context']}")
    st.caption(f"Output: {chosen['output']}")
    st.markdown("Powered by Cohere's API")

# One-time session-state initialisation (persists across Streamlit reruns).
for _key, _default in (
    ("messages", []),
    ("first_message_sent", False),
    ("uploaded_image", None),
):
    if _key not in st.session_state:
        st.session_state[_key] = _default

# Centered greeting, shown only until the first user message is sent.
if not st.session_state.first_message_sent:
    st.markdown(
        "<h1 style='text-align:center; color:#4a4a4a; margin-top:100px;'>How can Cohere help you today?</h1>",
        unsafe_allow_html=True
    )

# Replay the stored conversation so it survives Streamlit reruns.
for entry in st.session_state.messages:
    is_user = entry["role"] == "user"
    with st.chat_message(entry["role"], avatar=USER_PFP if is_user else AI_PFP):
        st.markdown(entry["content"])

# Input row: a narrow column for the (vision-only) image uploader next to a
# wide column holding the chat text box.
col1, col2 = st.columns([1, 8])
with col1:
    if not selected_model.startswith("c4ai-aya-vision"):
        st.write("")  # placeholder keeps the column layout for text models
    else:
        img = st.file_uploader(label="πŸ“·", key="uploader", type=["png","jpg","jpeg"], accept_multiple_files=False)
        if img is not None:
            st.session_state.uploaded_image = img
            st.image(img, width=80)
with col2:
    prompt = st.chat_input("Message...")

# Handle a new user turn: text, image, or both.
if prompt or st.session_state.uploaded_image:
    if not api_key:
        st.error("API key required")
        st.stop()

    # Build the multimodal content list for the Cohere v2 chat API.
    user_items = []
    if prompt:
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user", avatar=USER_PFP):
            st.markdown(prompt)
        user_items.append({"type": "text", "text": prompt})
    if st.session_state.uploaded_image:
        uploaded = st.session_state.uploaded_image
        raw = uploaded.read()
        b64 = base64.b64encode(raw).decode("utf-8")
        # FIX: use the uploaded file's real MIME type rather than hard-coding
        # image/jpeg — PNG uploads were previously mislabelled in the data URL.
        mime = getattr(uploaded, "type", None) or "image/jpeg"
        user_items.append({"type": "image_url", "image_url": {"url": f"data:{mime};base64,{b64}"}})
        with st.chat_message("user", avatar=USER_PFP):
            st.image(raw, width=200)
        st.session_state.uploaded_image = None  # consume; avoid resending on rerun

    # FIX: an image-only send is still a first message, so the centered
    # greeting must be dismissed (previously only a text prompt set this).
    st.session_state.first_message_sent = True

    try:
        co = cohere.ClientV2(api_key)
        response = co.chat(
            model=selected_model,
            messages=[{"role": "user", "content": user_items}],
        )
        # Concatenate all text items; guard against a None content list.
        content_items = response.message.content or []
        reply = "".join(getattr(item, "text", "") or "" for item in content_items)
        with st.chat_message("assistant", avatar=AI_PFP):
            st.markdown(reply)
        st.session_state.messages.append({"role": "assistant", "content": reply})
    except Exception as e:
        # Surface API/network errors in the UI instead of crashing the app.
        st.error(f"Error: {e}")