Spestly committed on
Commit
9bc2f28
·
verified ·
1 Parent(s): f68537f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -25
app.py CHANGED
@@ -14,13 +14,13 @@ if not os.path.exists(AI_PFP) or not os.path.exists(USER_PFP):
14
  st.stop()
15
 
16
  model_info = {
17
- "c4ai-aya-expanse-8b": {"description": "Aya Expanse is a highly performant 8B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.", "context": "4K", "output": "4K"},
18
- "c4ai-aya-expanse-32b": {"description": "Aya Expanse is a highly performant 32B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.", "context": "128K", "output": "4K"},
19
- "c4ai-aya-vision-8b": {"description": "Aya Vision is a state-of-the-art multimodal model excelling at a variety of critical benchmarks for language, text, and image capabilities. This 8 billion parameter variant is focused on low latency and best-in-class performance.", "context": "16K", "output": "4K"},
20
- "c4ai-aya-vision-32b": {"description": "Aya Vision is a state-of-the-art multimodal model excelling at a variety of critical benchmarks for language, text, and image capabilities. Serves 23 languages. This 32 billion parameter variant is focused on state-of-art multilingual performance.", "context": "16k", "output": "4K"},
21
- "command-a-03-2025": {"description": "Command A is our most performant model to date, excelling at tool use, agents, retrieval augmented generation (RAG), and multilingual use cases. Command A has a context length of 256K, only requires two GPUs to run, and has 150% higher throughput compared to Command R+ 08-2024.", "context": "256K", "output": "8K"},
22
- "command-r7b-12-2024": {"description": "command-r7b-12-2024 is a small, fast update delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning and multiple steps.", "context": "128K", "output": "4K"},
23
- "command-r-plus-04-2024": {"description": "Command R+ is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models. It is best suited for complex RAG workflows and multi-step tool use.", "context": "128K", "output": "4K"},
24
  }
25
 
26
  with st.sidebar:
@@ -32,7 +32,7 @@ with st.sidebar:
32
  if st.button("Clear Chat"):
33
  st.session_state.messages = []
34
  st.session_state.first_message_sent = False
35
- st.rerun()
36
  st.divider()
37
  st.image(AI_PFP, width=60)
38
  st.subheader(selected_model)
@@ -46,20 +46,26 @@ if "messages" not in st.session_state:
46
  if "first_message_sent" not in st.session_state:
47
  st.session_state.first_message_sent = False
48
 
49
- if not st.session_state.first_message_sent:
50
- st.markdown("<h1 style='text-align: center; color: #4a4a4a; margin-top: 100px;'>How can Cohere help you today?</h1>", unsafe_allow_html=True)
51
- for msg in st.session_state.messages:
52
- with st.chat_message(msg["role"], avatar=USER_PFP if msg["role"] == "user" else AI_PFP):
53
- st.markdown(msg["content"])
 
 
 
 
 
 
54
 
55
- col1, col2 = st.columns([1, 4])
56
- with col1:
57
- if selected_model.startswith("c4ai-aya-vision"):
58
- uploaded = st.file_uploader("Upload image", type=["png", "jpg", "jpeg"], key="image_uploader")
59
- else:
60
- uploaded = None
61
- with col2:
62
- prompt = st.chat_input("Message...")
63
 
64
  if prompt:
65
  if not api_key:
@@ -72,20 +78,20 @@ if prompt:
72
 
73
  try:
74
  co = cohere.ClientV2(api_key)
75
- user_message = [{"type": "text", "text": prompt}]
76
  if uploaded:
77
  raw = uploaded.read()
78
  b64 = base64.b64encode(raw).decode("utf-8")
79
  data_url = f"data:image/jpeg;base64,{b64}"
80
- user_message.append({"type": "image_url", "image_url": {"url": data_url}})
81
  response = co.chat(
82
  model=selected_model,
83
- messages=[{"role": "user", "content": user_message}]
84
  )
85
  content_items = response.message.content
86
  reply = "".join(getattr(item, 'text', '') for item in content_items)
 
87
  with st.chat_message("assistant", avatar=AI_PFP):
88
  st.markdown(reply)
89
- st.session_state.messages.append({"role": "assistant", "content": reply})
90
  except Exception as e:
91
  st.error(f"Error: {str(e)}")
 
14
  st.stop()
15
 
16
  model_info = {
17
+ "c4ai-aya-expanse-8b": {"description": "Aya Expanse is a highly performant 8B multilingual model...", "context": "4K", "output": "4K"},
18
+ "c4ai-aya-expanse-32b": {"description": "Aya Expanse is a highly performant 32B multilingual model...", "context": "128K", "output": "4K"},
19
+ "command-a-03-2025": {"description": "Command A is our most performant model to date...", "context": "256K", "output": "8K"},
20
+ "command-r7b-12-2024": {"description": "command-r7b-12-2024 is a small, fast update...", "context": "128K", "output": "4K"},
21
+ "command-r-plus-04-2024": {"description": "Command R+ is an instruction-following conversational model...", "context": "128K", "output": "4K"},
22
+ "c4ai-aya-vision-8b": {"description": "Aya Vision is an 8B vision-language model...", "context": "4K", "output": "4K"},
23
+ "c4ai-aya-vision-32b": {"description": "Aya Vision is a 32B vision-language model...", "context": "128K", "output": "4K"}
24
  }
25
 
26
  with st.sidebar:
 
32
  if st.button("Clear Chat"):
33
  st.session_state.messages = []
34
  st.session_state.first_message_sent = False
35
+ st.experimental_rerun()
36
  st.divider()
37
  st.image(AI_PFP, width=60)
38
  st.subheader(selected_model)
 
46
  if "first_message_sent" not in st.session_state:
47
  st.session_state.first_message_sent = False
48
 
49
+ main = st.container()
50
+ with main:
51
+ if not st.session_state.first_message_sent:
52
+ st.markdown(
53
+ "<h1 style='text-align:center; color:#4a4a4a; margin-top:100px;'>How can Cohere help you today?</h1>",
54
+ unsafe_allow_html=True
55
+ )
56
+ for msg in st.session_state.messages:
57
+ avatar = USER_PFP if msg["role"] == "user" else AI_PFP
58
+ with st.chat_message(msg["role"], avatar=avatar):
59
+ st.markdown(msg["content"])
60
 
61
+ col1, col2 = st.columns([1, 4])
62
+ with col1:
63
+ if selected_model.startswith("c4ai-aya-vision"):
64
+ uploaded = st.file_uploader("Upload image", type=["png", "jpg", "jpeg"])
65
+ else:
66
+ uploaded = None
67
+ with col2:
68
+ prompt = st.chat_input("Message...")
69
 
70
  if prompt:
71
  if not api_key:
 
78
 
79
  try:
80
  co = cohere.ClientV2(api_key)
81
+ user_content = [{"type": "text", "text": prompt}]
82
  if uploaded:
83
  raw = uploaded.read()
84
  b64 = base64.b64encode(raw).decode("utf-8")
85
  data_url = f"data:image/jpeg;base64,{b64}"
86
+ user_content.append({"type": "image_url", "image_url": {"url": data_url}})
87
  response = co.chat(
88
  model=selected_model,
89
+ messages=[{"role": "user", "content": user_content}]
90
  )
91
  content_items = response.message.content
92
  reply = "".join(getattr(item, 'text', '') for item in content_items)
93
+ st.session_state.messages.append({"role": "assistant", "content": reply})
94
  with st.chat_message("assistant", avatar=AI_PFP):
95
  st.markdown(reply)
 
96
  except Exception as e:
97
  st.error(f"Error: {str(e)}")