Spaces:
Build error
Build error
Logo as picture, not link
Browse files
app.py
CHANGED
|
@@ -2,6 +2,7 @@ import streamlit as st
|
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
import os
|
| 4 |
import sys
|
|
|
|
| 5 |
|
| 6 |
st.title("SmallZOO ChatBot 3B")
|
| 7 |
|
|
@@ -18,20 +19,20 @@ model_info ={
|
|
| 18 |
"Llama-3.2 [3B]":
|
| 19 |
{'description':"""The Llama-3.2 3B Instruct model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
|
| 20 |
\nA SLM (Small Language Model) is best for applications requiring fast response times, low resource consumption, and specific, narrow tasks. \n""",
|
| 21 |
-
'logo':'
|
| 22 |
'url':'https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct'},
|
| 23 |
|
| 24 |
"Qwen2.5 [3B]":
|
| 25 |
{'description':"""The Qwen2.5 3B Instruct model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
|
| 26 |
\nA SLM (Small Language Model) is best for applications requiring fast response times, low resource consumption, and specific, narrow tasks. \\n""",
|
| 27 |
-
'logo':'
|
| 28 |
'url':'https://huggingface.co/Qwen/Qwen2.5-3B-Instruct'},
|
| 29 |
|
| 30 |
"Phi-3.5 [3.82B]":
|
| 31 |
-
|
| 32 |
\nA SLM (Small Language Model) is best for applications requiring fast response times, low resource consumption, and specific, narrow tasks. \ \n""",
|
| 33 |
-
|
| 34 |
-
|
| 35 |
}
|
| 36 |
|
| 37 |
def format_promt(message, custom_instructions=None):
|
|
@@ -88,9 +89,9 @@ st.sidebar.button('Reset Chat', on_click=reset_conversation)
|
|
| 88 |
|
| 89 |
st.sidebar.write(f"You're now chatting with **{selected_model}**")
|
| 90 |
st.sidebar.markdown(model_info[selected_model]['description'])
|
| 91 |
-
st.sidebar.
|
| 92 |
st.sidebar.markdown(f"[View model on 🤗 Hugging Face]({model_info[selected_model]['url']})")
|
| 93 |
-
st.sidebar.markdown("*Generated content can be inaccurate, offensive or non-factual!!!*")
|
| 94 |
|
| 95 |
if "prev_option" not in st.session_state:
|
| 96 |
st.session_state.prev_option = selected_model
|
|
|
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
import os
|
| 4 |
import sys
|
| 5 |
+
from PIL import Image
|
| 6 |
|
| 7 |
st.title("SmallZOO ChatBot 3B")
|
| 8 |
|
|
|
|
| 19 |
"Llama-3.2 [3B]":
|
| 20 |
{'description':"""The Llama-3.2 3B Instruct model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
|
| 21 |
\nA SLM (Small Language Model) is best for applications requiring fast response times, low resource consumption, and specific, narrow tasks. \n""",
|
| 22 |
+
'logo':'Meta.png',
|
| 23 |
'url':'https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct'},
|
| 24 |
|
| 25 |
"Qwen2.5 [3B]":
|
| 26 |
{'description':"""The Qwen2.5 3B Instruct model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
|
| 27 |
\nA SLM (Small Language Model) is best for applications requiring fast response times, low resource consumption, and specific, narrow tasks. \\n""",
|
| 28 |
+
'logo':'Qwen.png',
|
| 29 |
'url':'https://huggingface.co/Qwen/Qwen2.5-3B-Instruct'},
|
| 30 |
|
| 31 |
"Phi-3.5 [3.82B]":
|
| 32 |
+
{'description':"""The Phi-3.5 mini instruct model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
|
| 33 |
\nA SLM (Small Language Model) is best for applications requiring fast response times, low resource consumption, and specific, narrow tasks. \ \n""",
|
| 34 |
+
'logo':'ms.png',
|
| 35 |
+
'url':'https://huggingface.co/microsoft/Phi-3.5-mini-instruct'},
|
| 36 |
}
|
| 37 |
|
| 38 |
def format_promt(message, custom_instructions=None):
|
|
|
|
| 89 |
|
| 90 |
st.sidebar.write(f"You're now chatting with **{selected_model}**")
|
| 91 |
st.sidebar.markdown(model_info[selected_model]['description'])
|
| 92 |
+
st.sidebar.image(model_info[selected_model]['logo'])
|
| 93 |
st.sidebar.markdown(f"[View model on 🤗 Hugging Face]({model_info[selected_model]['url']})")
|
| 94 |
+
st.sidebar.markdown("*Generated content can be outdated, inaccurate, offensive or non-factual!!!*")
|
| 95 |
|
| 96 |
if "prev_option" not in st.session_state:
|
| 97 |
st.session_state.prev_option = selected_model
|