Update app.py
app.py
CHANGED
@@ -13,7 +13,7 @@ with st.sidebar:
     st.write('This chatbot is created using the open-source Llama model from Meta.')
 
     # Use Hugging Face API Key from secrets or environment
-    api_key = os.getenv("
+    api_key = os.getenv("llama3")
 
     if not api_key:
         st.error("Hugging Face API key is missing!")
@@ -28,13 +28,16 @@ with st.sidebar:
         st.stop()
 
     st.subheader('Models and parameters')
-    selected_model = st.sidebar.selectbox('Choose a Llama model', ['Llama-3.2-1B', 'Llama-7B'], key='selected_model')
 
-    # Model
-
-
-
-
+    # Model selection
+    model_options = [
+        "meta-llama/Llama-3.2-1B",
+        "meta-llama/Llama-3.2-1B-Instruct",
+        "meta-llama/Llama-3.2-3B-Instruct",
+        "meta-llama/Llama-3.2-3B"
+    ]
+
+    selected_model = st.sidebar.selectbox('Choose a Llama model', model_options, key='selected_model')
 
     temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=1.0, value=0.1, step=0.01)
     top_p = st.sidebar.slider('top_p', min_value=0.01, max_value=1.0, value=0.9, step=0.01)
@@ -56,8 +59,8 @@ st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
 
 # Load the tokenizer and model
 try:
-    tokenizer = AutoTokenizer.from_pretrained(
-    model = AutoModelForCausalLM.from_pretrained(
+    tokenizer = AutoTokenizer.from_pretrained(selected_model)
+    model = AutoModelForCausalLM.from_pretrained(selected_model, torch_dtype=torch.bfloat16, device_map="auto")
 except Exception as e:
     st.error(f"Error loading model: {e}")
     st.stop()
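The diff ends at the model-loading try block, so the generation step that consumes tokenizer, model, temperature, and top_p is not shown. Below is a minimal standalone sketch of how those pieces could fit together with the Transformers API; the prompt, max_new_tokens value, and the idea of passing the sidebar values straight into generate() are illustrative assumptions, not code taken from app.py. Note that the Llama 3.2 checkpoints are gated on the Hugging Face Hub, so loading them normally requires an authorized token (for example via the token= argument to from_pretrained, which the diff does not show being passed).

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical stand-ins for the values the Streamlit sidebar provides.
selected_model = "meta-llama/Llama-3.2-1B-Instruct"   # one of the options added in this diff
temperature, top_p = 0.1, 0.9                         # the slider defaults

# Gated Llama repos need an authorized token; the app reads one from os.getenv("llama3").
token = os.getenv("llama3")

tokenizer = AutoTokenizer.from_pretrained(selected_model, token=token)
model = AutoModelForCausalLM.from_pretrained(
    selected_model, torch_dtype=torch.bfloat16, device_map="auto", token=token
)

# Illustrative prompt; in the app this would come from st.chat_input / the session history.
prompt = "Explain in one sentence what a tokenizer does."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

output_ids = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,
    temperature=temperature,
    top_p=top_p,
)
# Strip the prompt tokens so only the newly generated text is decoded.
reply = tokenizer.decode(output_ids[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
print(reply)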