Spaces:
Running
Running
Update src/streamlit_app.py
Browse files- src/streamlit_app.py +3 -2
src/streamlit_app.py
CHANGED
|
@@ -29,8 +29,9 @@ device = torch.device("cpu")
|
|
| 29 |
@st.cache_resource
|
| 30 |
def load_model(model_name: str = "meta-llama/Llama-3.2-1B"):
|
| 31 |
"""Load and cache the tokenizer and model."""
|
| 32 |
-
|
| 33 |
-
|
|
|
|
| 34 |
model = model.to(device)
|
| 35 |
return tokenizer, model
|
| 36 |
|
|
|
|
@st.cache_resource
def load_model(model_name: str = "meta-llama/Llama-3.2-1B"):
    """Load and cache the tokenizer and model for *model_name*.

    Authenticates against the Hugging Face Hub with the ``HF_TOKEN``
    environment variable when it is set (gated models such as Llama
    require it); ``st.cache_resource`` keeps the loaded objects alive
    across Streamlit reruns so the weights are fetched only once.

    Returns:
        A ``(tokenizer, model)`` pair, with the model moved to the
        module-level ``device``.
    """
    # None when HF_TOKEN is unset — from_pretrained treats that as
    # an anonymous (unauthenticated) download.
    hf_token = os.environ.get("HF_TOKEN")

    tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token).to(device)
    return tokenizer, model
|
| 37 |
|