Update app.py
app.py CHANGED
@@ -1,10 +1,22 @@
 import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+import os
+import subprocess
+from dotenv import load_dotenv
 
-# Function to load model and
+# Function to load model, tokenizer, and dataset only once
 @st.cache_resource  # This decorator caches the loading process
 def load_resources():
+    load_dotenv()
+
+    huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
+
+    # Set the environment variable using os
+    # huggingface_token = st.secrets["huggingface"]["token"]
+
+    # Run the huggingface-cli login command from the Python script using subprocess
+    subprocess.run(["huggingface-cli", "login", "--token", huggingface_token])
     # Load the pre-trained Llama3 model (or your fine-tuned model)
     tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")