Spaces: Sleeping
Update src/streamlit_app.py
Browse files — src/streamlit_app.py (+2 −2)
src/streamlit_app.py
CHANGED
|
@@ -14,7 +14,7 @@ import streamlit as st
|
|
| 14 |
# It simplifies loading models/tokenizers and running common tasks
|
| 15 |
from transformers import pipeline
|
| 16 |
|
| 17 |
-
|
| 18 |
#api_key = os.getenv("HF_HUB_TOKEN")
|
| 19 |
|
| 20 |
# 1. Cache the pipeline so it loads once
|
|
@@ -24,7 +24,7 @@ def get_generator():
|
|
| 24 |
# - "text2text-generation" tells the pipeline we want a seq2seq model (T5 family)
|
| 25 |
# - model="google/flan-t5-small" specifies which pretrained model to load
|
| 26 |
# The pipeline object wraps both tokenizer and model for you.
|
| 27 |
-
return pipeline("text2text-generation", model="google/flan-t5-small", use_auth_token=
|
| 28 |
|
| 29 |
generator = get_generator()
|
| 30 |
|
|
|
|
| 14 |
# It simplifies loading models/tokenizers and running common tasks
|
| 15 |
from transformers import pipeline
|
| 16 |
|
| 17 |
+
hf_token = st.secrets["HF_HUB_TOKEN"]
|
| 18 |
#api_key = os.getenv("HF_HUB_TOKEN")
|
| 19 |
|
| 20 |
# 1. Cache the pipeline so it loads once
|
|
|
|
| 24 |
# - "text2text-generation" tells the pipeline we want a seq2seq model (T5 family)
|
| 25 |
# - model="google/flan-t5-small" specifies which pretrained model to load
|
| 26 |
# The pipeline object wraps both tokenizer and model for you.
|
| 27 |
+
return pipeline("text2text-generation", model="google/flan-t5-small", use_auth_token=hf_token)
|
| 28 |
|
| 29 |
generator = get_generator()
|
| 30 |
|