Spaces:
Sleeping
Sleeping
Add .env file support and detailed debugging
Browse files
app.py
CHANGED
|
@@ -6,6 +6,16 @@ import traceback
|
|
| 6 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 7 |
import torch
|
| 8 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
# 환경 변수에서만 토큰 가져오기 (보안)
|
| 10 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 11 |
MODEL_NAME = os.getenv("MODEL_NAME", "gbrabbit/lily-math-model")
|
|
@@ -186,7 +196,7 @@ with gr.Blocks(title="Lily Math RAG System", theme=gr.themes.Soft()) as demo:
|
|
| 186 |
gr.Markdown(f"**모델**: {MODEL_NAME}")
|
| 187 |
gr.Markdown(f"**모델 상태**: {'✅ 로드됨' if MODEL_LOADED else '❌ 로드 실패'}")
|
| 188 |
gr.Markdown(f"**토큰 상태**: {'✅ 설정됨' if HF_TOKEN else '❌ 설정되지 않음'}")
|
| 189 |
-
gr.Markdown("**버전**: 2.
|
| 190 |
|
| 191 |
if __name__ == "__main__":
|
| 192 |
demo.launch()
|
|
|
|
| 6 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 7 |
import torch
|
| 8 |
|
| 9 |
+
# .env 파일에서 환경 변수 로드
|
| 10 |
+
try:
|
| 11 |
+
from dotenv import load_dotenv
|
| 12 |
+
load_dotenv()
|
| 13 |
+
print("✅ .env 파일 로드됨")
|
| 14 |
+
except ImportError:
|
| 15 |
+
print("⚠️ python-dotenv가 설치되지 않음, 시스템 환경 변수 사용")
|
| 16 |
+
except Exception as e:
|
| 17 |
+
print(f"⚠️ .env 파일 로드 실패: {e}")
|
| 18 |
+
|
| 19 |
# 환경 변수에서만 토큰 가져오기 (보안)
|
| 20 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 21 |
MODEL_NAME = os.getenv("MODEL_NAME", "gbrabbit/lily-math-model")
|
|
|
|
| 196 |
gr.Markdown(f"**모델**: {MODEL_NAME}")
|
| 197 |
gr.Markdown(f"**모델 상태**: {'✅ 로드됨' if MODEL_LOADED else '❌ 로드 실패'}")
|
| 198 |
gr.Markdown(f"**토큰 상태**: {'✅ 설정됨' if HF_TOKEN else '❌ 설정되지 않음'}")
|
| 199 |
+
gr.Markdown("**버전**: 2.8.0 (.env 파일 지원)")
|
| 200 |
|
| 201 |
if __name__ == "__main__":
|
| 202 |
demo.launch()
|