mlbench123 committed on
Commit
fec0153
·
verified ·
1 Parent(s): b2f54cb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -30
app.py CHANGED
@@ -1,30 +1,19 @@
1
- #!/usr/bin/env python3
2
- """
3
- Hugging Face Spaces entrypoint.
4
-
5
- HF Spaces looks for either:
6
- - app.py with a variable named `demo` or `app`, OR
7
- - a Gradio `Blocks` returned and launched.
8
-
9
- This file reuses your existing Gradio UI factory.
10
- """
11
-
12
- import os
13
-
14
- # Optional: you can set defaults for HF here
15
- os.environ.setdefault("DB_XLSX", "database.xlsx")
16
- os.environ.setdefault("EMB_CACHE", "treatment_embeddings.pkl")
17
-
18
- # IMPORTANT: in HF we do NOT have Ollama. Use transformers backend.
19
- os.environ.setdefault("LOCAL_LLM_PROVIDER", "transformers")
20
-
21
- # Choose a CPU-friendly open model (no auth required).
22
- # Good default: TinyLlama (fast-ish on CPU).
23
- os.environ.setdefault("HF_LLM_MODEL", "TinyLlama/TinyLlama-1.1B-Chat-v1.0")
24
-
25
- from gradio_new_rag_app import make_app
26
-
27
- demo = make_app()
28
-
29
- if __name__ == "__main__":
30
- demo.launch()
 
1
+ import os
2
+ import sys
3
+
4
+ # Ensure this folder is importable on HF (Linux)
5
+ sys.path.append(os.path.dirname(__file__))
6
+
7
+ # HF Spaces: do NOT use Ollama; use transformers backend
8
+ os.environ.setdefault("LOCAL_LLM_PROVIDER", "transformers")
9
+
10
+ # CPU-friendly default model for Spaces
11
+ os.environ.setdefault("HF_LLM_MODEL", "TinyLlama/TinyLlama-1.1B-Chat-v1.0")
12
+
13
+ # Data file defaults (you will upload these)
14
+ os.environ.setdefault("DB_XLSX", "database.xlsx")
15
+ os.environ.setdefault("EMB_CACHE", "treatment_embeddings.pkl")
16
+
17
+ from gradio_new_rag_app import make_app
18
+
19
+ demo = make_app()