Spaces:
Running on Zero
Running on Zero
Rojaldo committed on
Commit ·
c2990b1
1
Parent(s): 2358785
Add config file and auto-download model feature to app.py
Browse files- app.py +25 -7
- config/config.yaml +65 -0
- download_models.py +28 -0
app.py
CHANGED
|
@@ -19,6 +19,29 @@ except ImportError:
|
|
| 19 |
model = None
|
| 20 |
rag_system = None
|
| 21 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
def load_models():
|
| 23 |
"""Load models lazily on first use."""
|
| 24 |
global model, rag_system
|
|
@@ -26,13 +49,8 @@ def load_models():
|
|
| 26 |
if model is None:
|
| 27 |
print("Initializing models...")
|
| 28 |
|
| 29 |
-
# Check for LoRA adapter
|
| 30 |
-
adapter_path = None
|
| 31 |
-
if Path("./models/francis_botcon_lora").exists():
|
| 32 |
-
adapter_path = "./models/francis_botcon_lora"
|
| 33 |
-
print("✓ LoRA adapter found")
|
| 34 |
-
else:
|
| 35 |
-
print("⚠ LoRA adapter not found, using base model")
|
| 36 |
|
| 37 |
# Initialize model
|
| 38 |
model = FrancisModel(adapter_path=adapter_path)
|
|
|
|
| 19 |
model = None
|
| 20 |
rag_system = None
|
| 21 |
|
| 22 |
+
def download_model_if_needed():
    """Download the LoRA adapter from Hugging Face Hub if not present.

    Returns:
        str | None: Path to the local adapter directory (as a string), or
        None when the adapter is missing locally and the download failed,
        in which case the caller falls back to the base model.
    """
    adapter_path = Path("./models/francis_botcon_lora")

    if not adapter_path.exists():
        # Import lazily so the app can still start without huggingface_hub
        # installed when the adapter is already bundled on disk.
        from huggingface_hub import snapshot_download

        print("Downloading LoRA adapter from Hugging Face Hub...")
        try:
            snapshot_download(
                repo_id="rojaldo/francis-botcon-lora",
                local_dir=str(adapter_path),
                repo_type="model",
            )
            print("✓ LoRA adapter downloaded successfully")
            return str(adapter_path)
        except Exception as e:
            # Best-effort: a missing adapter is survivable, so report and
            # fall back rather than crash the Space at startup.
            print(f"⚠ Could not download model: {e}")
            print("Using base model only")
            return None

    return str(adapter_path)
|
| 44 |
+
|
| 45 |
def load_models():
|
| 46 |
"""Load models lazily on first use."""
|
| 47 |
global model, rag_system
|
|
|
|
| 49 |
if model is None:
|
| 50 |
print("Initializing models...")
|
| 51 |
|
| 52 |
+
# Check for or download LoRA adapter
|
| 53 |
+
adapter_path = download_model_if_needed()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
|
| 55 |
# Initialize model
|
| 56 |
model = FrancisModel(adapter_path=adapter_path)
|
config/config.yaml
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Francis Botcon Configuration

# Model Configuration
model:
  base_model: "mistralai/Mistral-7B-Instruct-v0.2"  # Open-source high-quality 7B model
  quantization: false
  quantization_bits: 4  # presumably only used when quantization is true — verify in loader
  device: "cuda"  # Change to "cpu" if GPU not available
  max_memory: 16  # GB

# LoRA Configuration (PEFT adapter hyperparameters)
lora:
  r: 16  # adapter rank
  lora_alpha: 32  # scaling factor (alpha / r = effective scale)
  target_modules: ["q_proj", "v_proj"]
  lora_dropout: 0.05
  bias: "none"
  task_type: "CAUSAL_LM"

# Training Configuration
training:
  epochs: 3
  learning_rate: 2e-4
  batch_size: 4
  gradient_accumulation_steps: 2  # effective batch = batch_size * this
  max_seq_length: 1024
  warmup_steps: 100
  weight_decay: 0.01
  output_dir: "./models/francis_botcon_lora"  # must match the adapter path app.py looks for

# Embeddings Configuration
embeddings:
  model: "sentence-transformers/all-MiniLM-L6-v2"
  device: "cuda"

# Vector DB Configuration
vector_db:
  type: "chromadb"  # Options: "chromadb" or "faiss"
  db_path: "./data/vectordb"
  chunk_size: 500  # characters or tokens per chunk — confirm against the chunker
  chunk_overlap: 100
  top_k: 5  # number of retrieved chunks per query
  similarity_threshold: 0.6

# Generation Configuration
generation:
  temperature: 0.7
  max_tokens: 512
  top_p: 0.9
  top_k: 50
  do_sample: true

# Application Configuration
app:
  port: 7860  # default Gradio/Spaces port
  share: false
  debug: true
  log_level: "INFO"

# Data paths
data:
  raw_dir: "./data/raw"
  processed_dir: "./data/processed"
  vectordb_dir: "./data/vectordb"  # NOTE(review): duplicates vector_db.db_path — keep in sync
  dataset_dir: "./data/dataset"
|
download_models.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Download models from Hugging Face Hub for the Space.

Run as a script (or call ``download_lora_adapter()``) to fetch the LoRA
adapter into ``./models/francis_botcon_lora``. The download is skipped
when the adapter directory already exists locally.
"""

import os
from pathlib import Path


def download_lora_adapter():
    """Download the LoRA adapter from Hugging Face Hub.

    Returns:
        bool: True when the adapter is available locally (already present
        or freshly downloaded), False when the download failed.
    """
    repo_id = "rojaldo/francis-botcon-lora"
    cache_dir = Path("./models")
    cache_dir.mkdir(parents=True, exist_ok=True)

    target_dir = cache_dir / "francis_botcon_lora"
    if target_dir.exists():
        # Consistent with app.py's startup check: never re-download an
        # adapter that is already on disk.
        print(f"✓ Model already present at: {target_dir}")
        return True

    # Imported lazily so the existence check above works even without
    # huggingface_hub installed.
    from huggingface_hub import snapshot_download

    print(f"Downloading {repo_id}...")
    try:
        model_path = snapshot_download(
            repo_id=repo_id,
            local_dir=str(target_dir),
            repo_type="model"
        )
        print(f"✓ Model downloaded to: {model_path}")
        return True
    except Exception as e:
        # Report and return False; callers decide whether a missing
        # adapter is fatal.
        print(f"Error downloading model: {e}")
        return False


if __name__ == "__main__":
    download_lora_adapter()
|