Commit 002c934 · Parent: 1c8c610

requirements.txt updated

Files changed:
- Dockerfile (+31 -11)
- config/settings.py (+4 -4)
- services/llm_interpreter.py (+1 -1)
Dockerfile
CHANGED

@@ -2,33 +2,53 @@ FROM python:3.10-slim-bullseye
 
 ENV PYTHONUNBUFFERED=1
 ENV PIP_NO_CACHE_DIR=1
+ENV DOCKER_CONTAINER=true
+ENV SPACE_APP_DATA=/data
+# For HF cache
+ENV HF_HOME=/data/huggingface
+
+# Optimize llama-cpp-python build for CPU only
+ENV CMAKE_ARGS="-DLLAMA_BLAS=0 -DLLAMA_CUBLAS=0"
+ENV FORCE_CMAKE=1
 
 WORKDIR /app
 
-# System deps
+# System deps - minimal for HuggingFace Spaces
 RUN apt-get update && apt-get install -y --no-install-recommends \
     build-essential \
     libglib2.0-0 \
     libjpeg62-turbo \
     poppler-utils \
-ENV FORCE_CMAKE=1
+    libmagic1 \
+    curl \
+    # git, for potential git operations
+    git \
+    && rm -rf /var/lib/apt/lists/* \
+    && apt-get clean
 
+# Copy requirements first for better layer caching
 COPY requirements.txt /app/requirements.txt
 
-# Install Python dependencies
+# Install Python dependencies with specific versions
 RUN pip install --upgrade pip && \
-    pip install -r requirements.txt
+    pip install -r requirements.txt --no-cache-dir
 
-# Download spaCy model
+# Download spaCy model (after dependencies)
 RUN python -m spacy download en_core_web_sm
 
+# Create directories that your app expects
+RUN mkdir -p /data/models /data/uploads /data/cache /data/logs /data/huggingface
+
+# Copy app code
 COPY . .
 
+# Set proper permissions
+RUN chmod -R 755 /app && \
+    chmod -R 755 /data
+
+# Health check (probes FastAPI's /docs endpoint)
+HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
+    CMD curl -f http://localhost:7860/docs || exit 1
+
 EXPOSE 7860
 
+# Use multiple workers for better performance
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "2"]
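The CMAKE_ARGS / FORCE_CMAKE pair forces llama-cpp-python to compile from source with BLAS and CUDA disabled, which matches the CPU-only hardware of a free Space. A minimal sketch of how the app might then load a GGUF model from the /data/models directory created above; the model filename and generation parameters here are assumptions, not taken from this repo:

from llama_cpp import Llama

# Hypothetical GGUF file placed under the /data/models directory from the Dockerfile.
llm = Llama(
    model_path="/data/models/llama-2-7b-chat.Q4_K_M.gguf",
    n_ctx=2048,      # context window
    n_threads=2,     # match the Space's CPU allocation
    verbose=False,
)

out = llm.create_completion("Explain this clause in plain English:", max_tokens=128)
print(out["choices"][0]["text"])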
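The health check probes /docs because FastAPI serves that page automatically. A dedicated endpoint would be cheaper to render; a minimal sketch, assuming the app object lives in app.py as the CMD's "app:app" implies:

from fastapi import FastAPI

app = FastAPI()

@app.get("/health")
def health() -> dict:
    # Trivial payload keeps the container healthcheck cheap.
    return {"status": "ok"}

With this in place, the HEALTHCHECK could target http://localhost:7860/health instead of /docs.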
config/settings.py
CHANGED

@@ -74,7 +74,7 @@ class Settings(BaseSettings):
     ANTHROPIC_MAX_TOKENS : int = 1024
 
     # Priority order for LLM providers
-    LLM_PROVIDER_PRIORITY : list = ["ollama", "openai", "anthropic", ]
+    LLM_PROVIDER_PRIORITY : list = ["llama_cpp", "ollama", "openai", "anthropic", ]
 
     # Which providers are available
     ENABLE_OLLAMA : bool = True

@@ -84,7 +84,7 @@ class Settings(BaseSettings):
     ENABLE_HF_INFERENCE : bool = False  # HuggingFace Inference API
 
     # Default provider (auto-selected based on environment)
-    LLM_DEFAULT_PROVIDER : str = "
+    LLM_DEFAULT_PROVIDER : str = "llama_cpp"
 
     # Huggingface Inference Settings (Optional)
     HF_MODEL_ID : Optional[str] = None  # e.g. "meta-llama/Llama-2-7b-chat-hf"

@@ -234,12 +234,12 @@ class Settings(BaseSettings):
         return priority if priority else ["ollama"]
 
 
-    @field_validator('LLM_DEFAULT_PROVIDER', mode='after')
+    @field_validator('LLM_DEFAULT_PROVIDER', mode = 'after')
     def set_default_provider(cls, v, info):
         """
         Set default provider based on availability
         """
-        values
+        values = info.data
 
         # Get the priority list (after adjustments)
         priority = values.get('LLM_PROVIDER_PRIORITY', [])
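Only a fragment of set_default_provider appears in the hunk. A sketch of how the full validator might resolve the default from the priority list; the selection logic beyond the lines shown is an assumption:

from pydantic import field_validator
from pydantic_settings import BaseSettings

class Settings(BaseSettings):
    ENABLE_OLLAMA : bool = True
    LLM_PROVIDER_PRIORITY : list = ["llama_cpp", "ollama", "openai", "anthropic"]
    LLM_DEFAULT_PROVIDER : str = "llama_cpp"

    @field_validator('LLM_DEFAULT_PROVIDER', mode='after')
    @classmethod
    def set_default_provider(cls, v, info):
        values = info.data
        priority = values.get('LLM_PROVIDER_PRIORITY', [])
        # Keep an explicitly configured provider when it is in the priority
        # list; otherwise fall back to the first (highest-priority) entry.
        if v in priority:
            return v
        return priority[0] if priority else "ollama"

Because info.data only contains fields declared earlier in the class, LLM_PROVIDER_PRIORITY has to stay above LLM_DEFAULT_PROVIDER for this lookup to work.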
services/llm_interpreter.py
CHANGED

@@ -29,7 +29,7 @@ class LLMClauseInterpreter:
     """
     Uses an LLM to generate plain-English explanations for legal clauses, integrated with RiskAnalyzer results and the RiskRules framework
     """
-    def __init__(self, llm_manager: LLMManager, default_provider: LLMProvider =
+    def __init__(self, llm_manager: LLMManager, default_provider: LLMProvider = None):
         """
         Initialize LLM interpreter
 
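The new None default lets callers construct the interpreter without naming a provider. A sketch of the fallback this likely implies; LLMProvider and LLMManager are stand-ins for the repo's actual types, and the deferral to the manager's default is an assumption:

from enum import Enum
from typing import Optional

class LLMProvider(str, Enum):
    # Stand-in for the repo's enum; values mirror the settings above.
    LLAMA_CPP = "llama_cpp"
    OLLAMA = "ollama"

class LLMManager:
    # Stand-in: assumes the manager exposes the provider resolved from Settings.
    def __init__(self, default_provider: LLMProvider = LLMProvider.LLAMA_CPP):
        self.default_provider = default_provider

class LLMClauseInterpreter:
    def __init__(self, llm_manager: LLMManager, default_provider: Optional[LLMProvider] = None):
        self.llm_manager = llm_manager
        # With no explicit provider, defer to the manager's default.
        self.default_provider = default_provider or llm_manager.default_provider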