Spaces:
Running
Running
Exclude transcripts from HF Spaces deployment
Browse files
- Update .gitignore to exclude transcripts/ folder
- Remove example references from app.py
- Fix Dockerfile build with proper cmake args for llama-cpp-python
- .gitignore +4 -0
- Dockerfile +10 -2
- README.md +1 -2
- app.py +1 -2
.gitignore
CHANGED
|
@@ -43,6 +43,10 @@ models/
|
|
| 43 |
summary.txt
|
| 44 |
thinking.txt
|
| 45 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
# Gradio
|
| 47 |
.gradio/
|
| 48 |
flagged/
|
|
|
|
| 43 |
summary.txt
|
| 44 |
thinking.txt
|
| 45 |
|
| 46 |
+
# Input files (don't push to HF Spaces)
|
| 47 |
+
transcripts/
|
| 48 |
+
uploaded_files/
|
| 49 |
+
|
| 50 |
# Gradio
|
| 51 |
.gradio/
|
| 52 |
flagged/
|
Dockerfile
CHANGED
|
@@ -2,18 +2,26 @@ FROM python:3.10-slim
|
|
| 2 |
|
| 3 |
WORKDIR /app
|
| 4 |
|
| 5 |
-
# Install system dependencies
|
| 6 |
RUN apt-get update && apt-get install -y \
|
|
|
|
|
|
|
|
|
|
| 7 |
libopencc-dev \
|
| 8 |
&& rm -rf /var/lib/apt/lists/*
|
| 9 |
|
| 10 |
# Copy requirements first for better caching
|
| 11 |
COPY requirements.txt .
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
RUN pip install --no-cache-dir -r requirements.txt
|
| 13 |
|
| 14 |
# Copy application files
|
| 15 |
COPY app.py .
|
| 16 |
-
COPY transcripts/ ./transcripts/
|
| 17 |
|
| 18 |
# Pre-download model on build (optional, speeds up first run)
|
| 19 |
# RUN python -c "from huggingface_hub import hf_hub_download; hf_hub_download(repo_id='unsloth/Qwen3-0.6B-GGUF', filename='Qwen3-0.6B-Q4_K_M.gguf', local_dir='./models')"
|
|
|
|
| 2 |
|
| 3 |
WORKDIR /app
|
| 4 |
|
| 5 |
+
# Install system dependencies (build tools needed for llama-cpp-python)
|
| 6 |
RUN apt-get update && apt-get install -y \
|
| 7 |
+
build-essential \
|
| 8 |
+
cmake \
|
| 9 |
+
python3-dev \
|
| 10 |
libopencc-dev \
|
| 11 |
&& rm -rf /var/lib/apt/lists/*
|
| 12 |
|
| 13 |
# Copy requirements first for better caching
|
| 14 |
COPY requirements.txt .
|
| 15 |
+
|
| 16 |
+
# Install llama-cpp-python with CPU-only support (no AVX requirements for broader compatibility)
|
| 17 |
+
RUN CMAKE_ARGS="-DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DLLAMA_F16C=OFF" \
|
| 18 |
+
pip install --no-cache-dir llama-cpp-python
|
| 19 |
+
|
| 20 |
+
# Install remaining requirements
|
| 21 |
RUN pip install --no-cache-dir -r requirements.txt
|
| 22 |
|
| 23 |
# Copy application files
|
| 24 |
COPY app.py .
|
|
|
|
| 25 |
|
| 26 |
# Pre-download model on build (optional, speeds up first run)
|
| 27 |
# RUN python -c "from huggingface_hub import hf_hub_download; hf_hub_download(repo_id='unsloth/Qwen3-0.6B-GGUF', filename='Qwen3-0.6B-Q4_K_M.gguf', local_dir='./models')"
|
README.md
CHANGED
|
@@ -1,7 +1,6 @@
|
|
| 1 |
---
|
| 2 |
title: Tiny Scribe - Transcript Summarizer
|
| 3 |
-
emoji:
|
| 4 |
-
|
| 5 |
colorFrom: blue
|
| 6 |
colorTo: green
|
| 7 |
sdk: docker
|
|
|
|
| 1 |
---
|
| 2 |
title: Tiny Scribe - Transcript Summarizer
|
| 3 |
+
emoji: "📄"
|
|
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: green
|
| 6 |
sdk: docker
|
app.py
CHANGED
|
@@ -251,8 +251,7 @@ def create_interface():
|
|
| 251 |
show_progress=True
|
| 252 |
)
|
| 253 |
|
| 254 |
-
|
| 255 |
-
# Users can upload their own .txt files
|
| 256 |
|
| 257 |
return demo
|
| 258 |
|
|
|
|
| 251 |
show_progress=True
|
| 252 |
)
|
| 253 |
|
| 254 |
+
|
|
|
|
| 255 |
|
| 256 |
return demo
|
| 257 |
|