import os
import json
import psutil
import asyncio
import re
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional
from datetime import datetime

from fastapi import FastAPI, Request, HTTPException, UploadFile, File
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from llama_cpp import Llama

try:
    import aiohttp
except ImportError:
    aiohttp = None

app = FastAPI(title="Hannah Pilot Interface")

# --- CORS Permissions ---
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# --- Configuration ---
# Map filenames to "Hannah" names
MODEL_MAP: Dict[str, str] = {
    "qwen2.5-0.5b-instruct-q2_k.gguf": "Hannah-1.1 Light",
    "qwen2.5-0.5b-instruct-q4_k_m.gguf": "Hannah-1.1 Heavy",
}

current_model: Optional[Llama] = None
current_model_name: str = ""

# --- File Upload Configuration ---
UPLOAD_DIR = Path(tempfile.gettempdir()) / "hannah_uploads"


def _model_abs_path(model_name: str) -> Path:
    # Always resolve relative to the app directory to avoid cwd surprises.
    base_dir = Path(__file__).resolve().parent
    return (base_dir / model_name).resolve()


def _looks_like_pointer_file(path: Path) -> bool:
    # If the GGUF file is a Git LFS pointer (or similar), llama.cpp will fail to load it.
    try:
        if not path.exists() or path.is_dir():
            return False
        head = path.read_bytes()[:256]
        if b"git-lfs" in head and b"oid sha256" in head:
            return True
        # Some pointer files are plain text starting with "version".
        if head.startswith(b"version ") and b"sha256" in head:
            return True
        return False
    except Exception:
        return False


def _try_load_model(
    model_path: Path, *, n_ctx: int, n_threads: int, n_batch: int
) -> Llama:
    # Keep this tiny and explicit so we can retry with different params.
    return Llama(
        model_path=str(model_path),
        n_ctx=n_ctx,
        n_threads=n_threads,
        n_batch=n_batch,
        # mmap tends to be friendlier on low-memory CPU machines
        use_mmap=True,
        verbose=False,
    )


def get_model(model_name: str) -> Llama:
    global current_model, current_model_name

    if not model_name:
        raise HTTPException(status_code=400, detail="No model selected")

    model_path = _model_abs_path(model_name)
    if not model_path.exists():
        raise HTTPException(
            status_code=404,
            detail=f"Model file not found: {model_path.name}",
        )
    if _looks_like_pointer_file(model_path):
        raise HTTPException(
            status_code=500,
            detail=(
                "Model file looks like a pointer (not the real .gguf). "
                "Re-upload the GGUF to the Space (so it is stored as the full binary), "
                "then restart the Space."
            ),
        )
    try:
        size_mb = model_path.stat().st_size / (1024 * 1024)
    except Exception:
        size_mb = -1

    if current_model_name == model_name and current_model is not None:
        return current_model

    print(f"Loading {model_path.name} ({size_mb:.1f} MB)...")
    if current_model is not None:
        # Rebind to None rather than `del` so the global stays defined
        # (and later `is not None` checks keep working) if the load below fails.
        current_model = None

    # --- PERFORMANCE TUNING (HF Free CPU) ---
    # Increased context for Hannah 1.1 with better memory management
    # 4096 ctx provides more context awareness; fallback to 2048 if needed
    threads = int(os.getenv("N_THREADS", "2"))
    n_ctx = int(os.getenv("N_CTX", "4096"))  # Increased from 2048
    n_batch = int(os.getenv("N_BATCH", "512"))  # Increased from 256

    try:
        current_model = _try_load_model(
            model_path, n_ctx=n_ctx, n_threads=threads, n_batch=n_batch
        )
    except Exception as e:
        # Retry with conservative settings in case of memory pressure
        print(f"Model load failed with N_CTX={n_ctx}, N_BATCH={n_batch}: {e}")
        try:
            current_model = _try_load_model(
                model_path, n_ctx=2048, n_threads=threads, n_batch=256
            )
        except Exception as e2:
            print(f"Model load retry failed: {e2}")
            raise HTTPException(
                status_code=500,
                detail=(
                    "Failed to load GGUF model. This is usually caused by: "
                    "(1) model file not fully present inside the container, "
                    "(2) not enough RAM for the chosen context size, or "
                    "(3) llama-cpp-python too old for this GGUF. "
                    f"Model: {model_path.name}"
                ),
            )

    current_model_name = model_name
    return current_model
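
# Tuning sketch: the env knobs read in get_model() can be set before launch,
# e.g. (assumption: served with uvicorn; adjust module path and port):
#   N_THREADS=4 N_CTX=4096 N_BATCH=512 uvicorn app:app --host 0.0.0.0 --port 7860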


@app.get("/")
async def root():
    return {"status": "ok", "name": "Hannah-1.1"}


@app.get("/api/models")
async def list_models():
    models_info: List[Dict[str, Any]] = []
    # Scan next to this file so results agree with _model_abs_path(), not cwd.
    base_dir = Path(__file__).resolve().parent
    for p in base_dir.glob("*.gguf"):
        f = p.name
        display_name = MODEL_MAP.get(f, f)
        size_mb = p.stat().st_size / (1024 * 1024)
        models_info.append(
            {
                "filename": f,
                "display_name": display_name,
                "size": f"{size_mb:.1f} MB",
            }
        )

    # Stable ordering (Heavy first if present)
    models_info.sort(
        key=lambda x: (
            "Heavy" not in x.get("display_name", ""),
            x.get("display_name", ""),
        )
    )
    return {"models": models_info}


@app.get("/api/status")
async def system_status():
    ram = psutil.virtual_memory()
    return {
        "ram_used": f"{ram.used / (1024 * 1024):.0f} MB",
        "cpu": f"{psutil.cpu_percent()}%",
    }


@app.post("/api/gen_title")
async def gen_title(request: Request):
    try:
        data = await request.json()
        message = (data.get("message") or "").strip()
        words = message.split()
        title = " ".join(words[:4])
        title = title[:1].upper() + title[1:]  # capitalize without lowercasing the rest
        if len(words) > 4:
            title += "..."
        return {"title": title or "New Chat"}
    except Exception:
        return {"title": "New Chat"}


def cleanup_old_files(max_age_hours: int = 24):
    """Remove files older than max_age_hours from upload directory."""
    if not UPLOAD_DIR.exists():
        return

    now = datetime.now()
    for file_path in UPLOAD_DIR.glob("*"):
        if file_path.is_file():
            file_age = now - datetime.fromtimestamp(file_path.stat().st_mtime)
            if file_age.total_seconds() > max_age_hours * 3600:
                try:
                    file_path.unlink()
                except Exception:
                    pass


@app.post("/api/upload")
async def upload_file(file: UploadFile = File(...)):
    """Upload a file and store it temporarily. Returns preview and file path."""
    try:
        # Create upload directory if it doesn't exist
        UPLOAD_DIR.mkdir(parents=True, exist_ok=True)

        # Check file size (50MB limit)
        content = await file.read()
        if len(content) > 50 * 1024 * 1024:
            raise HTTPException(status_code=413, detail="File too large (max 50MB)")

        # Save under a timestamped name; strip any client-supplied directory
        # components so the file cannot escape UPLOAD_DIR
        timestamp = datetime.now().timestamp()
        safe_name = Path(file.filename or "upload").name
        file_path = UPLOAD_DIR / f"{timestamp}_{safe_name}"

        with open(file_path, "wb") as f:
            f.write(content)

        # Extract a text preview; with a strict decode, binary files leave
        # preview as None instead of producing mojibake
        preview = None
        try:
            preview = content.decode("utf-8")[:1000]  # First 1000 chars
        except UnicodeDecodeError:
            pass

        # Purge stale uploads (runs inline here, not actually in the background)
        cleanup_old_files()

        return {
            "success": True,
            "filename": file.filename,
            "file_url": str(file_path),
            "size_kb": len(content) / 1024,
            "preview": preview,
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
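
# Upload sketch (hypothetical local run; port is an assumption):
#   curl -X POST http://localhost:7860/api/upload -F "file=@notes.txt"
# The returned "file_url" is presumably embedded back into the chat message as
# "[File uploaded: <file_url>]", which extract_file_urls() below picks up.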


def extract_file_urls(message: str) -> List[str]:
    """Extract file URLs from message (Google Drive URLs and uploaded file paths)."""
    urls = []

    # Extract Google Drive URLs
    drive_pattern = r"https://drive\.google\.com/[^\s\)\"<>]*"
    urls.extend(re.findall(drive_pattern, message))

    # Extract uploaded file references: [File uploaded: path]
    upload_pattern = r"\[File uploaded: ([^\]]+)\]"
    urls.extend(re.findall(upload_pattern, message))

    return urls
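
# Example (sketch): both patterns above in one message ->
#   extract_file_urls(
#       "see https://drive.google.com/file/d/abc123/view "
#       "[File uploaded: /tmp/hannah_uploads/1.0_x.txt]"
#   )
#   == ["https://drive.google.com/file/d/abc123/view",
#       "/tmp/hannah_uploads/1.0_x.txt"]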


async def fetch_file_from_url(file_url: str, max_size: int = 10 * 1024 * 1024) -> str:
    """
    Fetch a file from URL or local path and return its content as text.
    Works with:
    - Local file paths (uploaded files)
    - Google Drive URLs
    - Text files via HTTP
    """
    try:
        # Check if it's a local file path first
        local_path = Path(file_url)
        if local_path.exists() and local_path.is_file():
            try:
                with open(local_path, "rb") as f:
                    content = f.read()

                if len(content) > max_size:
                    return f"[File too large to process: {len(content) / 1024 / 1024:.1f}MB, max 10MB]"

                try:
                    # Strict decode so binary files fall through to the
                    # placeholder below instead of returning mojibake
                    return content.decode("utf-8")[:3000]
                except UnicodeDecodeError:
                    return f"[Binary file detected. Size: {len(content) / 1024:.1f}KB.]"
            except Exception as e:
                return f"[Could not read local file: {str(e)[:100]}]"

        # Handle remote URLs (Google Drive, HTTP, etc.)
        if not aiohttp:
            return "[File fetching requires aiohttp - install via pip install aiohttp]"

        # Convert Google Drive sharing link to direct download link if needed
        if "drive.google.com" in file_url:
            # Extract the file ID from the Drive URL (re is imported at module top)

            file_id_match = re.search(r"/d/([a-zA-Z0-9-_]+)", file_url)
            if not file_id_match:
                file_id_match = re.search(r"id=([a-zA-Z0-9-_]+)", file_url)

            if file_id_match:
                file_id = file_id_match.group(1)
                # Use export=download for Google Drive files
                file_url = f"https://drive.google.com/uc?id={file_id}&export=download"

        async with aiohttp.ClientSession() as session:
            async with session.get(
                file_url, timeout=aiohttp.ClientTimeout(total=15), allow_redirects=True
            ) as resp:
                if resp.status != 200:
                    return f"[Could not fetch file: HTTP {resp.status}]"

                content = await resp.read()

                if len(content) > max_size:
                    return f"[File too large to process: {len(content) / 1024 / 1024:.1f}MB, max 10MB]"

                # Try to decode as text
                try:
                    text = content.decode("utf-8")
                    # Limit preview to first 3000 chars
                    return text[:3000]
                except UnicodeDecodeError:
                    # For binary files, return a note
                    return f"[Binary file detected. Size: {len(content) / 1024:.1f}KB. Please describe what you see in it.]"
    except asyncio.TimeoutError:
        return "[File fetch timed out - file may be too large or URL invalid]"
    except Exception as e:
        return f"[Could not fetch file: {str(e)[:100]}]"


def build_prompt(
    user_input: str, history: List[Dict[str, str]], has_web_context: bool = False
) -> str:
    # Qwen 2.5 chat format with optional web context awareness
    system = (
        "You are Hannah 1.0, an intelligent, fast, and helpful AI assistant. "
        "Answer clearly and accurately. "
    )

    # If web context is available, instruct the model to use it
    if has_web_context:
        system += (
            "You have been provided with fresh web search context in the user's message. "
            "Use this context to provide current, accurate information about recent events and dates. "
            "Reference the sources when relevant. "
        )

    system += (
        "Keep responses concise but helpful. "
        "If asked about your model or training details, simply say: 'I'm Hannah - a helpful AI assistant.' "
        "Do not discuss GGUF files or internal implementation details."
    )

    parts: List[str] = ["<|im_start|>system\n" + system + "<|im_end|>\n"]

    # Keep a small window of history for speed
    for msg in history[-12:]:
        role = msg.get("role")
        content = msg.get("content") or ""
        if role not in ("user", "assistant"):
            continue
        parts.append(f"<|im_start|>{role}\n{content}<|im_end|>\n")

    parts.append(f"<|im_start|>user\n{user_input}<|im_end|>\n<|im_start|>assistant\n")
    return "".join(parts)


@app.post("/api/chat")
async def chat(request: Request):
    data = await request.json()
    user_input = (data.get("message") or "").strip()
    model_file = data.get("model")
    history = data.get("history") or []
    has_web = data.get("internet", False)  # Check if web search was enabled

    if not user_input:
        raise HTTPException(status_code=400, detail="Empty message")

    # Extract and fetch file URLs from the message
    file_urls = extract_file_urls(user_input)
    file_content_parts = []

    if file_urls:
        for url in file_urls:
            print(f"[File Processing] Fetching: {url[:80]}...")
            content = await fetch_file_from_url(url)
            if content:
                file_content_parts.append(content)

        # Append file contents to user input so the model can process them
        if file_content_parts:
            file_section = "\n\n[File Contents Retrieved]:\n" + "\n---\n".join(
                file_content_parts
            )
            user_input = user_input + file_section

    llm = get_model(model_file)

    # Detect if the message includes web context
    has_web_context = has_web and "[Web context retrieved on" in user_input

    def iter_response():
        prompt = build_prompt(user_input, history, has_web_context=has_web_context)

        stream = llm(
            prompt,
            max_tokens=4096,  # upper bound; output is also capped by the context left after the prompt
            stop=["<|im_end|>", "User:", "System:"],
            stream=True,
        )

        for output in stream:
            token_text = output["choices"][0]["text"]
            yield json.dumps({"text": token_text}) + "\n"

    # NDJSON stream (frontend splits by newlines)
    return StreamingResponse(iter_response(), media_type="application/x-ndjson")
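
# Streaming consumption sketch (hypothetical client; port is an assumption):
#   curl -N -X POST http://localhost:7860/api/chat \
#        -H 'Content-Type: application/json' \
#        -d '{"message": "hello", "model": "qwen2.5-0.5b-instruct-q4_k_m.gguf"}'
# Each line of the NDJSON body is a standalone JSON object: {"text": "..."}.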