Upload Linny-Web-Server.py

Linny-Web-Server.py  ADDED  +501 -0
@@ -0,0 +1,501 @@
import os
import sys
import time
import uuid
import torch
import torch.nn as nn
from flask import Flask, request, Response, stream_with_context, jsonify
from werkzeug.utils import secure_filename

# ==========================================
# ⚙️ GLOBAL CONFIGURATION
# ==========================================
ADMIN_PASSWORD = "admin123"  # Change this to a secure password
UPLOAD_FOLDER = "./models"
DEFAULT_CHECKPOINT = "/Users/lucienkachadoorian/Downloads/Linny_Speed_Chatbot-Pro-custom-Gen1.pt"  # App tries to load this on startup if it exists

# Constants for Generation
USER_TAG = "### Instruction:"
BOT_TAG = "### Response:"
EOS_TOKEN = "<|end|>"

os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# Hardware Setup
if torch.backends.mps.is_available():
    device = torch.device("mps")
    print("🚀 Powered by: Apple Metal (MPS)")
elif torch.cuda.is_available():
    device = torch.device("cuda")
    print("🚀 Powered by: NVIDIA CUDA")
else:
    device = torch.device("cpu")
    print("⚠️ GPU not available, using CPU.")

# ==========================================
# 🧠 MODEL ARCHITECTURE (From your code)
# ==========================================
class LSTMCharLM(nn.Module):
    def __init__(self, vocab_size, embed_size, hidden_size, num_layers, dropout=0.2):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers=num_layers,
                            batch_first=True, dropout=dropout)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, x, hidden=None):
        e = self.embed(x)
        out, hidden = self.lstm(e, hidden)
        logits = self.fc(out)
        return logits, hidden

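# Shape sketch (illustrative, not part of the original file): for an input of
# shape (batch, seq_len), the embedding yields (batch, seq_len, embed_size),
# the batch_first LSTM (batch, seq_len, hidden_size), and the final linear
# layer per-character logits of shape (batch, seq_len, vocab_size).
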
# ==========================================
# 🛠️ INFERENCE ENGINE
# ==========================================
class LinnyChat:
    def __init__(self, checkpoint_path, embed_size, neurons, hidden_layers, dropout=0.2):
        print(f"🧠 Loading Linny's brain from: {checkpoint_path}")
        try:
            ckpt = torch.load(checkpoint_path, map_location=device)
            self.chars = ckpt['chars']
            self.stoi = {ch: i for i, ch in enumerate(self.chars)}
            self.itos = {i: ch for i, ch in enumerate(self.chars)}
            self.vocab_size = len(self.chars)

            self.model = LSTMCharLM(self.vocab_size, embed_size, neurons, hidden_layers, dropout).to(device)
            self.model.load_state_dict(ckpt['model_state'])
            self.model.eval()
            self.ready = True
            print("✅ Model Online and ready.")
        except Exception as e:
            print(f"❌ Failed to load model: {e}")
            self.ready = False

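    # Checkpoint format note (inferred from the loading code above): the .pt
    # file is expected to be a dict with at least 'chars' (the vocabulary as a
    # sequence of characters) and 'model_state' (a state_dict for an
    # LSTMCharLM built with matching embed/neuron/layer settings).
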
    def stream_generate(self, prompt, temperature=0.7, max_len=1575, penalty=1.2, top_p=0.9, top_k=50):
        if not self.ready:
            yield "Model not properly loaded."
            return

        formatted = f"{USER_TAG}\n{prompt}\n\n{BOT_TAG}\n"
        input_ids = [self.stoi.get(c, 0) for c in formatted]
        input_tensor = torch.tensor([input_ids], dtype=torch.long).to(device)

        hidden = None
        generated_chars = ""

        with torch.no_grad():
            _, hidden = self.model(input_tensor, hidden)
            input_token = input_tensor[:, -1:]

            for _ in range(max_len):
                logits, hidden = self.model(input_token, hidden)
                logits = logits[0, -1] / temperature

                # Repetition Penalty
                recent_chars = generated_chars[-30:]
                for char in set(recent_chars):
                    char_idx = self.stoi.get(char, 0)
                    if logits[char_idx] > 0:
                        logits[char_idx] /= penalty
                    else:
                        logits[char_idx] *= penalty

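                # Worked example (illustrative): with penalty=1.2, a recently
                # emitted char whose logit is 2.0 drops to about 1.67, while
                # one at -1.0 is pushed further down to -1.2, so both become
                # less likely on the next draw.
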
                # Top-K
                if top_k > 0:
                    top_k_values, _ = torch.topk(logits, min(top_k, len(logits)))
                    logits[logits < top_k_values[-1]] = float('-inf')

                # Top-P
                if top_p < 1.0:
                    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
                    cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
                    sorted_indices_to_remove = cumulative_probs > top_p
                    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                    sorted_indices_to_remove[..., 0] = 0
                    indices_to_remove = sorted_indices[sorted_indices_to_remove]
                    logits[indices_to_remove] = float('-inf')

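                # Worked example (illustrative): for sorted probs
                # [0.5, 0.3, 0.15, 0.05] and top_p=0.9 the cumulative sums are
                # [0.5, 0.8, 0.95, 1.0]; shifting the removal mask right by one
                # keeps the first three tokens and masks only the 0.05 tail.
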
                probs = torch.softmax(logits, dim=0)
                idx = torch.multinomial(probs, 1).item()
                char = self.itos[idx]
                generated_chars += char

                # EOS_TOKEN spans several characters, so a per-char equality
                # test against it can never fire; match the tail of the running
                # output instead. (The leading characters of the marker may
                # already have streamed out before the match completes.)
                if generated_chars.endswith(EOS_TOKEN):
                    break

                if generated_chars.endswith("###"):
                    break  # Prevent "###" instruction-tag bleeding

                yield char
                input_token = torch.tensor([[idx]], dtype=torch.long).to(device)

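# Minimal usage sketch (illustrative; the path below is hypothetical and must
# point at a checkpoint matching the architecture arguments):
#   bot = LinnyChat("./models/linny.pt", embed_size=384, neurons=768, hidden_layers=5)
#   if bot.ready:
#       print("".join(bot.stream_generate("Hello!", max_len=200)))
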
# ==========================================
# 🌐 FLASK WEB SERVER
# ==========================================
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 * 1024  # 1GB max upload

# Dictionary of active models held in memory, mapping model_id -> LinnyChat instance
active_models = {}

# Try to load the default model if it exists. DEFAULT_CHECKPOINT may be an
# absolute path, in which case it is used as-is rather than joined onto
# UPLOAD_FOLDER.
default_path = DEFAULT_CHECKPOINT if os.path.isabs(DEFAULT_CHECKPOINT) else os.path.join(UPLOAD_FOLDER, DEFAULT_CHECKPOINT)
if os.path.exists(default_path):
    active_models["default"] = LinnyChat(default_path, 384, 768, 5, 0.2)
else:
    active_models["default"] = None
    print("⚠️ No default model found. Admin needs to upload one.")

HTML_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Linny AI</title>
    <script src="https://cdn.tailwindcss.com"></script>
    <style>
        body { background-color: #0f172a; color: #f8fafc; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif; }
        .chat-container { height: calc(100vh - 160px); overflow-y: auto; scroll-behavior: smooth; }
        .message { max-width: 85%; line-height: 1.6; }
        .msg-user { background-color: #3b82f6; border-radius: 1rem 1rem 0 1rem; margin-left: auto; }
        .msg-bot { background-color: #1e293b; border-radius: 1rem 1rem 1rem 0; margin-right: auto; }

        /* The Magic Reasoning Block Styles */
        .reasoning-block {
            background-color: #020617;
            color: #94a3b8;
            border-left: 4px solid #475569;
            padding: 12px 16px;
            margin: 12px 0;
            border-radius: 0 8px 8px 0;
            font-size: 0.9em;
            font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace;
            white-space: pre-wrap;
        }
        .reasoning-header { font-weight: bold; margin-bottom: 8px; color: #cbd5e1; display: flex; align-items: center; gap: 6px; }

        ::-webkit-scrollbar { width: 8px; }
        ::-webkit-scrollbar-track { background: #0f172a; }
        ::-webkit-scrollbar-thumb { background: #334155; border-radius: 4px; }
        .loader { border: 2px solid #334155; border-top: 2px solid #3b82f6; border-radius: 50%; width: 14px; height: 14px; animation: spin 1s linear infinite; display: inline-block; }
        @keyframes spin { 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } }
    </style>
</head>
<body class="flex flex-col h-screen">

    <!-- Navbar -->
    <header class="bg-slate-900 border-b border-slate-800 p-4 flex justify-between items-center shadow-md">
        <div class="flex items-center gap-3">
            <div class="w-8 h-8 rounded-full bg-blue-500 flex items-center justify-center font-bold text-white shadow-lg shadow-blue-500/50">L</div>
            <h1 class="font-bold text-xl tracking-wide">Linny AI</h1>
            <span id="activeModelBadge" class="ml-2 text-xs bg-slate-800 px-2 py-1 rounded text-slate-400">Model: Default</span>
        </div>
        <div class="flex gap-2">
            <button onclick="openModal('uploadModal')" class="bg-slate-800 hover:bg-slate-700 text-sm px-4 py-2 rounded-lg transition-colors font-medium text-slate-200 shadow">Upload Model</button>
            <button onclick="openModal('adminModal')" class="text-slate-400 hover:text-white px-3 py-2 text-sm transition-colors">Admin</button>
        </div>
    </header>

    <!-- Chat Area -->
    <main class="flex-1 max-w-4xl w-full mx-auto p-4 flex flex-col relative">
        <div id="chatbox" class="chat-container w-full flex flex-col gap-6 p-2 mb-4">
            <div class="text-center text-slate-500 mt-10">
                <div class="w-16 h-16 rounded-full bg-slate-800 flex items-center justify-center mx-auto mb-4 text-2xl">👋</div>
                <h2 class="text-xl font-medium text-slate-300">Hello, I'm Linny.</h2>
                <p class="text-sm mt-2">I am ready to chat. Type a message below.</p>
            </div>
        </div>

        <!-- Input Area -->
        <div class="bg-slate-800 rounded-2xl p-2 flex items-end gap-2 shadow-xl border border-slate-700 shrink-0">
            <textarea id="prompt" class="bg-transparent text-white w-full max-h-48 resize-none outline-none p-3 placeholder-slate-400"
                      rows="1" placeholder="Message Linny..." oninput="autoGrow(this)" onkeydown="checkEnter(event)"></textarea>
            <button id="sendBtn" onclick="sendMessage()" class="bg-blue-600 hover:bg-blue-500 text-white p-3 rounded-xl mb-1 transition-colors shadow-lg shadow-blue-600/30">
                <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><line x1="22" y1="2" x2="11" y2="13"></line><polygon points="22 2 15 22 11 13 2 9 22 2"></polygon></svg>
            </button>
        </div>
    </main>

    <!-- Modals Background -->
    <div id="modalBackdrop" class="fixed inset-0 bg-black/60 backdrop-blur-sm hidden flex justify-center items-center z-50 transition-opacity">

        <!-- Upload Modal -->
        <div id="uploadModal" class="hidden bg-slate-900 border border-slate-700 p-6 rounded-2xl max-w-md w-full shadow-2xl">
            <h2 class="text-xl font-bold mb-4 flex items-center gap-2"><svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"></path><polyline points="17 8 12 3 7 8"></polyline><line x1="12" y1="3" x2="12" y2="15"></line></svg> Upload .pt Model</h2>
            <p class="text-sm text-slate-400 mb-4">Configure the architecture settings to match your training run exactly.</p>

            <input type="file" id="modelFile" accept=".pt" class="block w-full text-sm text-slate-400 file:mr-4 file:py-2 file:px-4 file:rounded-full file:border-0 file:text-sm file:font-semibold file:bg-blue-600 file:text-white hover:file:bg-blue-500 mb-4 bg-slate-800 rounded-lg p-2"/>

            <div class="grid grid-cols-2 gap-4 mb-4">
                <div>
                    <label class="text-xs text-slate-400 font-bold uppercase block mb-1">Hidden Layers</label>
                    <input type="number" id="cfgLayers" value="5" class="w-full bg-slate-800 border border-slate-700 rounded p-2 text-white">
                </div>
                <div>
                    <label class="text-xs text-slate-400 font-bold uppercase block mb-1">Neurons</label>
                    <input type="number" id="cfgNeurons" value="768" class="w-full bg-slate-800 border border-slate-700 rounded p-2 text-white">
                </div>
                <div>
                    <label class="text-xs text-slate-400 font-bold uppercase block mb-1">Embed Size</label>
                    <input type="number" id="cfgEmbed" value="384" class="w-full bg-slate-800 border border-slate-700 rounded p-2 text-white">
                </div>
                <div>
                    <label class="text-xs text-slate-400 font-bold uppercase block mb-1">Dropout</label>
                    <input type="number" step="0.1" id="cfgDropout" value="0.2" class="w-full bg-slate-800 border border-slate-700 rounded p-2 text-white">
                </div>
            </div>

            <div class="flex gap-2 justify-end mt-6">
                <button onclick="closeModals()" class="px-4 py-2 rounded text-slate-400 hover:text-white">Cancel</button>
                <button onclick="uploadModel()" id="uploadBtn" class="px-4 py-2 bg-blue-600 hover:bg-blue-500 rounded text-white shadow font-medium">Load & Use Model</button>
            </div>
        </div>

        <!-- Admin Modal -->
        <div id="adminModal" class="hidden bg-slate-900 border border-slate-700 p-6 rounded-2xl max-w-sm w-full shadow-2xl">
            <h2 class="text-xl font-bold mb-4 text-red-400">Admin Control</h2>
            <p class="text-sm text-slate-400 mb-4">Set your currently loaded model as the global default for all new visitors.</p>
            <input type="password" id="adminPass" placeholder="Admin Password" class="w-full bg-slate-800 border border-slate-700 rounded p-2 text-white mb-4">
            <div class="flex gap-2 justify-end">
                <button onclick="closeModals()" class="px-4 py-2 rounded text-slate-400 hover:text-white">Cancel</button>
                <button onclick="setAsDefault()" class="px-4 py-2 bg-red-600 hover:bg-red-500 rounded text-white shadow font-medium">Set Global Default</button>
            </div>
        </div>
    </div>

    <script>
        let currentModelId = "default";

        function autoGrow(element) {
            element.style.height = "5px";
            element.style.height = element.scrollHeight + "px";
        }

        function checkEnter(e) {
            if (e.key === 'Enter' && !e.shiftKey) {
                e.preventDefault();
                sendMessage();
            }
        }

        function openModal(id) {
            document.getElementById('modalBackdrop').classList.remove('hidden');
            document.getElementById('uploadModal').classList.add('hidden');
            document.getElementById('adminModal').classList.add('hidden');
            document.getElementById(id).classList.remove('hidden');
        }

        function closeModals() {
            document.getElementById('modalBackdrop').classList.add('hidden');
        }

        async function uploadModel() {
            const fileInput = document.getElementById('modelFile');
            if (!fileInput.files.length) return alert("Please select a .pt file");

            const btn = document.getElementById('uploadBtn');
            btn.innerHTML = `<span class="loader mr-2"></span> Loading...`;
            btn.disabled = true;

            const formData = new FormData();
            formData.append('file', fileInput.files[0]);
            formData.append('layers', document.getElementById('cfgLayers').value);
            formData.append('neurons', document.getElementById('cfgNeurons').value);
            formData.append('embed', document.getElementById('cfgEmbed').value);
            formData.append('dropout', document.getElementById('cfgDropout').value);

            try {
                const res = await fetch('/api/upload', { method: 'POST', body: formData });
                const data = await res.json();
                if (data.success) {
                    currentModelId = data.model_id;
                    document.getElementById('activeModelBadge').innerText = "Model: Custom Session";
                    document.getElementById('activeModelBadge').classList.replace('text-slate-400', 'text-blue-400');
                    closeModals();
                    alert("Model successfully loaded into memory!");
                } else {
                    alert("Error loading model: " + data.error);
                }
            } catch (e) {
                alert("Upload failed.");
            }
            btn.innerHTML = "Load & Use Model";
            btn.disabled = false;
        }

        async function setAsDefault() {
            const pass = document.getElementById('adminPass').value;
            const res = await fetch('/api/set_default', {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify({ password: pass, model_id: currentModelId })
            });
            const data = await res.json();
            if (data.success) {
                alert("Global default successfully updated!");
                closeModals();
            } else {
                alert("Failed: " + data.error);
            }
        }

        // --- Core Chat & Formatting Logic ---

        function formatMessage(text) {
            // Escape HTML so raw model output cannot inject markup
            let safeText = text.replace(/</g, "&lt;").replace(/>/g, "&gt;");

            // Rewrite the (now escaped) <think> tags into styled reasoning blocks
            let html = safeText
                .replace(/&lt;think&gt;/g, '<div class="reasoning-block"><div class="reasoning-header"><svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><circle cx="12" cy="12" r="10"></circle><path d="M9.09 9a3 3 0 0 1 5.83 1c0 2-3 3-3 3"></path><line x1="12" y1="17" x2="12.01" y2="17"></line></svg> Reasoning Process</div>')
                .replace(/&lt;\\/think&gt;/g, '</div>');

            // Auto-close an unclosed think block during streaming. Each opening
            // replacement contributes two <div>s but closes one itself (the
            // header), so a fully closed block accounts for two </div>s.
            let openTags = (html.match(/<div class="reasoning-block">/g) || []).length;
            let closeTags = (html.match(/<\\/div>/g) || []).length;
            if (openTags * 2 > closeTags) {
                html += '</div>';
            }

            return html;
        }

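        // Example (illustrative): formatMessage("<think>add 2 and 2</think>4")
        // escapes the tags, rewrites them into a styled reasoning block
        // followed by "4", and, mid-stream, auto-closes a block whose
        // closing tag has not arrived yet so partial output still renders.
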
        async function sendMessage() {
            const promptEl = document.getElementById('prompt');
            const promptText = promptEl.value.trim();
            if (!promptText) return;

            promptEl.value = "";
            promptEl.style.height = "auto";

            const chatbox = document.getElementById('chatbox');

            // Remove the welcome message if it exists
            const textCenter = chatbox.querySelector('.text-center.mt-10');
            if (textCenter) textCenter.remove();

            // Append the user message (escaped, since it is injected as HTML)
            chatbox.innerHTML += `<div class="message msg-user p-4 shadow"><p class="whitespace-pre-wrap">${promptText.replace(/</g, "&lt;")}</p></div>`;

            // Create the bot message container
            const botId = 'bot-' + Date.now();
            chatbox.innerHTML += `<div class="message msg-bot p-4 shadow border border-slate-700 whitespace-pre-wrap" id="${botId}"><span class="loader"></span></div>`;
            chatbox.scrollTop = chatbox.scrollHeight;

            try {
                const response = await fetch('/api/chat', {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify({ prompt: promptText, model_id: currentModelId })
                });

                if (!response.ok) throw new Error("Server error");

                const reader = response.body.getReader();
                const decoder = new TextDecoder("utf-8");
                let fullText = "";
                const botEl = document.getElementById(botId);

                while (true) {
                    const { done, value } = await reader.read();
                    if (done) break;

                    fullText += decoder.decode(value, { stream: true });
                    botEl.innerHTML = formatMessage(fullText);
                    chatbox.scrollTop = chatbox.scrollHeight;
                }
            } catch (err) {
                document.getElementById(botId).innerHTML = `<span class="text-red-400">Error connecting to server. Is the Python script running?</span>`;
            }
        }
    </script>
</body>
</html>
"""

# ==========================================
# 🛣️ ROUTES
# ==========================================
@app.route('/')
def index():
    return HTML_TEMPLATE

@app.route('/api/chat', methods=['POST'])
def chat():
    data = request.json
    prompt = data.get('prompt', '')
    model_id = data.get('model_id', 'default')

    chatbot = active_models.get(model_id) or active_models.get('default')

    if chatbot is None or not chatbot.ready:
        def err():
            yield "Server error: Model is not loaded properly. Upload a model first."
        return Response(stream_with_context(err()), mimetype='text/plain')

    def generate():
        for char in chatbot.stream_generate(prompt):
            yield char

    return Response(stream_with_context(generate()), mimetype='text/plain')

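# Streaming client sketch (illustrative; assumes the server is running on
# http://127.0.0.1:6000 and the `requests` package is installed):
#   import requests
#   r = requests.post("http://127.0.0.1:6000/api/chat",
#                     json={"prompt": "Hi Linny", "model_id": "default"}, stream=True)
#   for chunk in r.iter_content(chunk_size=None, decode_unicode=True):
#       print(chunk, end="", flush=True)
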
@app.route('/api/upload', methods=['POST'])
def upload_model():
    if 'file' not in request.files:
        return jsonify({"success": False, "error": "No file uploaded"})

    file = request.files['file']
    if file.filename == '':
        return jsonify({"success": False, "error": "Empty filename"})

    if file and file.filename.endswith('.pt'):
        try:
            # Generate a unique ID for this session's model
            model_id = str(uuid.uuid4())
            filename = secure_filename(f"{model_id}.pt")
            filepath = os.path.join(UPLOAD_FOLDER, filename)
            file.save(filepath)

            # Get the architecture configuration
            layers = int(request.form.get('layers', 5))
            neurons = int(request.form.get('neurons', 768))
            embed = int(request.form.get('embed', 384))
            dropout = float(request.form.get('dropout', 0.2))

            # Load into memory
            chatbot = LinnyChat(filepath, embed, neurons, layers, dropout)
            if chatbot.ready:
                active_models[model_id] = chatbot
                return jsonify({"success": True, "model_id": model_id})
            else:
                return jsonify({"success": False, "error": "Model failed to initialize. Check config."})
        except Exception as e:
            return jsonify({"success": False, "error": str(e)})

    return jsonify({"success": False, "error": "Invalid file type. Only .pt allowed."})

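# Upload client sketch (illustrative; "my_model.pt" is a hypothetical local
# checkpoint, and the form fields must match how it was trained):
#   import requests
#   with open("my_model.pt", "rb") as f:
#       r = requests.post("http://127.0.0.1:6000/api/upload",
#                         files={"file": f},
#                         data={"layers": 5, "neurons": 768, "embed": 384, "dropout": 0.2})
#   print(r.json())
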
@app.route('/api/set_default', methods=['POST'])
def set_default():
    data = request.json
    if data.get('password') != ADMIN_PASSWORD:
        return jsonify({"success": False, "error": "Invalid Admin Password"})

    target_id = data.get('model_id')
    if target_id in active_models:
        # Promote the session model to the global default model
        active_models['default'] = active_models[target_id]
        return jsonify({"success": True})

    return jsonify({"success": False, "error": "Model not found in memory"})

if __name__ == "__main__":
    print("\n" + "="*50)
    print("🌍 LINNY WEB SERVER STARTING...")
    print("="*50)
    print("Open your browser to: http://127.0.0.1:6000\n")
    # threaded=True lets multiple users hit the chat endpoint concurrently.
    # Note: some browsers (e.g. Chrome) refuse port 6000 as unsafe (it is the
    # X11 port); switch to another port such as 5001 if the page won't load.
    app.run(host='0.0.0.0', port=6000, threaded=True)