# -*- coding: utf-8 -*-
"""
MTP 1.0 API - COMPLETE RESPONSES
- No artificial truncation: the model decides when to stop
- Natural, coherent responses
- Generation capped at 200 new tokens (enough for complete answers)
"""
import os
import sys
import torch
import json
import time
import gc
import re
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from huggingface_hub import snapshot_download
import uvicorn
import math
import torch.nn as nn
import torch.nn.functional as F
import sentencepiece as spm

# ======================
# OPTIMIZATIONS
# ======================
if torch.cuda.is_available():
    DEVICE = "cuda"
    print("✅ GPU detected")
else:
    DEVICE = "cpu"
    torch.set_num_threads(min(2, os.cpu_count() or 2))
    torch.set_num_interop_threads(1)
    print("⚠️ Using optimized CPU settings")

# Inference-only API: autograd is never needed
torch.set_grad_enabled(False)

MODEL_REPO = "TeszenAI/dango"

# ======================
# MTP 1.0 ARCHITECTURE
# ======================
class RMSNorm(nn.Module):
    __slots__ = ('weight', 'eps')

    def __init__(self, d_model, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(d_model))
        self.eps = eps

    def forward(self, x):
        rms = torch.sqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
        return self.weight * (x / rms)


class SwiGLU(nn.Module):
    __slots__ = ('w1', 'w2', 'w3')

    def __init__(self, d_model, d_ff):
        super().__init__()
        self.w1 = nn.Linear(d_model, d_ff, bias=False)
        self.w2 = nn.Linear(d_ff, d_model, bias=False)
        self.w3 = nn.Linear(d_model, d_ff, bias=False)

    def forward(self, x):
        return self.w2(F.silu(self.w1(x)) * self.w3(x))


class RotaryPositionalEmbedding(nn.Module):
    __slots__ = ('inv_freq',)

    def __init__(self, d_model, max_len=512):
        super().__init__()
        inv_freq = 1.0 / (10000 ** (torch.arange(0, d_model, 2).float() / d_model))
        self.register_buffer('inv_freq', inv_freq)

    def forward(self, x, seq_len=None):
        if seq_len is None:
            seq_len = x.shape[1]
        t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
        freqs = torch.einsum('i,j->ij', t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        return torch.cos(emb), torch.sin(emb)


class RotaryMultiHeadAttention(nn.Module):
    __slots__ = ('n_heads', 'd_k', 'w_q', 'w_k', 'w_v', 'w_o', 'dropout', 'scale', 'rotary')

    def __init__(self, d_model, n_heads, dropout=0.1):
        super().__init__()
        assert d_model % n_heads == 0
        self.n_heads = n_heads
        self.d_k = d_model // n_heads
        self.w_q = nn.Linear(d_model, d_model, bias=False)
        self.w_k = nn.Linear(d_model, d_model, bias=False)
        self.w_v = nn.Linear(d_model, d_model, bias=False)
        self.w_o = nn.Linear(d_model, d_model, bias=False)
        self.dropout = nn.Dropout(dropout)
        self.scale = math.sqrt(self.d_k)
        self.rotary = RotaryPositionalEmbedding(self.d_k)

    def forward(self, x, mask=None):
        b, s, _ = x.shape
        cos, sin = self.rotary(x, s)
        Q = self.w_q(x).view(b, s, self.n_heads, self.d_k).transpose(1, 2)
        K = self.w_k(x).view(b, s, self.n_heads, self.d_k).transpose(1, 2)
        V = self.w_v(x).view(b, s, self.n_heads, self.d_k).transpose(1, 2)
        # Apply the rotary embedding to queries and keys only
        Q_rot = Q * cos.unsqueeze(0).unsqueeze(0) + self._rotate_half(Q) * sin.unsqueeze(0).unsqueeze(0)
        K_rot = K * cos.unsqueeze(0).unsqueeze(0) + self._rotate_half(K) * sin.unsqueeze(0).unsqueeze(0)
        scores = torch.matmul(Q_rot, K_rot.transpose(-2, -1)) / self.scale
        if mask is not None:
            scores = scores.masked_fill(mask == 0, float('-inf'))
        attn = self.dropout(F.softmax(scores, dim=-1))
        out = torch.matmul(attn, V).transpose(1, 2).contiguous().view(b, s, -1)
        return self.w_o(out)

    def _rotate_half(self, x):
        x1, x2 = x.chunk(2, dim=-1)
        return torch.cat((-x2, x1), dim=-1)
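
# Illustrative shape check (a sketch, not part of the original pipeline):
# a batch passed through the rotary attention layer should keep its
# (batch, seq_len, d_model) shape. Call manually when debugging.
def _attention_shape_check():
    attn = RotaryMultiHeadAttention(d_model=512, n_heads=16)
    x = torch.randn(2, 10, 512)
    assert attn(x).shape == (2, 10, 512)
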
class TransformerBlock(nn.Module):
    __slots__ = ('attn', 'ff', 'norm1', 'norm2', 'dropout1', 'dropout2')

    def __init__(self, d_model, n_heads, d_ff, dropout=0.1):
        super().__init__()
        self.attn = RotaryMultiHeadAttention(d_model, n_heads, dropout)
        self.ff = SwiGLU(d_model, d_ff)
        self.norm1 = RMSNorm(d_model)
        self.norm2 = RMSNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # Pre-norm residual blocks
        x = x + self.dropout1(self.attn(self.norm1(x), mask))
        x = x + self.dropout2(self.ff(self.norm2(x)))
        return x


class MTP1Model(nn.Module):
    def __init__(self, vocab_size, d_model=512, n_heads=16, n_layers=8, d_ff=2048, dropout=0.1, max_len=512):
        super().__init__()
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.max_len = max_len
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.blocks = nn.ModuleList([TransformerBlock(d_model, n_heads, d_ff, dropout) for _ in range(n_layers)])
        self.norm = RMSNorm(d_model)
        self.lm_head = nn.Linear(d_model, vocab_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        seq_len = x.size(1)
        # Causal mask, built directly on the input's device
        mask = torch.tril(torch.ones(seq_len, seq_len, device=x.device)).unsqueeze(0).unsqueeze(0)
        x = self.embedding(x) * math.sqrt(self.d_model)
        x = self.dropout(x)
        for block in self.blocks:
            x = block(x, mask)
        return self.lm_head(self.norm(x))

    @torch.no_grad()
    def generate(self, input_ids, max_new=200, temperature=0.45, top_k=30, top_p=0.88, repetition_penalty=1.2):
        """Generation without artificial truncation - the model decides when to stop."""
        generated = input_ids
        eos_id = 3
        last_tokens = []
        for step in range(max_new):
            # Keep the context within the model's maximum length
            if generated.size(1) > self.max_len:
                context = generated[:, -self.max_len:]
            else:
                context = generated
            logits = self(context)
            next_logits = logits[0, -1, :].clone() / temperature
            if repetition_penalty != 1.0:
                # CTRL-style penalty: divide positive logits, multiply negative
                # ones, so previously seen tokens always become less likely
                for token_id in set(generated[0].tolist()):
                    if next_logits[token_id] > 0:
                        next_logits[token_id] /= repetition_penalty
                    else:
                        next_logits[token_id] *= repetition_penalty
            if top_k > 0:
                indices = next_logits < torch.topk(next_logits, top_k)[0][..., -1, None]
                next_logits[indices] = float('-inf')
            if top_p < 1.0:
                sorted_logits, sorted_indices = torch.sort(next_logits, descending=True)
                cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
                remove = cum_probs > top_p
                remove[..., 1:] = remove[..., :-1].clone()
                remove[..., 0] = 0
                indices = sorted_indices[remove]
                next_logits[indices] = float('-inf')
            probs = F.softmax(next_logits, dim=-1)
            next_token = torch.multinomial(probs, 1).item()
            last_tokens.append(next_token)
            # Loop guard: stop if the most recent tokens cycle between
            # at most two distinct values
            if len(last_tokens) > 6 and len(set(last_tokens[-6:])) <= 2:
                break
            if next_token == eos_id or next_token == 0:
                break
            generated = torch.cat([generated, torch.tensor([[next_token]], device=generated.device)], dim=1)
            # Natural stop: once enough tokens have been produced, look
            # for a terminator in the recent output
            if step > 30:
                recent = generated[0][-5:].tolist()
                # Token 3 is EOS; 4 may be a period depending on the tokenizer
                if 3 in recent:
                    break
        return generated

# ======================
# MINIMAL CLEANUP (ESSENTIALS ONLY)
# ======================
def clean_response(response: str) -> str:
    """Only removes repetitions and extra spaces; does NOT truncate the text."""
    if not response:
        return ""
    # Remove excessive word repetitions
    words = response.split()
    cleaned = []
    last = ""
    for w in words:
        if w.lower() != last.lower():
            cleaned.append(w)
        last = w
    response = " ".join(cleaned)
    # Collapse multiple spaces
    response = re.sub(r'\s+', ' ', response).strip()
    # Capitalize the first letter
    if response and response[0].islower():
        response = response[0].upper() + response[1:]
    # The text is NOT cut - the response stays complete
    return response
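
# Illustrative examples (the sample strings are assumptions, not from the
# pipeline): clean_response collapses consecutive duplicate words,
# normalizes spacing, and capitalizes the first letter without cutting text.
def _clean_response_demo():
    assert clean_response("hola hola   mundo mundo") == "Hola mundo"
    assert clean_response("  ya   está  limpio") == "Ya está limpio"
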
# ======================
# MODEL LOADING
# ======================
print(f"📦 Downloading MTP 1.0 from {MODEL_REPO}...")
repo_path = snapshot_download(repo_id=MODEL_REPO, repo_type="model", local_dir="mtp_repo")

config_path = os.path.join(repo_path, "config.json")
with open(config_path, "r") as f:
    config = json.load(f)

tokenizer_path = os.path.join(repo_path, "mtp_tokenizer.model")
sp = spm.SentencePieceProcessor()
sp.load(tokenizer_path)
config["vocab_size"] = sp.get_piece_size()

print("🧠 Initializing MTP 1.0...")
print(f"   → Vocabulary: {config['vocab_size']} tokens")
print(f"   → Dimensions: {config.get('d_model', 512)}")
print(f"   → Layers: {config.get('n_layers', 8)}")

model = MTP1Model(**config)
model.to(DEVICE)
model.eval()

model_path = os.path.join(repo_path, "mtp_model.pt")
if os.path.exists(model_path):
    state_dict = torch.load(model_path, map_location=DEVICE)
    model.load_state_dict(state_dict, strict=False)
    print("✅ Weights loaded")
else:
    print("⚠️ mtp_model.pt not found - using randomly initialized weights")

param_count = sum(p.numel() for p in model.parameters())
print(f"✅ MTP 1.0 ready: {param_count:,} parameters ({param_count/1e6:.2f}M)")

# ======================
# API
# ======================
app = FastAPI(title="MTP 1.0 API", version="1.0")
app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])


class PromptRequest(BaseModel):
    text: str = Field(..., max_length=2000)


def build_prompt(user_input: str) -> str:
    # Instruction format the model was trained on (markers kept in Spanish)
    return f"### Instrucción:\n{user_input}\n\n### Respuesta:\n"


ACTIVE_REQUESTS = 0


@app.post("/generate")
async def generate(req: PromptRequest):
    global ACTIVE_REQUESTS
    ACTIVE_REQUESTS += 1
    user_input = req.text.strip()
    if not user_input:
        ACTIVE_REQUESTS -= 1
        return {"reply": ""}
    tokens = sp.encode(build_prompt(user_input))[:400]
    input_ids = torch.tensor([tokens], device=DEVICE)
    try:
        start = time.time()
        output_ids = model.generate(
            input_ids,
            max_new=200,
            temperature=0.45,
            top_k=30,
            top_p=0.88,
            repetition_penalty=1.2
        )
        elapsed = time.time() - start
        gen_tokens = output_ids[0, len(tokens):].tolist()
        safe_tokens = [t for t in gen_tokens if 0 <= t < config["vocab_size"] and t != 0]
        response = sp.decode(safe_tokens).strip() if safe_tokens else ""
        # Strip any leaked prompt markers
        for m in ["### Respuesta:", "Respuesta:", "[/INST]", "Asistente:"]:
            if m in response:
                response = response.split(m)[-1].strip()
                break
        response = clean_response(response)
        if len(response) < 3:
            # User-facing fallback, in Spanish to match the model's language
            response = "Lo siento, no pude generar una respuesta clara."
        return {
            "reply": response,
            "time": round(elapsed, 2),
            "tokens": len(safe_tokens),
            "characters": len(response),
            "model": "MTP-1.0"
        }
    except Exception as e:
        print(f"Error: {e}")
        return {"reply": "Lo siento, ocurrió un error."}
    finally:
        ACTIVE_REQUESTS -= 1
        if DEVICE == "cuda":
            torch.cuda.empty_cache()
        gc.collect()


@app.get("/health")
def health():
    return {"status": "ok", "model": "MTP-1.0", "device": DEVICE}


@app.get("/info")
def info():
    return {
        "model": "MTP-1.0",
        "version": "1.0",
        "parameters": param_count,
        "parameters_millions": round(param_count / 1e6, 2),
        "device": DEVICE,
        "d_model": config.get('d_model', 512),
        "n_layers": config.get('n_layers', 8),
        "n_heads": config.get('n_heads', 16)
    }
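
# Example request (a sketch): exercising /generate with curl. The host and
# port assume the uvicorn entrypoint at the end of this file (port 7860 is
# an assumption); adjust to your deployment.
#
#   curl -X POST http://localhost:7860/generate \
#        -H "Content-Type: application/json" \
#        -d '{"text": "¿Qué es un transformer?"}'
#
# The endpoint answers with JSON of the form:
#   {"reply": "...", "time": 1.23, "tokens": 87, "characters": 310,
#    "model": "MTP-1.0"}
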
# ======================
# WEB INTERFACE
# ======================
@app.get("/", response_class=HTMLResponse)
def chat_ui():
    # NOTE: the original HTML UI was truncated in the source; this minimal
    # placeholder page keeps the surviving tagline so the route still works.
    return """
<!DOCTYPE html>
<html lang="es">
<head><meta charset="utf-8"><title>MTP 1.0</title></head>
<body>
  <h1>MTP 1.0</h1>
  <p>✨ Respuestas completas y naturales | Sin cortes | Inteligente</p>
</body>
</html>
"""


if __name__ == "__main__":
    # Entry point (assumption: port 7860, the usual Hugging Face Spaces port)
    uvicorn.run(app, host="0.0.0.0", port=7860)