Upload folder using huggingface_hub
Browse files- .env.example +25 -0
- .github/copilot-instructions.md +26 -0
- .gitignore +141 -0
- .pylintrc +35 -0
- .vscode/tasks.json +13 -0
- Dockerfile +62 -0
- Dockerfile.backup +53 -0
- INSTRUCCIONES.md +93 -0
- README.md +100 -0
- chat_terminal.py +83 -0
- config/__init__.py +1 -0
- config/settings.py +41 -0
- docker-compose.yml +41 -0
- docker-manager.sh +314 -0
- environment.yml +21 -0
- main.py +52 -0
- manage-repos.sh +335 -0
- models/__init__.py +1 -0
- models/model_loader.py +72 -0
- models/text_generator.py +139 -0
- mypy.ini +34 -0
- package.json +43 -0
- requirements-dev.txt +24 -0
- requirements.txt +10 -0
- setup-cli.sh +206 -0
- setup.sh +53 -0
- test_gpt.py +44 -0
- ui/__init__.py +1 -0
- ui/gradio_interface.py +158 -0
- utils.py +137 -0
.env.example
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Variables de entorno para GPT Local
|
| 2 |
+
# Copia este archivo a .env y completa los valores
|
| 3 |
+
|
| 4 |
+
# Hugging Face Token (opcional pero recomendado)
|
| 5 |
+
# Obtén tu token en: https://huggingface.co/settings/tokens
|
| 6 |
+
HUGGINGFACE_TOKEN=hf_your_token_here
|
| 7 |
+
|
| 8 |
+
# GitHub Token (opcional, para repositorios privados)
|
| 9 |
+
# Obtén tu token en: https://github.com/settings/tokens
|
| 10 |
+
GITHUB_TOKEN=ghp_your_token_here
|
| 11 |
+
|
| 12 |
+
# Configuración del modelo por defecto
|
| 13 |
+
DEFAULT_MODEL=microsoft/DialoGPT-small
|
| 14 |
+
|
| 15 |
+
# Configuración de Gradio
|
| 16 |
+
GRADIO_SERVER_NAME=0.0.0.0
|
| 17 |
+
GRADIO_SERVER_PORT=7860
|
| 18 |
+
GRADIO_SHARE=false
|
| 19 |
+
|
| 20 |
+
# Docker configuración
|
| 21 |
+
DOCKER_IMAGE_NAME=gpt-local
|
| 22 |
+
DOCKER_CONTAINER_NAME=gpt-local-app
|
| 23 |
+
|
| 24 |
+
# Configuración de dispositivo (auto, cpu, mps, cuda)
|
| 25 |
+
DEVICE=auto
|
.github/copilot-instructions.md
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!-- Use this file to provide workspace-specific custom instructions to Copilot. For more details, visit https://code.visualstudio.com/docs/copilot/copilot-customization#_use-a-githubcopilotinstructionsmd-file -->
|
| 2 |
+
- [x] Verify that the copilot-instructions.md file in the .github directory is created.
|
| 3 |
+
|
| 4 |
+
- [x] Clarify Project Requirements
|
| 5 |
+
<!-- Proyecto: GPT de texto local usando Hugging Face con Python, interfaz de chat, carga de modelos preentrenados -->
|
| 6 |
+
|
| 7 |
+
- [x] Scaffold the Project
|
| 8 |
+
<!-- Estructura creada: main.py, models/, ui/, config/, requirements.txt -->
|
| 9 |
+
|
| 10 |
+
- [x] Customize the Project
|
| 11 |
+
<!-- Proyecto personalizado con interfaz Gradio, cargador de modelos HuggingFace, generador de texto -->
|
| 12 |
+
|
| 13 |
+
- [x] Install Required Extensions
|
| 14 |
+
<!-- No se requieren extensiones específicas para este proyecto Python -->
|
| 15 |
+
|
| 16 |
+
- [x] Compile the Project
|
| 17 |
+
<!-- Dependencias instaladas exitosamente: PyTorch 2.7.1, Transformers 4.53.0, Gradio 4.44.1 -->
|
| 18 |
+
|
| 19 |
+
- [x] Create and Run Task
|
| 20 |
+
<!-- Tarea creada para ejecutar el GPT Local, versión de terminal funcional -->
|
| 21 |
+
|
| 22 |
+
- [x] Launch the Project
|
| 23 |
+
<!-- Proyecto listo para ejecutar: chat_terminal.py y main.py disponibles -->
|
| 24 |
+
|
| 25 |
+
- [x] Ensure Documentation is Complete
|
| 26 |
+
<!-- README.md y copilot-instructions.md completados con información del proyecto -->
|
.gitignore
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
pip-wheel-metadata/
|
| 24 |
+
share/python-wheels/
|
| 25 |
+
*.egg-info/
|
| 26 |
+
.installed.cfg
|
| 27 |
+
*.egg
|
| 28 |
+
MANIFEST
|
| 29 |
+
|
| 30 |
+
# PyInstaller
|
| 31 |
+
*.manifest
|
| 32 |
+
*.spec
|
| 33 |
+
|
| 34 |
+
# Installer logs
|
| 35 |
+
pip-log.txt
|
| 36 |
+
pip-delete-this-directory.txt
|
| 37 |
+
|
| 38 |
+
# Unit test / coverage reports
|
| 39 |
+
htmlcov/
|
| 40 |
+
.tox/
|
| 41 |
+
.nox/
|
| 42 |
+
.coverage
|
| 43 |
+
.coverage.*
|
| 44 |
+
.cache
|
| 45 |
+
nosetests.xml
|
| 46 |
+
coverage.xml
|
| 47 |
+
*.cover
|
| 48 |
+
*.py,cover
|
| 49 |
+
.hypothesis/
|
| 50 |
+
.pytest_cache/
|
| 51 |
+
|
| 52 |
+
# Translations
|
| 53 |
+
*.mo
|
| 54 |
+
*.pot
|
| 55 |
+
|
| 56 |
+
# Django stuff:
|
| 57 |
+
*.log
|
| 58 |
+
local_settings.py
|
| 59 |
+
db.sqlite3
|
| 60 |
+
db.sqlite3-journal
|
| 61 |
+
|
| 62 |
+
# Flask stuff:
|
| 63 |
+
instance/
|
| 64 |
+
.webassets-cache
|
| 65 |
+
|
| 66 |
+
# Scrapy stuff:
|
| 67 |
+
.scrapy
|
| 68 |
+
|
| 69 |
+
# Sphinx documentation
|
| 70 |
+
docs/_build/
|
| 71 |
+
|
| 72 |
+
# PyBuilder
|
| 73 |
+
target/
|
| 74 |
+
|
| 75 |
+
# Jupyter Notebook
|
| 76 |
+
.ipynb_checkpoints
|
| 77 |
+
|
| 78 |
+
# IPython
|
| 79 |
+
profile_default/
|
| 80 |
+
ipython_config.py
|
| 81 |
+
|
| 82 |
+
# pyenv
|
| 83 |
+
.python-version
|
| 84 |
+
|
| 85 |
+
# pipenv
|
| 86 |
+
Pipfile.lock
|
| 87 |
+
|
| 88 |
+
# PEP 582
|
| 89 |
+
__pypackages__/
|
| 90 |
+
|
| 91 |
+
# Celery stuff
|
| 92 |
+
celerybeat-schedule
|
| 93 |
+
celerybeat.pid
|
| 94 |
+
|
| 95 |
+
# SageMath parsed files
|
| 96 |
+
*.sage.py
|
| 97 |
+
|
| 98 |
+
# Environments
|
| 99 |
+
.env
|
| 100 |
+
.venv
|
| 101 |
+
env/
|
| 102 |
+
venv/
|
| 103 |
+
ENV/
|
| 104 |
+
env.bak/
|
| 105 |
+
venv.bak/
|
| 106 |
+
|
| 107 |
+
# Spyder project settings
|
| 108 |
+
.spyderproject
|
| 109 |
+
.spyproject
|
| 110 |
+
|
| 111 |
+
# Rope project settings
|
| 112 |
+
.ropeproject
|
| 113 |
+
|
| 114 |
+
# mkdocs documentation
|
| 115 |
+
/site
|
| 116 |
+
|
| 117 |
+
# mypy
|
| 118 |
+
.mypy_cache/
|
| 119 |
+
.dmypy.json
|
| 120 |
+
dmypy.json
|
| 121 |
+
|
| 122 |
+
# Pyre type checker
|
| 123 |
+
.pyre/
|
| 124 |
+
|
| 125 |
+
# Hugging Face cache
|
| 126 |
+
models_cache/
|
| 127 |
+
.cache/
|
| 128 |
+
|
| 129 |
+
# macOS
|
| 130 |
+
.DS_Store
|
| 131 |
+
.AppleDouble
|
| 132 |
+
.LSOverride
|
| 133 |
+
|
| 134 |
+
# VS Code
|
| 135 |
+
.vscode/settings.json
|
| 136 |
+
.vscode/launch.json
|
| 137 |
+
|
| 138 |
+
# GPT Local specific
|
| 139 |
+
*.log
|
| 140 |
+
temp/
|
| 141 |
+
downloads/
|
.pylintrc
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[MASTER]
|
| 2 |
+
# Ignorar errores en archivos de configuración
|
| 3 |
+
ignore=.gitignore,setup.sh
|
| 4 |
+
|
| 5 |
+
[MESSAGES CONTROL]
|
| 6 |
+
# Deshabilitar algunos warnings que no son críticos
|
| 7 |
+
disable=
|
| 8 |
+
missing-module-docstring,
|
| 9 |
+
missing-function-docstring,
|
| 10 |
+
missing-class-docstring,
|
| 11 |
+
too-few-public-methods,
|
| 12 |
+
import-error,
|
| 13 |
+
no-member,
|
| 14 |
+
unused-import,
|
| 15 |
+
unused-variable,
|
| 16 |
+
broad-except,
|
| 17 |
+
line-too-long,
|
| 18 |
+
invalid-name,
|
| 19 |
+
consider-using-f-string
|
| 20 |
+
|
| 21 |
+
[FORMAT]
|
| 22 |
+
# Longitud máxima de línea
|
| 23 |
+
max-line-length=120
|
| 24 |
+
|
| 25 |
+
[BASIC]
|
| 26 |
+
# Convenciones de nombres
|
| 27 |
+
good-names=i,j,k,ex,Run,_,gr,os
|
| 28 |
+
|
| 29 |
+
[DESIGN]
|
| 30 |
+
# Límites de complejidad
|
| 31 |
+
max-args=7
|
| 32 |
+
max-locals=15
|
| 33 |
+
max-returns=6
|
| 34 |
+
max-branches=12
|
| 35 |
+
max-statements=50
|
.vscode/tasks.json
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"version": "2.0.0",
|
| 3 |
+
"tasks": [
|
| 4 |
+
{
|
| 5 |
+
"label": "Ejecutar GPT Local",
|
| 6 |
+
"type": "shell",
|
| 7 |
+
"command": "python3 main.py",
|
| 8 |
+
"isBackground": true,
|
| 9 |
+
"problemMatcher": [],
|
| 10 |
+
"group": "build"
|
| 11 |
+
}
|
| 12 |
+
]
|
| 13 |
+
}
|
Dockerfile
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🤖 GPT Local - Optimized Docker Container
|
| 2 |
+
FROM python:3.12-alpine
|
| 3 |
+
|
| 4 |
+
# Instalar dependencias del sistema necesarias
|
| 5 |
+
RUN apk add --no-cache \
|
| 6 |
+
gcc \
|
| 7 |
+
musl-dev \
|
| 8 |
+
libffi-dev \
|
| 9 |
+
openssl-dev \
|
| 10 |
+
curl \
|
| 11 |
+
ca-certificates \
|
| 12 |
+
git \
|
| 13 |
+
&& apk upgrade --no-cache
|
| 14 |
+
|
| 15 |
+
# Metadata
|
| 16 |
+
LABEL maintainer="GPT Local Team"
|
| 17 |
+
LABEL description="Sistema de chat GPT local con Hugging Face"
|
| 18 |
+
LABEL version="1.0"
|
| 19 |
+
|
| 20 |
+
# Configurar variables de entorno
|
| 21 |
+
ENV PYTHONPATH=/app
|
| 22 |
+
ENV PYTHONDONTWRITEBYTECODE=1
|
| 23 |
+
ENV PYTHONUNBUFFERED=1
|
| 24 |
+
ENV HF_HOME=/app/.cache/huggingface
|
| 25 |
+
ENV TRANSFORMERS_CACHE=/app/.cache/huggingface
|
| 26 |
+
ENV TORCH_HOME=/app/.cache/torch
|
| 27 |
+
|
| 28 |
+
# Crear directorio de trabajo
|
| 29 |
+
WORKDIR /app
|
| 30 |
+
|
| 31 |
+
# Crear usuario no privilegiado
|
| 32 |
+
RUN addgroup -g 1000 appuser && \
|
| 33 |
+
adduser -D -s /bin/sh -u 1000 -G appuser appuser
|
| 34 |
+
|
| 35 |
+
# Copiar y instalar dependencias
|
| 36 |
+
COPY requirements.txt .
|
| 37 |
+
RUN pip install --no-cache-dir --upgrade pip && \
|
| 38 |
+
pip install --no-cache-dir -r requirements.txt
|
| 39 |
+
|
| 40 |
+
# Copiar código de la aplicación
|
| 41 |
+
COPY . .
|
| 42 |
+
|
| 43 |
+
# Crear directorios necesarios con permisos apropiados
|
| 44 |
+
RUN mkdir -p models models_cache logs .cache/huggingface .cache/torch && \
|
| 45 |
+
chown -R appuser:appuser /app
|
| 46 |
+
|
| 47 |
+
# Cambiar a usuario no privilegiado
|
| 48 |
+
USER appuser
|
| 49 |
+
|
| 50 |
+
# Configurar permisos
|
| 51 |
+
RUN chmod +x *.py 2>/dev/null || true && \
|
| 52 |
+
chmod +x *.sh 2>/dev/null || true
|
| 53 |
+
|
| 54 |
+
# Exponer puerto
|
| 55 |
+
EXPOSE 7860
|
| 56 |
+
|
| 57 |
+
# Healthcheck mejorado
|
| 58 |
+
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
| 59 |
+
CMD curl -f http://localhost:7860/health || curl -f http://localhost:7860/ || exit 1
|
| 60 |
+
|
| 61 |
+
# Comando por defecto
|
| 62 |
+
CMD ["python3", "main.py"]
|
Dockerfile.backup
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🤖 GPT Local - Optimized Docker Container
|
| 2 |
+
FROM python:3.11-slim
|
| 3 |
+
|
| 4 |
+
# Metadata
|
| 5 |
+
LABEL maintainer="GPT Local Team"
|
| 6 |
+
LABEL description="Sistema de chat GPT local con Hugging Face"
|
| 7 |
+
LABEL version="1.0"
|
| 8 |
+
|
| 9 |
+
# Configurar variables de entorno
|
| 10 |
+
ENV PYTHONPATH=/app
|
| 11 |
+
ENV PYTHONDONTWRITEBYTECODE=1
|
| 12 |
+
ENV PYTHONUNBUFFERED=1
|
| 13 |
+
ENV HF_HOME=/app/.cache/huggingface
|
| 14 |
+
ENV TRANSFORMERS_CACHE=/app/.cache/huggingface
|
| 15 |
+
ENV TORCH_HOME=/app/.cache/torch
|
| 16 |
+
|
| 17 |
+
# Instalar dependencias del sistema
|
| 18 |
+
RUN apt-get update && apt-get install -y \
|
| 19 |
+
curl \
|
| 20 |
+
git \
|
| 21 |
+
build-essential \
|
| 22 |
+
&& rm -rf /var/lib/apt/lists/* \
|
| 23 |
+
&& apt-get clean
|
| 24 |
+
|
| 25 |
+
# Crear directorio de trabajo
|
| 26 |
+
WORKDIR /app
|
| 27 |
+
|
| 28 |
+
# Copiar archivos de requirements primero (para cache de Docker)
|
| 29 |
+
COPY requirements.txt requirements-dev.txt ./
|
| 30 |
+
|
| 31 |
+
# Instalar dependencias Python
|
| 32 |
+
RUN pip install --no-cache-dir --upgrade pip && \
|
| 33 |
+
pip install --no-cache-dir -r requirements.txt
|
| 34 |
+
|
| 35 |
+
# Copiar código fuente
|
| 36 |
+
COPY . .
|
| 37 |
+
|
| 38 |
+
# Crear directorios necesarios
|
| 39 |
+
RUN mkdir -p models models_cache logs .cache/huggingface .cache/torch
|
| 40 |
+
|
| 41 |
+
# Configurar permisos
|
| 42 |
+
RUN chmod +x *.py && \
|
| 43 |
+
chmod +x *.sh 2>/dev/null || true
|
| 44 |
+
|
| 45 |
+
# Exponer puerto
|
| 46 |
+
EXPOSE 7860
|
| 47 |
+
|
| 48 |
+
# Healthcheck
|
| 49 |
+
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
|
| 50 |
+
CMD curl -f http://localhost:7860/ || exit 1
|
| 51 |
+
|
| 52 |
+
# Comando por defecto (interfaz web)
|
| 53 |
+
CMD ["python3", "main.py"]
|
INSTRUCCIONES.md
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🚀 Instrucciones de Uso - GPT Local
|
| 2 |
+
|
| 3 |
+
## ✅ Estado del Proyecto
|
| 4 |
+
|
| 5 |
+
Tu GPT local con Hugging Face está **completamente configurado y funcionando**. Las dependencias se han instalado correctamente:
|
| 6 |
+
|
| 7 |
+
- ✅ PyTorch 2.7.1
|
| 8 |
+
- ✅ Transformers 4.53.0
|
| 9 |
+
- ✅ Gradio 4.44.1
|
| 10 |
+
- ✅ Modelos preentrenados disponibles
|
| 11 |
+
|
| 12 |
+
## 🎯 Opciones de Ejecución
|
| 13 |
+
|
| 14 |
+
### 1. Chat en Terminal (Recomendado)
|
| 15 |
+
|
| 16 |
+
```bash
|
| 17 |
+
cd /Users/kuchimac/gpt-local
|
| 18 |
+
python3 chat_terminal.py
|
| 19 |
+
```
|
| 20 |
+
|
| 21 |
+
**Ventajas:**
|
| 22 |
+
|
| 23 |
+
- Funciona inmediatamente
|
| 24 |
+
- Sin problemas de puerto o navegador
|
| 25 |
+
- Ideal para pruebas rápidas
|
| 26 |
+
|
| 27 |
+
### 2. Interfaz Web con Gradio
|
| 28 |
+
|
| 29 |
+
```bash
|
| 30 |
+
cd /Users/kuchimac/gpt-local
|
| 31 |
+
python3 main.py
|
| 32 |
+
```
|
| 33 |
+
|
| 34 |
+
**Nota:** Si tienes problemas con Gradio, usa la versión de terminal.
|
| 35 |
+
|
| 36 |
+
### 3. Prueba Rápida
|
| 37 |
+
|
| 38 |
+
```bash
|
| 39 |
+
cd /Users/kuchimac/gpt-local
|
| 40 |
+
python3 test_gpt.py
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
## 🤖 Modelos Disponibles
|
| 44 |
+
|
| 45 |
+
- `microsoft/DialoGPT-small` (por defecto, ~117MB)
|
| 46 |
+
- `gpt2` (~548MB)
|
| 47 |
+
- `distilgpt2` (~319MB)
|
| 48 |
+
|
| 49 |
+
## 💬 Comandos del Chat
|
| 50 |
+
|
| 51 |
+
En el chat de terminal:
|
| 52 |
+
|
| 53 |
+
- Escribe tu mensaje y presiona Enter
|
| 54 |
+
- `reset` - Reinicia la conversación
|
| 55 |
+
- `salir` - Cierra la aplicación
|
| 56 |
+
|
| 57 |
+
## ⚙️ Configuración
|
| 58 |
+
|
| 59 |
+
Edita `config/settings.py` para:
|
| 60 |
+
|
| 61 |
+
- Cambiar el modelo por defecto
|
| 62 |
+
- Ajustar parámetros de generación
|
| 63 |
+
- Modificar configuración de red
|
| 64 |
+
|
| 65 |
+
## 🔧 Resolución de Problemas
|
| 66 |
+
|
| 67 |
+
**Si el modelo no carga:**
|
| 68 |
+
|
| 69 |
+
```bash
|
| 70 |
+
# Verificar que las dependencias estén instaladas
|
| 71 |
+
python3 -c "import torch, transformers; print('OK')"
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
**Si hay errores de memoria:**
|
| 75 |
+
|
| 76 |
+
- Usa `microsoft/DialoGPT-small` en lugar de modelos más grandes
|
| 77 |
+
- Cierra otras aplicaciones para liberar RAM
|
| 78 |
+
|
| 79 |
+
**Si hay problemas con PYTHONPATH:**
|
| 80 |
+
|
| 81 |
+
```bash
|
| 82 |
+
export PYTHONPATH="/Users/kuchimac/Library/Python/3.9/lib/python/site-packages:/Users/kuchimac/gpt-local"
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
## 🎉 ¡Ya está listo
|
| 86 |
+
|
| 87 |
+
Tu GPT local está funcionando. Puedes empezar a chatear inmediatamente usando:
|
| 88 |
+
|
| 89 |
+
```bash
|
| 90 |
+
python3 chat_terminal.py
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
¡Disfruta tu GPT completamente privado y local! 🔒
|
README.md
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🤖 GPT Local - Sistema de Chat con Hugging Face
|
| 2 |
+
|
| 3 |
+
[](https://docker.com/)
|
| 4 |
+
[](https://python.org/)
|
| 5 |
+
[](https://huggingface.co/)
|
| 6 |
+
[](https://pytorch.org/)
|
| 7 |
+
|
| 8 |
+
Un sistema completo de chat GPT local usando modelos de Hugging Face con soporte para Docker, interfaz web y terminal.
|
| 9 |
+
|
| 10 |
+
## 🚀 Características
|
| 11 |
+
|
| 12 |
+
- 💬 **Chat Terminal**: Interfaz de línea de comandos rápida y eficiente
|
| 13 |
+
- 🌐 **Interfaz Web**: Gradio UI moderna y responsiva
|
| 14 |
+
- 🐳 **Docker Ready**: Despliegue containerizado completo
|
| 15 |
+
- 🤗 **Hugging Face**: Soporte para múltiples modelos preentrenados
|
| 16 |
+
- 🍎 **Apple Silicon**: Optimización MPS para M1/M2/M3 Macs
|
| 17 |
+
- ⚡ **GPU Acceleration**: CUDA y MPS automático
|
| 18 |
+
- 📦 **Easy Setup**: Configuración automática de dependencias
|
| 19 |
+
|
| 20 |
+
- 🤖 Interfaz de chat interactiva
|
| 21 |
+
- 🔒 Completamente local y privado
|
| 22 |
+
- 🚀 Modelos preentrenados de Hugging Face
|
| 23 |
+
- 💬 Generación de texto conversacional
|
| 24 |
+
- ⚙️ Configuración personalizable
|
| 25 |
+
|
| 26 |
+
## Requisitos
|
| 27 |
+
|
| 28 |
+
- Python 3.8 o superior
|
| 29 |
+
- torch
|
| 30 |
+
- transformers
|
| 31 |
+
- gradio (para la interfaz web)
|
| 32 |
+
|
| 33 |
+
## Instalación
|
| 34 |
+
|
| 35 |
+
1. Instalar dependencias:
|
| 36 |
+
|
| 37 |
+
```bash
|
| 38 |
+
pip install -r requirements.txt
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
1. Ejecutar la aplicación:
|
| 42 |
+
|
| 43 |
+
**Chat en Terminal:**
|
| 44 |
+
|
| 45 |
+
```bash
|
| 46 |
+
python3 chat_terminal.py
|
| 47 |
+
```
|
| 48 |
+
|
| 49 |
+
**Interfaz Web (si Gradio funciona):**
|
| 50 |
+
|
| 51 |
+
```bash
|
| 52 |
+
python3 main.py
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
**Prueba rápida:**
|
| 56 |
+
|
| 57 |
+
```bash
|
| 58 |
+
python3 test_gpt.py
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
## Uso
|
| 62 |
+
|
| 63 |
+
La aplicación abrirá una interfaz web en `http://localhost:7860` donde podrás:
|
| 64 |
+
|
| 65 |
+
- Chatear con el modelo GPT local
|
| 66 |
+
- Ajustar parámetros de generación
|
| 67 |
+
- Cambiar entre diferentes modelos
|
| 68 |
+
|
| 69 |
+
## Modelos Disponibles
|
| 70 |
+
|
| 71 |
+
- GPT-2 (por defecto)
|
| 72 |
+
- DialoGPT
|
| 73 |
+
- Otros modelos compatibles de Hugging Face
|
| 74 |
+
|
| 75 |
+
## Estructura del Proyecto
|
| 76 |
+
|
| 77 |
+
```text
|
| 78 |
+
gpt-local/
|
| 79 |
+
├── main.py # Aplicación principal
|
| 80 |
+
├── models/ # Gestión de modelos
|
| 81 |
+
│ ├── __init__.py
|
| 82 |
+
│ ├── model_loader.py # Carga de modelos
|
| 83 |
+
│ └── text_generator.py # Generación de texto
|
| 84 |
+
├── ui/ # Interfaz de usuario
|
| 85 |
+
│ ├── __init__.py
|
| 86 |
+
│ └── gradio_interface.py
|
| 87 |
+
├── config/ # Configuración
|
| 88 |
+
│ ├── __init__.py
|
| 89 |
+
│ └── settings.py
|
| 90 |
+
├── requirements.txt # Dependencias
|
| 91 |
+
└── README.md # Esta documentación
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
## Configuración
|
| 95 |
+
|
| 96 |
+
Edita `config/settings.py` para personalizar:
|
| 97 |
+
|
| 98 |
+
- Modelo por defecto
|
| 99 |
+
- Parámetros de generación
|
| 100 |
+
- Puerto de la interfaz web
|
chat_terminal.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Versión simple del GPT Local sin Gradio
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
import logging
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
# Agregar el directorio raíz al path
|
| 11 |
+
sys.path.append(str(Path(__file__).parent))
|
| 12 |
+
|
| 13 |
+
from models.model_loader import ModelLoader
|
| 14 |
+
from models.text_generator import TextGenerator
|
| 15 |
+
from config.settings import DEFAULT_MODEL
|
| 16 |
+
|
| 17 |
+
def setup_logging():
|
| 18 |
+
"""Configurar el sistema de logging"""
|
| 19 |
+
logging.basicConfig(
|
| 20 |
+
level=logging.INFO,
|
| 21 |
+
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
def main():
|
| 25 |
+
"""Función principal para el chat en terminal"""
|
| 26 |
+
setup_logging()
|
| 27 |
+
logger = logging.getLogger(__name__)
|
| 28 |
+
|
| 29 |
+
print("🤖 GPT Local - Chat en Terminal")
|
| 30 |
+
print("="*50)
|
| 31 |
+
|
| 32 |
+
# Inicializar componentes
|
| 33 |
+
model_loader = ModelLoader()
|
| 34 |
+
text_generator = TextGenerator(model_loader)
|
| 35 |
+
|
| 36 |
+
# Cargar modelo por defecto
|
| 37 |
+
print(f"📦 Cargando modelo: {DEFAULT_MODEL}")
|
| 38 |
+
if not model_loader.load_model(DEFAULT_MODEL):
|
| 39 |
+
print("❌ Error al cargar el modelo")
|
| 40 |
+
return
|
| 41 |
+
|
| 42 |
+
print("✅ Modelo cargado exitosamente")
|
| 43 |
+
print("💡 Escribe 'salir' para terminar")
|
| 44 |
+
print("💡 Escribe 'reset' para reiniciar el chat")
|
| 45 |
+
print("-"*50)
|
| 46 |
+
|
| 47 |
+
try:
|
| 48 |
+
while True:
|
| 49 |
+
# Obtener entrada del usuario
|
| 50 |
+
user_input = input("\n👤 Tú: ").strip()
|
| 51 |
+
|
| 52 |
+
if user_input.lower() in ['salir', 'exit', 'quit']:
|
| 53 |
+
print("👋 ¡Hasta luego!")
|
| 54 |
+
break
|
| 55 |
+
|
| 56 |
+
if user_input.lower() == 'reset':
|
| 57 |
+
text_generator.reset_chat_history()
|
| 58 |
+
print("🔄 Chat reiniciado")
|
| 59 |
+
continue
|
| 60 |
+
|
| 61 |
+
if not user_input:
|
| 62 |
+
continue
|
| 63 |
+
|
| 64 |
+
# Generar respuesta
|
| 65 |
+
print("🤖 GPT: ", end="", flush=True)
|
| 66 |
+
try:
|
| 67 |
+
response = text_generator.generate_response(
|
| 68 |
+
user_input,
|
| 69 |
+
temperature=0.7,
|
| 70 |
+
max_length=200
|
| 71 |
+
)
|
| 72 |
+
print(response)
|
| 73 |
+
|
| 74 |
+
except Exception as e:
|
| 75 |
+
print(f"❌ Error: {str(e)}")
|
| 76 |
+
|
| 77 |
+
except KeyboardInterrupt:
|
| 78 |
+
print("\n👋 Chat interrumpido por el usuario")
|
| 79 |
+
except Exception as e:
|
| 80 |
+
logger.error(f"❌ Error en el chat: {str(e)}")
|
| 81 |
+
|
| 82 |
+
if __name__ == "__main__":
|
| 83 |
+
main()
|
config/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Módulo de configuración
|
config/settings.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Configuración del GPT Local
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Configuración de modelos
|
| 6 |
+
DEFAULT_MODEL = "microsoft/DialoGPT-small"
|
| 7 |
+
|
| 8 |
+
AVAILABLE_MODELS = [
|
| 9 |
+
"microsoft/DialoGPT-small",
|
| 10 |
+
"gpt2",
|
| 11 |
+
"distilgpt2"
|
| 12 |
+
]
|
| 13 |
+
|
| 14 |
+
# Parámetros de generación por defecto
|
| 15 |
+
DEFAULT_GENERATION_PARAMS = {
|
| 16 |
+
"temperature": 0.7,
|
| 17 |
+
"max_length": 512,
|
| 18 |
+
"top_p": 0.9,
|
| 19 |
+
"do_sample": True
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
# Configuración de la interfaz web
|
| 23 |
+
WEB_CONFIG = {
|
| 24 |
+
"host": "127.0.0.1",
|
| 25 |
+
"port": 7860,
|
| 26 |
+
"share": False,
|
| 27 |
+
"debug": False
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
# Configuración de logging
|
| 31 |
+
LOGGING_CONFIG = {
|
| 32 |
+
"level": "INFO",
|
| 33 |
+
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
# Configuración del dispositivo
|
| 37 |
+
DEVICE_CONFIG = {
|
| 38 |
+
"auto_select": True, # Seleccionar automáticamente GPU si está disponible
|
| 39 |
+
"force_cpu": False, # Forzar uso de CPU
|
| 40 |
+
"torch_dtype": "auto" # "float16" para GPU, "float32" para CPU
|
| 41 |
+
}
|
docker-compose.yml
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version: '3.8'
|
| 2 |
+
|
| 3 |
+
services:
|
| 4 |
+
gpt-local:
|
| 5 |
+
build: .
|
| 6 |
+
container_name: gpt-local
|
| 7 |
+
ports:
|
| 8 |
+
- "7860:7860"
|
| 9 |
+
volumes:
|
| 10 |
+
# Persistir cache de modelos
|
| 11 |
+
- ./models_cache:/app/models_cache
|
| 12 |
+
- ~/.cache/huggingface:/root/.cache/huggingface
|
| 13 |
+
environment:
|
| 14 |
+
- PYTHONPATH=/app
|
| 15 |
+
- HF_HOME=/root/.cache/huggingface
|
| 16 |
+
- HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}
|
| 17 |
+
- GITHUB_TOKEN=${GITHUB_TOKEN}
|
| 18 |
+
- DEFAULT_MODEL=${DEFAULT_MODEL:-microsoft/DialoGPT-small}
|
| 19 |
+
- DEVICE=${DEVICE:-auto}
|
| 20 |
+
# Para chat en terminal
|
| 21 |
+
tty: true
|
| 22 |
+
stdin_open: true
|
| 23 |
+
command: python3 chat_terminal.py
|
| 24 |
+
|
| 25 |
+
gpt-local-web:
|
| 26 |
+
build: .
|
| 27 |
+
container_name: gpt-local-web
|
| 28 |
+
ports:
|
| 29 |
+
- "7860:7860"
|
| 30 |
+
volumes:
|
| 31 |
+
- ./models_cache:/app/models_cache
|
| 32 |
+
- ~/.cache/huggingface:/root/.cache/huggingface
|
| 33 |
+
environment:
|
| 34 |
+
- PYTHONPATH=/app
|
| 35 |
+
- HF_HOME=/root/.cache/huggingface
|
| 36 |
+
- HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}
|
| 37 |
+
- GITHUB_TOKEN=${GITHUB_TOKEN}
|
| 38 |
+
- DEFAULT_MODEL=${DEFAULT_MODEL:-microsoft/DialoGPT-small}
|
| 39 |
+
- DEVICE=${DEVICE:-auto}
|
| 40 |
+
# Para interfaz web
|
| 41 |
+
command: python3 main.py
|
docker-manager.sh
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/bin/bash
# 🐳 Script de gestión completa de Docker para GPT Local
# Construye, publica y despliega containers en Docker Hub

set -e

# Load environment variables from .env if present.
# BUGFIX: the previous `export $(cat .env | grep -v '^#' | xargs)` broke on
# values containing spaces, quotes or '=' signs. `set -a` marks every
# variable assigned while sourcing for export, which handles any valid
# shell assignment (and comments) correctly.
if [ -f .env ]; then
    set -a
    # shellcheck disable=SC1091
    source .env
    set +a
fi
# ANSI color codes used by the log helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# _log COLOR LABEL MESSAGE — shared formatter for the print_* helpers.
_log() {
    echo -e "${1}[${2}]${NC} ${3}"
}

print_status()  { _log "$BLUE"   "INFO"    "$1"; }
print_success() { _log "$GREEN"  "SUCCESS" "$1"; }
print_warning() { _log "$YELLOW" "WARNING" "$1"; }
print_error()   { _log "$RED"    "ERROR"   "$1"; }

# Image naming: overridable via .env, with sensible defaults.
DOCKER_USERNAME=${DOCKER_USERNAME:-"drtv"}
IMAGE_NAME=${DOCKER_IMAGE_NAME:-"gpt-local"}
VERSION=${1:-"latest"}
FULL_IMAGE_NAME="$DOCKER_USERNAME/$IMAGE_NAME:$VERSION"
| 40 |
+
|
| 41 |
+
# Función para construir imagen
|
| 42 |
+
build_image() {
|
| 43 |
+
print_status "Construyendo imagen Docker: $FULL_IMAGE_NAME"
|
| 44 |
+
|
| 45 |
+
# Crear Dockerfile optimizado si no existe una versión reciente
|
| 46 |
+
create_optimized_dockerfile
|
| 47 |
+
|
| 48 |
+
# Construir imagen
|
| 49 |
+
if docker build -t "$FULL_IMAGE_NAME" -t "$DOCKER_USERNAME/$IMAGE_NAME:latest" .; then
|
| 50 |
+
print_success "Imagen construida exitosamente: $FULL_IMAGE_NAME"
|
| 51 |
+
|
| 52 |
+
# Mostrar tamaño de la imagen
|
| 53 |
+
local size=$(docker images --format "table {{.Repository}}:{{.Tag}}\t{{.Size}}" | grep "$DOCKER_USERNAME/$IMAGE_NAME" | head -1 | awk '{print $2}')
|
| 54 |
+
print_status "Tamaño de la imagen: $size"
|
| 55 |
+
else
|
| 56 |
+
print_error "Error construyendo la imagen"
|
| 57 |
+
return 1
|
| 58 |
+
fi
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
# Función para crear Dockerfile optimizado
|
| 62 |
+
create_optimized_dockerfile() {
|
| 63 |
+
cat > Dockerfile << 'EOF'
|
| 64 |
+
# 🤖 GPT Local - Optimized Docker Container
|
| 65 |
+
FROM python:3.11-slim
|
| 66 |
+
|
| 67 |
+
# Metadata
|
| 68 |
+
LABEL maintainer="GPT Local Team"
|
| 69 |
+
LABEL description="Sistema de chat GPT local con Hugging Face"
|
| 70 |
+
LABEL version="1.0"
|
| 71 |
+
|
| 72 |
+
# Configurar variables de entorno
|
| 73 |
+
ENV PYTHONPATH=/app
|
| 74 |
+
ENV PYTHONDONTWRITEBYTECODE=1
|
| 75 |
+
ENV PYTHONUNBUFFERED=1
|
| 76 |
+
ENV HF_HOME=/app/.cache/huggingface
|
| 77 |
+
ENV TRANSFORMERS_CACHE=/app/.cache/huggingface
|
| 78 |
+
ENV TORCH_HOME=/app/.cache/torch
|
| 79 |
+
|
| 80 |
+
# Instalar dependencias del sistema
|
| 81 |
+
RUN apt-get update && apt-get install -y \
|
| 82 |
+
curl \
|
| 83 |
+
git \
|
| 84 |
+
build-essential \
|
| 85 |
+
&& rm -rf /var/lib/apt/lists/* \
|
| 86 |
+
&& apt-get clean
|
| 87 |
+
|
| 88 |
+
# Crear directorio de trabajo
|
| 89 |
+
WORKDIR /app
|
| 90 |
+
|
| 91 |
+
# Copiar archivos de requirements primero (para cache de Docker)
|
| 92 |
+
COPY requirements.txt requirements-dev.txt ./
|
| 93 |
+
|
| 94 |
+
# Instalar dependencias Python
|
| 95 |
+
RUN pip install --no-cache-dir --upgrade pip && \
|
| 96 |
+
pip install --no-cache-dir -r requirements.txt
|
| 97 |
+
|
| 98 |
+
# Copiar código fuente
|
| 99 |
+
COPY . .
|
| 100 |
+
|
| 101 |
+
# Crear directorios necesarios
|
| 102 |
+
RUN mkdir -p models models_cache logs .cache/huggingface .cache/torch
|
| 103 |
+
|
| 104 |
+
# Configurar permisos
|
| 105 |
+
RUN chmod +x *.py && \
|
| 106 |
+
chmod +x *.sh 2>/dev/null || true
|
| 107 |
+
|
| 108 |
+
# Exponer puerto
|
| 109 |
+
EXPOSE 7860
|
| 110 |
+
|
| 111 |
+
# Healthcheck
|
| 112 |
+
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
|
| 113 |
+
CMD curl -f http://localhost:7860/ || exit 1
|
| 114 |
+
|
| 115 |
+
# Comando por defecto (interfaz web)
|
| 116 |
+
CMD ["python3", "main.py"]
|
| 117 |
+
EOF
|
| 118 |
+
print_success "Dockerfile optimizado creado"
|
| 119 |
+
}
# Push the versioned tag and "latest" to Docker Hub.
# Requires a prior `docker login` as $DOCKER_USERNAME.
push_image() {
    print_status "Subiendo imagen a Docker Hub: $FULL_IMAGE_NAME"

    # Verify login by grepping `docker info`.
    # NOTE(review): newer Docker versions using credential helpers may not
    # print a "Username:" line, which would make this check fail even when
    # logged in — verify against the Docker versions in use.
    if ! docker info | grep -q "Username: $DOCKER_USERNAME"; then
        print_error "No estás logueado en Docker Hub como $DOCKER_USERNAME"
        print_status "Ejecuta: echo 'tu_token' | docker login -u $DOCKER_USERNAME --password-stdin"
        return 1
    fi

    # Push the version-specific tag first.
    if docker push "$FULL_IMAGE_NAME"; then
        print_success "Imagen $FULL_IMAGE_NAME subida exitosamente"
    else
        print_error "Error subiendo imagen con tag $VERSION"
        return 1
    fi

    # Then push "latest" (build_image tags both).
    if docker push "$DOCKER_USERNAME/$IMAGE_NAME:latest"; then
        print_success "Imagen latest subida exitosamente"
        print_status "Imagen disponible en: https://hub.docker.com/r/$DOCKER_USERNAME/$IMAGE_NAME"
    else
        print_error "Error subiendo imagen latest"
        return 1
    fi
}
# Run the image locally in one of three modes:
#   web      — detached Gradio web UI on port 7860 (default)
#   terminal — interactive terminal chat (chat_terminal.py)
#   dev      — interactive shell with the working tree bind-mounted
run_container() {
    local mode=${1:-"web"}

    print_status "Ejecutando container en modo: $mode"

    # Stop/remove a previous instance so the container name is free;
    # `|| true` keeps `set -e` from aborting when none exists.
    docker stop "$DOCKER_USERNAME-$IMAGE_NAME" 2>/dev/null || true
    docker rm "$DOCKER_USERNAME-$IMAGE_NAME" 2>/dev/null || true

    case $mode in
        "web")
            # Detached; model cache and logs persisted on the host.
            docker run -d \
                --name "$DOCKER_USERNAME-$IMAGE_NAME" \
                -p 7860:7860 \
                -v "$(pwd)/models_cache:/app/models_cache" \
                -v "$(pwd)/logs:/app/logs" \
                -e HUGGINGFACE_TOKEN="$HUGGINGFACE_TOKEN" \
                -e DEFAULT_MODEL="$DEFAULT_MODEL" \
                -e DEVICE="$DEVICE" \
                "$FULL_IMAGE_NAME"

            print_success "Container web ejecutándose en: http://localhost:7860"
            ;;
        "terminal")
            # Interactive; overrides the image CMD with the terminal chat.
            docker run -it \
                --name "$DOCKER_USERNAME-$IMAGE_NAME-terminal" \
                -v "$(pwd)/models_cache:/app/models_cache" \
                -e HUGGINGFACE_TOKEN="$HUGGINGFACE_TOKEN" \
                -e DEFAULT_MODEL="$DEFAULT_MODEL" \
                -e DEVICE="$DEVICE" \
                "$FULL_IMAGE_NAME" \
                python3 chat_terminal.py
            ;;
        "dev")
            # Dev shell: bind-mounts the whole working tree over /app so
            # edits on the host are visible inside the container.
            docker run -it \
                --name "$DOCKER_USERNAME-$IMAGE_NAME-dev" \
                -p 7860:7860 \
                -v "$(pwd):/app" \
                -e HUGGINGFACE_TOKEN="$HUGGINGFACE_TOKEN" \
                -e DEFAULT_MODEL="$DEFAULT_MODEL" \
                -e DEVICE="$DEVICE" \
                "$FULL_IMAGE_NAME" \
                bash
            ;;
    esac
}
# Build and start a docker-compose service (default: gpt-local-web),
# then tail its logs until interrupted.
run_compose() {
    local service=${1:-"gpt-local-web"}

    print_status "Ejecutando con docker-compose: $service"

    # Build images if needed and start detached.
    if docker-compose up --build -d "$service"; then
        print_success "Servicio $service ejecutándose"

        if [ "$service" = "gpt-local-web" ]; then
            print_status "Interfaz web disponible en: http://localhost:7860"
        fi

        # Blocks following the logs; Ctrl+C detaches without stopping the service.
        print_status "Logs del servicio (Ctrl+C para salir):"
        docker-compose logs -f "$service"
    else
        print_error "Error ejecutando docker-compose"
        return 1
    fi
}
# Stop and remove all project containers and prune dangling images.
cleanup() {
    print_status "Limpiando containers e imágenes..."

    # Intentionally unquoted $(...) so multiple container IDs word-split
    # into separate arguments; `|| true` covers the empty-result case.
    docker stop $(docker ps -q --filter "name=$DOCKER_USERNAME-$IMAGE_NAME") 2>/dev/null || true

    docker rm $(docker ps -aq --filter "name=$DOCKER_USERNAME-$IMAGE_NAME") 2>/dev/null || true

    # Only removes dangling images, not tagged project images.
    docker image prune -f

    print_success "Limpieza completada"
}
# Print running containers, local project images and overall Docker disk use.
status() {
    print_status "Estado de containers GPT Local:"

    echo "Containers corriendo:"
    docker ps --filter "name=$DOCKER_USERNAME-$IMAGE_NAME" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"

    echo ""
    echo "Imágenes disponibles:"
    docker images --filter "reference=$DOCKER_USERNAME/$IMAGE_NAME" --format "table {{.Repository}}\t{{.Tag}}\t{{.Size}}\t{{.CreatedSince}}"

    echo ""
    echo "Uso de espacio Docker:"
    docker system df
}
# Entry point: dispatch on the first argument; unknown commands print usage.
# $1 = command (build|push|run|compose|cleanup|status|full), $2 = mode/service.
main() {
    local action=${1:-"help"}
    local param=${2:-"latest"}

    case $action in
        "build")
            build_image
            ;;
        "push")
            # Always rebuild before pushing so the pushed tag is current.
            build_image
            push_image
            ;;
        "run")
            run_container "$param"
            ;;
        "compose")
            run_compose "$param"
            ;;
        "cleanup")
            cleanup
            ;;
        "status")
            status
            ;;
        "full")
            build_image
            push_image
            print_status "🎉 Pipeline completo ejecutado!"
            ;;
        *)
            # Fallback doubles as the help screen.
            echo "🐳 Script de gestión Docker para GPT Local"
            echo ""
            echo "Uso: $0 [comando] [parámetro]"
            echo ""
            echo "Comandos:"
            echo "  build                  - Construir imagen Docker"
            echo "  push                   - Construir y subir a Docker Hub"
            echo "  run [web|terminal|dev] - Ejecutar container"
            echo "  compose [service]      - Ejecutar con docker-compose"
            echo "  cleanup                - Limpiar containers e imágenes"
            echo "  status                 - Mostrar estado actual"
            echo "  full                   - Pipeline completo (build + push)"
            echo ""
            echo "Ejemplos:"
            echo "  $0 build        # Construir imagen"
            echo "  $0 push         # Construir y subir"
            echo "  $0 run web      # Ejecutar interfaz web"
            echo "  $0 run terminal # Ejecutar chat terminal"
            echo "  $0 compose      # Ejecutar con docker-compose"
            echo ""
            echo "Variables de entorno (archivo .env):"
            echo "  DOCKER_USERNAME=$DOCKER_USERNAME"
            echo "  DOCKER_IMAGE_NAME=$IMAGE_NAME"
            echo "  HUGGINGFACE_TOKEN=***"
            ;;
    esac
}

# Ejecutar script
main "$@"
|
environment.yml
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Conda environment file for GPT Local
|
| 2 |
+
name: gpt-local
|
| 3 |
+
channels:
|
| 4 |
+
- pytorch
|
| 5 |
+
- conda-forge
|
| 6 |
+
- defaults
|
| 7 |
+
dependencies:
|
| 8 |
+
- python=3.9
|
| 9 |
+
- pytorch>=2.0.0
|
| 10 |
+
- pytorch-cpu # or pytorch-cuda for GPU
|
| 11 |
+
- pip
|
| 12 |
+
- pip:
|
| 13 |
+
- transformers>=4.21.0
|
| 14 |
+
- gradio>=3.50.0
|
| 15 |
+
- accelerate>=0.20.0
|
| 16 |
+
- sentencepiece>=0.1.99
|
| 17 |
+
- numpy>=1.21.0
|
| 18 |
+
- pandas>=1.3.0
|
| 19 |
+
- matplotlib>=3.5.0
|
| 20 |
+
- requests>=2.25.0
|
| 21 |
+
- tqdm>=4.60.0
|
main.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
GPT Local - Aplicación principal
|
| 4 |
+
Ejecuta el GPT de texto local con interfaz web
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import sys
|
| 8 |
+
import logging
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
|
| 11 |
+
# Agregar el directorio raíz al path
|
| 12 |
+
sys.path.append(str(Path(__file__).parent))
|
| 13 |
+
|
| 14 |
+
from ui.gradio_interface import GradioInterface
|
| 15 |
+
from config.settings import WEB_CONFIG, LOGGING_CONFIG
|
| 16 |
+
|
def setup_logging():
    """Configure the root logger from the project-wide LOGGING_CONFIG."""
    log_level = getattr(logging, LOGGING_CONFIG["level"])
    log_format = LOGGING_CONFIG["format"]
    logging.basicConfig(level=log_level, format=log_format)
|
def main():
    """Entry point: configure logging, build the Gradio UI and launch it.

    Exits with status 1 on unexpected errors; a Ctrl+C shutdown is logged
    and treated as a clean exit.
    """
    setup_logging()
    logger = logging.getLogger(__name__)

    logger.info("🚀 Iniciando GPT Local...")
    # Lazy %-style args: the string is only formatted if the record is emitted.
    logger.info("Configuración: %s", WEB_CONFIG)

    try:
        # Build the UI and block serving requests until interrupted.
        interface = GradioInterface()

        logger.info("🌐 Lanzando interfaz web...")
        interface.launch(
            server_name=WEB_CONFIG["host"],
            server_port=WEB_CONFIG["port"],
            share=WEB_CONFIG["share"],
            debug=WEB_CONFIG["debug"],
        )

    except KeyboardInterrupt:
        logger.info("👋 Aplicación detenida por el usuario")
    except Exception:
        # BUGFIX: logger.error(f"... {e}") discarded the traceback;
        # logger.exception logs the message plus the full stack trace.
        logger.exception("❌ Error al ejecutar la aplicación")
        sys.exit(1)

if __name__ == "__main__":
    main()
|
manage-repos.sh
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/bin/bash
# 🔧 Script de gestión de repositorios para GPT Local
# Automatiza la creación y configuración de repositorios en GitHub y Hugging Face

set -e

# Load environment variables from .env if present.
# BUGFIX: `export $(cat .env | grep -v '^#' | xargs)` breaks on values with
# spaces, quotes or '='; `set -a` + `source` handles any valid assignment
# (comments included) correctly.
if [ -f .env ]; then
    set -a
    # shellcheck disable=SC1091
    source .env
    set +a
fi
# ANSI color codes for the log helpers.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# _log COLOR LABEL MESSAGE — single formatter shared by all print_* helpers.
_log() {
    echo -e "${1}[${2}]${NC} ${3}"
}

print_status()  { _log "$BLUE"   "INFO"    "$1"; }
print_success() { _log "$GREEN"  "SUCCESS" "$1"; }
print_warning() { _log "$YELLOW" "WARNING" "$1"; }
print_error()   { _log "$RED"    "ERROR"   "$1"; }
# Create a public GitHub repository via the gh CLI and point the local
# "origin" remote at it. $1 = repo name (default "gpt-local").
create_github_repo() {
    local repo_name=${1:-"gpt-local"}
    local description="🤖 Sistema de chat GPT local con Hugging Face - Soporte Docker, CLI y múltiples modelos"
    local gh_user

    print_status "Creando repositorio en GitHub: $repo_name"

    # Fail fast when the GitHub CLI has no stored credentials.
    if ! gh auth status &> /dev/null; then
        print_error "GitHub CLI no está autenticado"
        print_status "Ejecuta: gh auth login --with-token"
        return 1
    fi

    if gh repo create "$repo_name" --description "$description" --public --clone=false; then
        print_success "Repositorio '$repo_name' creado en GitHub"

        # Resolve the authenticated login once; the original hit the
        # GitHub API twice (once per branch) for the same value.
        gh_user=$(gh api user --jq .login)

        # Point "origin" at the new repo, updating it if it already exists.
        if git remote | grep -q "origin"; then
            git remote set-url origin "https://github.com/$gh_user/$repo_name.git"
        else
            git remote add origin "https://github.com/$gh_user/$repo_name.git"
        fi

        print_success "Remoto configurado"
        return 0
    else
        print_warning "El repositorio puede ya existir o hubo un error"
        return 1
    fi
}
# Create a Hugging Face repository. $1 = repo name, $2 = type
# (space | model | dataset). Only the "space" branch is implemented;
# "model" and "dataset" are placeholders that just print a message.
create_huggingface_repo() {
    local repo_name=${1:-"gpt-local"}
    local repo_type=${2:-"space"} # space, model, dataset

    print_status "Creando repositorio en Hugging Face: $repo_name"

    # Require the HF token even though the CLI may use cached credentials.
    if [ -z "$HUGGINGFACE_TOKEN" ]; then
        print_error "Token de Hugging Face no configurado"
        print_status "Configura HUGGINGFACE_TOKEN en .env"
        return 1
    fi

    case $repo_type in
        "space")
            print_status "Creando Hugging Face Space..."
            # NOTE(review): `huggingface-cli whoami | head -1` is assumed to
            # print the username on its first line — confirm against the
            # installed huggingface_hub version, as the output format has
            # changed across releases.
            if huggingface-cli upload . . --repo-id "$(huggingface-cli whoami | head -1)/$repo_name" --repo-type space --create; then
                print_success "Hugging Face Space creado: https://huggingface.co/spaces/$(huggingface-cli whoami | head -1)/$repo_name"
            else
                print_warning "Error creando Space o ya existe"
            fi
            ;;
        "model")
            # TODO: not implemented — placeholder for fine-tuned model repos.
            print_status "Creando repositorio de modelo..."
            ;;
        "dataset")
            # TODO: not implemented — placeholder for custom dataset repos.
            print_status "Creando repositorio de dataset..."
            ;;
    esac
}
# Initialise the local Git repository: `git init` if needed, write a default
# .gitignore if none exists, stage everything, and create an initial commit
# when the history is empty.
setup_git() {
    print_status "Configurando Git..."

    # Initialise only once.
    if [ ! -d ".git" ]; then
        git init
        print_success "Repositorio Git inicializado"
    fi

    # Write a default .gitignore only when the project has none, so an
    # existing hand-maintained file is never overwritten.
    if [ ! -f ".gitignore" ]; then
        cat > .gitignore << 'EOF'
# Tokens y configuración sensible
.env
*.token
.cache/
.secrets/

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Modelos y cache
models_cache/
models/*/
*.bin
*.safetensors
.cache/
huggingface_cache/

# Logs
*.log
logs/
.logs/

# OS
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# Docker
.docker/
docker-data/

# Jupyter
.ipynb_checkpoints/
*.ipynb

# Test coverage
htmlcov/
.coverage
.coverage.*
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
EOF
        print_success "Archivo .gitignore creado"
    fi

    # Stage everything currently in the working tree.
    git add .

    # Create the initial commit only when the repo has no history yet
    # (`git log -1` fails on an empty repository).
    if ! git log --oneline -1 &> /dev/null; then
        git commit -m "🚀 Initial commit: GPT Local project setup

- ✅ Complete project structure with models, UI, and config
- ✅ Docker support with multi-service configuration
- ✅ Hugging Face and GitHub integration
- ✅ Python CLI tools and utilities
- ✅ Multiple model support (DialoGPT, Mistral, Gemma)
- ✅ Terminal and web chat interfaces
- ✅ Apple Silicon MPS optimization
- ✅ Comprehensive documentation and setup scripts"
        print_success "Commit inicial creado"
    fi
}
# Push the current branch to "origin", trying "main" first and falling
# back to "master". Requires create_github_repo (or equivalent) to have
# configured the remote.
push_to_github() {
    print_status "Subiendo código a GitHub..."

    if ! git remote | grep -q "origin"; then
        print_error "No hay remoto configurado. Ejecuta create_github_repo primero"
        return 1
    fi

    # Try main, then master; stderr is suppressed so the fallback is quiet.
    if git push -u origin main 2>/dev/null || git push -u origin master 2>/dev/null; then
        print_success "Código subido a GitHub exitosamente"

        # Derive the web URL by stripping the trailing ".git".
        local repo_url=$(git remote get-url origin | sed 's/\.git$//')
        print_status "Repositorio disponible en: $repo_url"
    else
        print_error "Error subiendo a GitHub"
        return 1
    fi
}
# Write README_HF.md — the Hugging Face Space card (YAML front-matter plus
# description). Overwrites any existing README_HF.md.
create_huggingface_readme() {
    # Quoted 'EOF' → content is written verbatim, no expansion.
    cat > README_HF.md << 'EOF'
---
title: GPT Local Chat
emoji: 🤖
colorFrom: blue
colorTo: green
sdk: gradio
sdk_version: 4.44.1
app_file: main.py
pinned: false
license: mit
tags:
  - chatbot
  - gpt
  - huggingface
  - pytorch
  - transformers
  - gradio
  - spanish
  - conversational-ai
---

# 🤖 GPT Local Chat

Un sistema de chat GPT local potenciado por modelos de Hugging Face.

## Características

- 💬 Chat conversacional en tiempo real
- 🤗 Múltiples modelos de Hugging Face
- 🍎 Optimizado para Apple Silicon (MPS)
- ⚡ GPU acceleration automática
- 🌐 Interfaz web moderna con Gradio

## Modelos Soportados

- DialoGPT (small/medium/large)
- Mistral 7B Instruct
- Google Gemma 2B

## Uso

Simplemente escribe tu mensaje y presiona Enter para chatear con el modelo.

## Código Fuente

El código completo está disponible en: [GitHub Repository](https://github.com/tu-usuario/gpt-local)
EOF
    print_success "README de Hugging Face creado"
}
# Entry point: dispatch on $1 (github|huggingface|full|setup|push);
# $2 = repository name (default "gpt-local"). Unknown commands print usage
# and exit 1.
main() {
    local action=${1:-"full"}
    local repo_name=${2:-"gpt-local"}

    print_status "🚀 Iniciando gestión de repositorios..."

    case $action in
        "github")
            setup_git
            create_github_repo "$repo_name"
            push_to_github
            ;;
        "huggingface")
            create_huggingface_readme
            create_huggingface_repo "$repo_name" "space"
            ;;
        "full")
            # GitHub first, then the Hugging Face Space.
            setup_git
            create_github_repo "$repo_name"
            push_to_github
            create_huggingface_readme
            create_huggingface_repo "$repo_name" "space"
            ;;
        "setup")
            setup_git
            ;;
        "push")
            push_to_github
            ;;
        *)
            echo "Uso: $0 [github|huggingface|full|setup|push] [nombre_repo]"
            echo ""
            echo "Comandos:"
            echo "  github      - Crear solo repositorio GitHub"
            echo "  huggingface - Crear solo Hugging Face Space"
            echo "  full        - Crear ambos repositorios"
            echo "  setup       - Solo configurar Git local"
            echo "  push        - Solo subir cambios a GitHub"
            exit 1
            ;;
    esac

    print_success "✅ Gestión de repositorios completada!"
}

# Ejecutar script
main "$@"
|
models/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Módulo de modelos para GPT Local
|
models/model_loader.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Cargador de modelos GPT para uso local
|
| 3 |
+
"""
|
| 4 |
+
import torch
|
| 5 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 6 |
+
import logging
|
| 7 |
+
|
| 8 |
+
# NOTE(review): calling basicConfig() at import time configures the root
# logger as a side effect of importing this library module; applications
# normally own that call (main.py already does via setup_logging).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
|
| 10 |
+
|
class ModelLoader:
    """Load a causal-LM checkpoint from the Hugging Face Hub.

    Holds the model/tokenizer pair and the compute device selected at
    construction time (MPS > CUDA > CPU).
    """

    def __init__(self):
        self.model = None       # AutoModelForCausalLM instance once loaded
        self.tokenizer = None   # matching AutoTokenizer instance
        # Prefer Apple Silicon (MPS), then CUDA, then CPU.
        if torch.backends.mps.is_available():
            self.device = "mps"
        elif torch.cuda.is_available():
            self.device = "cuda"
        else:
            self.device = "cpu"

    def load_model(self, model_name="microsoft/DialoGPT-medium"):
        """
        Carga un modelo GPT desde Hugging Face

        Args:
            model_name (str): Nombre del modelo en Hugging Face Hub

        Returns:
            bool: True if the model and tokenizer loaded, False on error.
        """
        try:
            logger.info(f"Cargando modelo: {model_name}")
            logger.info(f"Usando dispositivo: {self.device}")

            self.tokenizer = AutoTokenizer.from_pretrained(model_name)

            # GPT-style tokenizers (e.g. DialoGPT) often ship without a pad
            # token; reuse EOS so padded generation works.
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            # fp16 only on CUDA; MPS/CPU keep fp32. device_map="auto" lets
            # accelerate place weights on CUDA automatically.
            self.model = AutoModelForCausalLM.from_pretrained(
                model_name,
                torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
                device_map="auto" if self.device == "cuda" else None,
            )

            # BUGFIX: previously the model was moved only when device == "cpu",
            # so on Apple Silicon the weights stayed on CPU while callers send
            # inputs to "mps", causing a device-mismatch error at generation.
            # CUDA placement is already handled by device_map above.
            if self.device != "cuda":
                self.model = self.model.to(self.device)

            logger.info("Modelo cargado exitosamente")
            return True

        except Exception as e:
            logger.error(f"Error al cargar el modelo: {str(e)}")
            return False

    def get_model_info(self):
        """Return a small status dict describing the loaded model."""
        if self.model is None:
            return {"status": "No hay modelo cargado"}

        return {
            "status": "Modelo cargado",
            "device": self.device,
            "model_type": type(self.model).__name__,
            "vocab_size": self.tokenizer.vocab_size if self.tokenizer else "N/A"
        }

    def is_loaded(self):
        """True when both a model and its tokenizer are available."""
        return self.model is not None and self.tokenizer is not None
|
models/text_generator.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Generador de texto usando modelos GPT locales
|
| 3 |
+
"""
|
| 4 |
+
import torch
|
| 5 |
+
from typing import List, Dict
|
| 6 |
+
import logging
|
| 7 |
+
|
| 8 |
+
logger = logging.getLogger(__name__)
|
| 9 |
+
|
| 10 |
+
class TextGenerator:
    """Chat and free-form text generation on top of a loaded GPT model.

    ``model_loader`` must provide ``model``, ``tokenizer``, ``device`` and
    ``is_loaded()`` (see ``models/model_loader.py``).
    """

    def __init__(self, model_loader):
        self.model_loader = model_loader
        # Token ids of the whole conversation so far (torch tensor), or None.
        self.chat_history_ids = None

    def generate_response(self, user_input: str, **kwargs) -> str:
        """Generate a chat reply to ``user_input``, keeping conversation history.

        Args:
            user_input (str): The user's message.
            **kwargs: Generation parameters — ``max_length`` (budget of NEW
                tokens, default 512), ``temperature`` (default 0.7),
                ``top_p`` (default 0.9), ``do_sample`` (default True).

        Returns:
            str: The generated reply, or an error message string.
        """
        if not self.model_loader.is_loaded():
            return "Error: No hay modelo cargado"

        try:
            # Generation defaults.
            max_length = kwargs.get('max_length', 512)
            temperature = kwargs.get('temperature', 0.7)
            top_p = kwargs.get('top_p', 0.9)
            do_sample = kwargs.get('do_sample', True)

            # Encode the user's turn, terminated with EOS as DialoGPT expects.
            new_user_input_ids = self.model_loader.tokenizer.encode(
                user_input + self.model_loader.tokenizer.eos_token,
                return_tensors='pt'
            ).to(self.model_loader.device)

            # Append the new turn to the running conversation, if any.
            if self.chat_history_ids is not None:
                bot_input_ids = torch.cat([self.chat_history_ids, new_user_input_ids], dim=-1)
            else:
                bot_input_ids = new_user_input_ids

            # Fix: max_length used to be an *absolute* limit, so once the
            # accumulated history grew past it generation produced no output.
            # Treat it as a budget of new tokens instead, consistent with
            # generate_text() below.
            with torch.no_grad():
                chat_history_ids = self.model_loader.model.generate(
                    bot_input_ids,
                    max_length=bot_input_ids.shape[-1] + max_length,
                    num_beams=1,
                    do_sample=do_sample,
                    temperature=temperature,
                    top_p=top_p,
                    pad_token_id=self.model_loader.tokenizer.eos_token_id,
                    attention_mask=torch.ones(bot_input_ids.shape, device=self.model_loader.device)
                )

            # Remember the full conversation for the next turn.
            self.chat_history_ids = chat_history_ids

            # Decode only the newly generated tail, not the prompt/history.
            response = self.model_loader.tokenizer.decode(
                chat_history_ids[:, bot_input_ids.shape[-1]:][0],
                skip_special_tokens=True
            )

            return str(response).strip()

        except Exception as e:
            logger.error(f"Error en la generación: {str(e)}")
            return f"Error al generar respuesta: {str(e)}"

    def generate_text(self, prompt: str, **kwargs) -> str:
        """Continue ``prompt`` without using or updating the chat history.

        Args:
            prompt (str): Initial text to continue.
            **kwargs: Generation parameters — ``max_length`` (budget of new
                tokens, default 100), ``temperature`` (default 0.8),
                ``top_p`` (default 0.9), ``do_sample`` (default True).

        Returns:
            str: The generated continuation, or an error message string.
        """
        if not self.model_loader.is_loaded():
            return "Error: No hay modelo cargado"

        try:
            # Generation defaults.
            max_length = kwargs.get('max_length', 100)
            temperature = kwargs.get('temperature', 0.8)
            top_p = kwargs.get('top_p', 0.9)
            do_sample = kwargs.get('do_sample', True)

            # Encode the prompt.
            input_ids = self.model_loader.tokenizer.encode(
                prompt,
                return_tensors='pt'
            ).to(self.model_loader.device)

            # max_length here is already counted on top of the prompt length.
            with torch.no_grad():
                output = self.model_loader.model.generate(
                    input_ids,
                    max_length=input_ids.shape[1] + max_length,
                    do_sample=do_sample,
                    temperature=temperature,
                    top_p=top_p,
                    pad_token_id=self.model_loader.tokenizer.eos_token_id,
                    attention_mask=torch.ones(input_ids.shape, device=self.model_loader.device)
                )

            # Decode only the generated part, not the prompt itself.
            generated_text = self.model_loader.tokenizer.decode(
                output[0][input_ids.shape[1]:],
                skip_special_tokens=True
            )

            return str(generated_text.strip())

        except Exception as e:
            logger.error(f"Error en la generación: {str(e)}")
            return f"Error al generar texto: {str(e)}"

    def reset_chat_history(self):
        """Discard the accumulated conversation history."""
        self.chat_history_ids = None
        logger.info("Historial de chat reiniciado")

    def get_generation_stats(self) -> Dict:
        """Return basic stats about the current chat history (length, device)."""
        if self.chat_history_ids is not None:
            return {
                "history_length": self.chat_history_ids.shape[1],
                "device": str(self.chat_history_ids.device)
            }
        return {"history_length": 0, "device": "N/A"}
|
mypy.ini
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[mypy]
|
| 2 |
+
python_version = 3.9
|
| 3 |
+
warn_return_any = True
|
| 4 |
+
warn_unused_configs = True
|
| 5 |
+
disallow_untyped_defs = False
|
| 6 |
+
disallow_incomplete_defs = False
|
| 7 |
+
check_untyped_defs = True
|
| 8 |
+
disallow_untyped_decorators = False
|
| 9 |
+
no_implicit_optional = True
|
| 10 |
+
warn_redundant_casts = True
|
| 11 |
+
warn_unused_ignores = True
|
| 12 |
+
warn_no_return = True
|
| 13 |
+
warn_unreachable = True
|
| 14 |
+
ignore_missing_imports = True
|
| 15 |
+
strict_optional = False
|
| 16 |
+
|
| 17 |
+
# Ignorar errores específicos de módulos externos
|
| 18 |
+
[mypy-transformers.*]
|
| 19 |
+
ignore_missing_imports = True
|
| 20 |
+
|
| 21 |
+
[mypy-gradio.*]
|
| 22 |
+
ignore_missing_imports = True
|
| 23 |
+
|
| 24 |
+
[mypy-torch.*]
|
| 25 |
+
ignore_missing_imports = True
|
| 26 |
+
|
| 27 |
+
[mypy-models.*]
|
| 28 |
+
ignore_missing_imports = True
|
| 29 |
+
|
| 30 |
+
[mypy-ui.*]
|
| 31 |
+
ignore_missing_imports = True
|
| 32 |
+
|
| 33 |
+
[mypy-config.*]
|
| 34 |
+
ignore_missing_imports = True
|
package.json
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "gpt-local",
|
| 3 |
+
"version": "1.0.0",
|
| 4 |
+
"description": "GPT de texto local usando Hugging Face Transformers",
|
| 5 |
+
"main": "main.py",
|
| 6 |
+
"scripts": {
|
| 7 |
+
"start": "python3 main.py",
|
| 8 |
+
"chat": "python3 chat_terminal.py",
|
| 9 |
+
"test": "python3 test_gpt.py",
|
| 10 |
+
"install": "pip3 install -r requirements.txt",
|
| 11 |
+
"docker:build": "docker build -t gpt-local .",
|
| 12 |
+
"docker:run": "docker run -it -p 7860:7860 gpt-local",
|
| 13 |
+
"docker:compose": "docker-compose up",
|
| 14 |
+
"clean": "find . -name '__pycache__' -type d -exec rm -rf {} + 2>/dev/null || true"
|
| 15 |
+
},
|
| 16 |
+
"keywords": [
|
| 17 |
+
"gpt",
|
| 18 |
+
"transformers",
|
| 19 |
+
"huggingface",
|
| 20 |
+
"local",
|
| 21 |
+
"ai",
|
| 22 |
+
"nlp",
|
| 23 |
+
"chat",
|
| 24 |
+
"python"
|
| 25 |
+
],
|
| 26 |
+
"author": "Usuario Local",
|
| 27 |
+
"license": "MIT",
|
| 28 |
+
"dependencies": {
|
| 29 |
+
"python": ">=3.8"
|
| 30 |
+
},
|
| 31 |
+
"engines": {
|
| 32 |
+
"python": ">=3.8"
|
| 33 |
+
},
|
| 34 |
+
"repository": {
|
| 35 |
+
"type": "git",
|
| 36 |
+
"url": "local"
|
| 37 |
+
},
|
| 38 |
+
"config": {
|
| 39 |
+
"model_cache": "~/.cache/huggingface",
|
| 40 |
+
"default_model": "microsoft/DialoGPT-small",
|
| 41 |
+
"web_port": 7860
|
| 42 |
+
}
|
| 43 |
+
}
|
requirements-dev.txt
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Development dependencies (opcional)
|
| 2 |
+
torch>=2.0.0
|
| 3 |
+
transformers>=4.21.0
|
| 4 |
+
gradio>=3.50.0,<5.0.0
|
| 5 |
+
accelerate>=0.20.0
|
| 6 |
+
sentencepiece>=0.1.99
|
| 7 |
+
|
| 8 |
+
# Core ML dependencies
|
| 9 |
+
numpy>=1.21.0,<2.0.0
|
| 10 |
+
pandas>=1.3.0,<3.0.0
|
| 11 |
+
matplotlib>=3.5.0,<4.0.0
|
| 12 |
+
requests>=2.25.0,<3.0.0
|
| 13 |
+
tqdm>=4.60.0,<5.0.0
|
| 14 |
+
|
| 15 |
+
# Development tools (opcional)
|
| 16 |
+
jupyter>=1.0.0
|
| 17 |
+
ipython>=7.0.0
|
| 18 |
+
black>=22.0.0
|
| 19 |
+
flake8>=4.0.0
|
| 20 |
+
pytest>=6.0.0
|
| 21 |
+
|
| 22 |
+
# Optional GPU support
|
| 23 |
+
# torch-audio # descomenta si necesitas audio
|
| 24 |
+
# torchvision # descomenta si necesitas visión por computadora
|
requirements.txt
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Dependencias principales
|
| 2 |
+
torch>=2.0.0,<3.0.0
|
| 3 |
+
transformers>=4.21.0,<5.0.0
|
| 4 |
+
gradio>=4.0.0,<5.0.0
|
| 5 |
+
accelerate>=0.20.0,<1.0.0
|
| 6 |
+
sentencepiece>=0.1.99,<1.0.0
|
| 7 |
+
|
| 8 |
+
# Dependencias adicionales para estabilidad
|
| 9 |
+
numpy>=1.21.0,<2.0.0
|
| 10 |
+
typing-extensions>=4.0.0
|
setup-cli.sh
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# 🔧 CLI tools setup script.
# Installs/verifies the Hugging Face CLI, Docker CLI and GitHub CLI,
# checks authentication for each, and adds convenience shell aliases.

set -e

echo "🚀 Configurando CLI Tools para GPT Local..."

# ANSI color codes used by the print_* helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Colored output helpers: info / success / warning / error.
print_status() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# 1. Install the Hugging Face CLI (ships with the huggingface_hub package).
install_huggingface_cli() {
    print_status "Instalando Hugging Face CLI..."

    if command -v huggingface-cli &> /dev/null; then
        print_success "Hugging Face CLI ya está instalado"
    else
        pip install --upgrade huggingface_hub
        print_success "Hugging Face CLI instalado"
    fi

    # Verify the installation; failure is non-fatal (warning only).
    huggingface-cli --version || print_warning "Error verificando Hugging Face CLI"
}

# 2. Check the Docker CLI and that the daemon is reachable.
#    Exits the script if the CLI itself is missing.
check_docker() {
    print_status "Verificando Docker CLI..."

    if command -v docker &> /dev/null; then
        print_success "Docker CLI encontrado: $(docker --version)"

        # `docker info` only succeeds when the daemon is running.
        if docker info &> /dev/null; then
            print_success "Docker daemon está ejecutándose"
        else
            print_warning "Docker CLI instalado pero daemon no está ejecutándose"
            print_status "Inicia Docker Desktop o el servicio Docker"
        fi
    else
        print_error "Docker CLI no encontrado"
        print_status "Instala Docker desde: https://docs.docker.com/get-docker/"
        exit 1
    fi
}

# 3. Install the GitHub CLI (optional). Auto-install is only attempted on
#    macOS (Homebrew) and Debian/Ubuntu Linux (apt); other OSes get a hint.
install_github_cli() {
    print_status "Verificando GitHub CLI..."

    if command -v gh &> /dev/null; then
        print_success "GitHub CLI ya está instalado: $(gh --version | head -1)"
    else
        print_warning "GitHub CLI no encontrado"
        print_status "Instalando GitHub CLI..."

        # Detect the OS and install accordingly.
        if [[ "$OSTYPE" == "darwin"* ]]; then
            # macOS
            if command -v brew &> /dev/null; then
                brew install gh
                print_success "GitHub CLI instalado via Homebrew"
            else
                print_warning "Homebrew no encontrado. Instala manualmente desde: https://cli.github.com/"
            fi
        elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
            # Linux: add GitHub's apt repository and its signing key, then install.
            curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg
            echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null
            sudo apt update
            sudo apt install gh
            print_success "GitHub CLI instalado"
        else
            print_warning "OS no soportado para instalación automática. Instala manualmente desde: https://cli.github.com/"
        fi
    fi
}

# 4. Check Hugging Face authentication (only looks for a cached token;
#    does not attempt to log in on the user's behalf).
setup_huggingface_auth() {
    print_status "Configurando autenticación de Hugging Face..."

    if [ -f "$HOME/.cache/huggingface/token" ]; then
        print_success "Token de Hugging Face ya configurado"
    else
        print_warning "Token de Hugging Face no encontrado"
        print_status "Para configurar tu token:"
        echo "1. Ve a: https://huggingface.co/settings/tokens"
        echo "2. Crea un nuevo token con permisos de lectura"
        echo "3. Ejecuta: huggingface-cli login"
        echo "4. O agrega el token al archivo .env: HUGGINGFACE_TOKEN=hf_tu_token"
    fi
}

# 5. Check GitHub authentication status (informational only).
setup_github_auth() {
    print_status "Configurando autenticación de GitHub..."

    if command -v gh &> /dev/null; then
        if gh auth status &> /dev/null; then
            print_success "GitHub CLI ya está autenticado"
        else
            print_warning "GitHub CLI no está autenticado"
            print_status "Para autenticar ejecuta: gh auth login"
        fi
    else
        print_warning "GitHub CLI no instalado, saltando configuración"
    fi
}

# 6. Check for Docker Compose (either the standalone binary or the plugin).
check_docker_compose() {
    print_status "Verificando Docker Compose..."

    if command -v docker-compose &> /dev/null; then
        print_success "Docker Compose encontrado: $(docker-compose --version)"
    elif docker compose version &> /dev/null; then
        print_success "Docker Compose (plugin) encontrado: $(docker compose version)"
    else
        print_error "Docker Compose no encontrado"
        print_status "Instala Docker Compose desde: https://docs.docker.com/compose/install/"
    fi
}

# 7. Append convenience aliases to ~/.bash_aliases and ~/.zshrc.
#    NOTE(review): the heredoc bodies contain a literal "/path/to/gpt-local"
#    placeholder and are appended (not deduplicated) on every run.
create_aliases() {
    print_status "Creando aliases útiles..."

    cat >> ~/.bash_aliases 2>/dev/null << 'EOF' || true
# GPT Local aliases
alias gpt-chat="cd /path/to/gpt-local && python3 chat_terminal.py"
alias gpt-web="cd /path/to/gpt-local && python3 main.py"
alias gpt-test="cd /path/to/gpt-local && python3 test_gpt.py"
alias gpt-docker="cd /path/to/gpt-local && docker-compose up --build"
alias gpt-clean="cd /path/to/gpt-local && python3 utils.py clean"
EOF

    # Same aliases for zsh users.
    cat >> ~/.zshrc 2>/dev/null << 'EOF' || true
# GPT Local aliases
alias gpt-chat="cd /path/to/gpt-local && python3 chat_terminal.py"
alias gpt-web="cd /path/to/gpt-local && python3 main.py"
alias gpt-test="cd /path/to/gpt-local && python3 test_gpt.py"
alias gpt-docker="cd /path/to/gpt-local && docker-compose up --build"
alias gpt-clean="cd /path/to/gpt-local && python3 utils.py clean"
EOF

    print_success "Aliases creados (recarga tu terminal para usarlos)"
}

# Entry point: run every check/installation step in order, then print
# a summary of available commands and next steps.
main() {
    print_status "Iniciando configuración de CLI tools..."

    # Python is a hard requirement for everything else.
    if ! command -v python3 &> /dev/null; then
        print_error "Python 3 no encontrado. Instala Python 3.9+"
        exit 1
    fi

    # Run the individual setup steps.
    install_huggingface_cli
    check_docker
    check_docker_compose
    install_github_cli
    setup_huggingface_auth
    setup_github_auth
    create_aliases

    print_success "✅ Configuración de CLI tools completada!"
    print_status "Comandos disponibles:"
    echo "  - huggingface-cli: Gestión de modelos HF"
    echo "  - docker: Containerización"
    echo "  - docker-compose: Orquestación"
    echo "  - gh: GitHub CLI (opcional)"

    print_status "Próximos pasos:"
    echo "1. Configura tu token HF: huggingface-cli login"
    echo "2. Autentica GitHub (opcional): gh auth login"
    echo "3. Ejecuta: docker-compose up --build"
}

# Run the script.
main "$@"
|
setup.sh
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Installation and setup script for GPT Local: checks Python, optionally
# creates a virtualenv, installs dependencies, and runs a smoke test.

echo "🚀 Configurando GPT Local..."

# Require python3 to be on PATH before doing anything else.
echo "🐍 Verificando Python..."
if ! command -v python3 &> /dev/null; then
    echo "❌ Python3 no encontrado. Instálalo primero."
    exit 1
fi

echo "✅ Python encontrado: $(python3 --version)"

# Optionally create and activate a virtual environment (interactive prompt).
read -p "¿Crear entorno virtual? (y/N): " create_venv
if [[ $create_venv =~ ^[Yy]$ ]]; then
    echo "📦 Creando entorno virtual..."
    python3 -m venv venv
    source venv/bin/activate
    echo "✅ Entorno virtual activado"
fi

# Install runtime dependencies.
echo "📦 Instalando dependencias..."
pip3 install -r requirements.txt

# Verify the install by importing the key packages and printing versions.
echo "🔍 Verificando instalación..."
python3 -c "
import torch, transformers, gradio
print('✅ Todas las dependencias instaladas')
print(f'PyTorch: {torch.__version__}')
print(f'Transformers: {transformers.__version__}')
print(f'Gradio: {gradio.__version__}')
"

# Create the directories the app expects at runtime.
echo "📁 Creando directorios..."
mkdir -p models_cache
mkdir -p logs

# Smoke test: load a small model and generate one reply.
echo "🧪 Ejecutando prueba..."
python3 test_gpt.py

echo "🎉 ¡Configuración completada!"
echo ""
echo "Para ejecutar:"
echo "  Chat en terminal: python3 chat_terminal.py"
echo "  Interfaz web: python3 main.py"
echo "  Utilidades: python3 utils.py check"
|
test_gpt.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""
Smoke test for GPT Local: loads a small model and generates one reply.

Runs entirely at import time (no functions); prints progress and any error
with a full traceback.
"""

import sys
from pathlib import Path

# Make the project root importable regardless of the working directory.
sys.path.append(str(Path(__file__).parent))

try:
    from models.model_loader import ModelLoader
    from models.text_generator import TextGenerator

    print("🚀 Probando GPT Local...")

    # Wire up the loader and the generator that consumes it.
    model_loader = ModelLoader()
    text_generator = TextGenerator(model_loader)

    # Use the smallest DialoGPT variant to keep the test fast.
    print("📦 Cargando modelo DialoGPT-small...")
    success = model_loader.load_model("microsoft/DialoGPT-small")

    if success:
        print("✅ Modelo cargado exitosamente")

        # Single-turn generation sanity check.
        test_message = "Hola"
        print(f"🧪 Probando con: '{test_message}'")

        response = text_generator.generate_response(test_message, max_length=50)
        print(f"🤖 Respuesta: {response}")

        print("\n✅ ¡GPT Local funcionando correctamente!")

    else:
        print("❌ Error al cargar el modelo")

except Exception as e:
    # Any failure (import, download, generation) is reported with a traceback.
    print(f"❌ Error: {str(e)}")
    import traceback
    traceback.print_exc()
|
ui/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Módulo de interfaz de usuario
|
ui/gradio_interface.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Interfaz web usando Gradio para el GPT local
|
| 3 |
+
"""
|
| 4 |
+
import gradio as gr
|
| 5 |
+
from models.model_loader import ModelLoader
|
| 6 |
+
from models.text_generator import TextGenerator
|
| 7 |
+
import logging
|
| 8 |
+
|
| 9 |
+
logger = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
class GradioInterface:
    """Gradio web UI wrapping a ModelLoader and a TextGenerator.

    Builds a two-tab Blocks app: a chat tab and a model-configuration tab.
    """

    def __init__(self):
        self.model_loader = ModelLoader()
        self.text_generator = TextGenerator(self.model_loader)
        # Small models that are quick to download and run on CPU.
        self.available_models = [
            "microsoft/DialoGPT-small",
            "gpt2",
            "distilgpt2"
        ]

    def load_model_ui(self, model_name):
        """Load a model from the UI and reset chat history on success.

        NOTE(review): create_interface() defines its own local load_model
        handler; this method appears unused by the wiring below.
        """
        if self.model_loader.load_model(model_name):
            self.text_generator.reset_chat_history()
            return f"✅ Modelo '{model_name}' cargado exitosamente"
        else:
            return f"❌ Error al cargar el modelo '{model_name}'"

    def chat_response(self, message, history, temperature, max_length, top_p):
        """Generate a chat reply and append the (message, reply) pair to history.

        NOTE(review): create_interface() uses a local `respond` closure
        instead; this method appears unused by the wiring below.
        """
        if not self.model_loader.is_loaded():
            return history + [("Error", "Por favor, carga un modelo primero")]

        try:
            # Generate the reply.
            response = self.text_generator.generate_response(
                message,
                temperature=temperature,
                max_length=max_length,
                top_p=top_p
            )

            # Record the exchange in the UI history.
            history.append((message, response))
            return history

        except Exception as e:
            error_msg = f"Error: {str(e)}"
            history.append((message, error_msg))
            return history

    def generate_text_ui(self, prompt, temperature, max_length, top_p):
        """Free-form continuation of a prompt (no chat history involved)."""
        if not self.model_loader.is_loaded():
            return "Error: Por favor, carga un modelo primero"

        return self.text_generator.generate_text(
            prompt,
            temperature=temperature,
            max_length=max_length,
            top_p=top_p
        )

    def reset_chat(self):
        """Clear the generator's chat history and return an empty UI history."""
        self.text_generator.reset_chat_history()
        return [], "Chat reiniciado"

    def get_model_status(self):
        """Return a Markdown string summarizing model and chat-history state."""
        info = self.model_loader.get_model_info()
        stats = self.text_generator.get_generation_stats()

        status = f"""
**Estado del Modelo:**
- {info['status']}
- Dispositivo: {info.get('device', 'N/A')}
- Tipo: {info.get('model_type', 'N/A')}
- Vocabulario: {info.get('vocab_size', 'N/A')}

**Estadísticas de Chat:**
- Longitud del historial: {stats['history_length']} tokens
- Dispositivo activo: {stats['device']}
"""
        return status

    def create_interface(self):
        """Build and return the simplified Gradio Blocks app."""

        with gr.Blocks(title="GPT Local") as demo:
            gr.Markdown("# 🤖 GPT Local con Hugging Face")

            with gr.Tab("💬 Chat"):
                chatbot = gr.Chatbot(height=400)
                msg = gr.Textbox(placeholder="Escribe tu mensaje...", label="Mensaje")

                with gr.Row():
                    send_btn = gr.Button("Enviar", variant="primary")
                    clear_btn = gr.Button("Limpiar")

                # Basic generation parameters exposed to the user.
                with gr.Row():
                    temperature = gr.Slider(0.1, 2.0, 0.7, label="Temperatura")
                    max_length = gr.Slider(50, 500, 200, label="Longitud Máxima")

            with gr.Tab("⚙️ Configuración"):
                model_dropdown = gr.Dropdown(
                    choices=self.available_models,
                    value=self.available_models[0],
                    label="Modelo"
                )
                load_btn = gr.Button("Cargar Modelo")
                status_text = gr.Textbox(label="Estado", interactive=False)

            # Simplified event handlers (closures over self).
            def respond(message, history, temp, max_len):
                # Ignore empty/whitespace-only messages.
                if not message.strip():
                    return history, ""

                try:
                    response = self.text_generator.generate_response(
                        message, temperature=temp, max_length=max_len
                    )
                    history.append((message, response))
                    return history, ""
                except Exception as e:
                    history.append((message, f"Error: {str(e)}"))
                    return history, ""

            def load_model(model_name):
                # Loading a new model also resets the conversation.
                try:
                    if self.model_loader.load_model(model_name):
                        self.text_generator.reset_chat_history()
                        return f"✅ Modelo '{model_name}' cargado"
                    else:
                        return f"❌ Error al cargar '{model_name}'"
                except Exception as e:
                    return f"❌ Error: {str(e)}"

            def clear_chat():
                self.text_generator.reset_chat_history()
                return []

            # Wire UI events to the handlers (Enter key and Send button share one).
            msg.submit(respond, [msg, chatbot, temperature, max_length], [chatbot, msg])
            send_btn.click(respond, [msg, chatbot, temperature, max_length], [chatbot, msg])
            clear_btn.click(clear_chat, outputs=chatbot)
            load_btn.click(load_model, model_dropdown, status_text)

            # Load the default model when the page first loads.
            demo.load(lambda: load_model(self.available_models[0]), outputs=status_text)

        return demo

    def launch(self, **kwargs):
        """Build the interface and launch the Gradio server (kwargs forwarded)."""
        demo = self.create_interface()
        demo.launch(**kwargs)
|
utils.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Script de utilidades para GPT Local
|
| 4 |
+
Manejo de modelos, limpieza, actualización
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import sys
|
| 8 |
+
import os
|
| 9 |
+
import shutil
|
| 10 |
+
import subprocess
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
def check_environment():
    """Check that all runtime dependencies import cleanly and report versions.

    Returns True when torch, transformers and gradio are all importable,
    False otherwise.
    """
    print("🔍 Verificando entorno...")

    try:
        import torch
        import transformers
        import gradio as gr
    except ImportError as e:
        print(f"❌ Error de importación: {e}")
        return False

    print(f"✅ PyTorch: {torch.__version__}")
    print(f"✅ Transformers: {transformers.__version__}")
    print(f"✅ Gradio: {gr.__version__}")
    print(f"✅ CUDA disponible: {torch.cuda.is_available()}")
    mps_state = torch.backends.mps.is_available() if hasattr(torch.backends, 'mps') else 'N/A'
    print(f"✅ MPS disponible: {mps_state}")
    return True
|
| 30 |
+
|
| 31 |
+
def clean_cache():
    """Interactively report and optionally delete known cache directories.

    For each existing candidate directory, prints its total size and asks
    the user before removing it.
    """
    print("🧹 Limpiando cache...")

    candidates = [
        Path.home() / ".cache" / "huggingface",
        Path("models_cache"),
        Path("__pycache__"),
    ]

    for directory in candidates:
        if not directory.exists():
            continue

        total_bytes = sum(f.stat().st_size for f in directory.rglob('*') if f.is_file())
        print(f"📁 {directory}: {total_bytes / (1024*1024*1024):.2f} GB")

        answer = input(f"¿Eliminar {directory}? (y/N): ")
        if answer.lower() == 'y':
            shutil.rmtree(directory)
            print(f"✅ {directory} eliminado")
|
| 50 |
+
|
| 51 |
+
def install_dependencies():
    """Install project dependencies from requirements.txt using pip.

    Raises:
        subprocess.CalledProcessError: if pip exits with a non-zero status,
            so failures are not silently ignored.
    """
    print("📦 Instalando dependencias...")
    # check=True surfaces pip failures instead of continuing as if OK.
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "-r", "requirements.txt"],
        check=True,
    )
|
| 55 |
+
|
| 56 |
+
def update_dependencies():
    """Upgrade project dependencies from requirements.txt using pip.

    Raises:
        subprocess.CalledProcessError: if pip exits with a non-zero status,
            so failures are not silently ignored.
    """
    print("⬆️ Actualizando dependencias...")
    # check=True surfaces pip failures instead of continuing as if OK.
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "--upgrade", "-r", "requirements.txt"],
        check=True,
    )
|
| 60 |
+
|
| 61 |
+
def list_models():
    """Print the model directories found in the local Hugging Face hub cache."""
    print("📋 Modelos disponibles:")

    hub_dir = Path.home() / ".cache" / "huggingface" / "hub"
    if not hub_dir.exists():
        print(" No hay modelos en cache")
        return

    # Each subdirectory of the hub cache corresponds to one cached model.
    for entry in hub_dir.iterdir():
        if entry.is_dir():
            print(f" - {entry.name}")
|
| 72 |
+
|
| 73 |
+
def download_model(model_name):
    """Download a Hugging Face model and tokenizer into the local cache.

    Args:
        model_name: Hub identifier, e.g. "microsoft/DialoGPT-small".

    Any failure (missing transformers, unknown model, network error) is
    reported to the user instead of raised, so the CLI keeps working.
    """
    print(f"⬇️ Descargando modelo: {model_name}")

    try:
        from transformers import AutoTokenizer, AutoModelForCausalLM

        print("Descargando tokenizer...")
        # from_pretrained populates the HF cache as a side effect; the
        # returned objects are not needed here, so don't keep them alive.
        AutoTokenizer.from_pretrained(model_name)

        print("Descargando modelo...")
        AutoModelForCausalLM.from_pretrained(model_name)

        print(f"✅ Modelo {model_name} descargado exitosamente")

    except Exception as e:
        print(f"❌ Error al descargar: {e}")
|
| 90 |
+
|
| 91 |
+
def show_usage():
    """Print CLI usage help for this utilities script."""
    usage = """
🤖 GPT Local - Utilidades

Comandos disponibles:
  check      - Verificar entorno
  clean      - Limpiar cache
  install    - Instalar dependencias
  update     - Actualizar dependencias
  models     - Listar modelos
  download   - Descargar modelo específico

Ejemplos:
  python3 utils.py check
  python3 utils.py download microsoft/DialoGPT-small
  python3 utils.py clean
"""
    print(usage)
|
| 109 |
+
|
| 110 |
+
def main():
    """CLI entry point: dispatch the first argument to the matching command."""
    if len(sys.argv) < 2:
        show_usage()
        return

    command = sys.argv[1]

    # Zero-argument commands dispatched via a table instead of an if/elif chain.
    simple_commands = {
        "check": check_environment,
        "clean": clean_cache,
        "install": install_dependencies,
        "update": update_dependencies,
        "models": list_models,
    }

    if command in simple_commands:
        simple_commands[command]()
    elif command == "download":
        # download needs one extra positional argument: the model name.
        if len(sys.argv) < 3:
            print("❌ Especifica el nombre del modelo")
            return
        download_model(sys.argv[2])
    else:
        print(f"❌ Comando desconocido: {command}")
        show_usage()
|
| 135 |
+
|
| 136 |
+
# Run the CLI dispatcher only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|