Commit · 2ae6084
Parent(s): 7071781
Fix: run the merge before starting the API
Files changed:
- app.py +37 -16
- fusion_modelo.py +12 -10
- requirements.txt +3 -3
app.py
CHANGED
@@ -1,39 +1,60 @@
 import os
 import subprocess
-from fastapi import FastAPI
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
-from
+from fastapi import FastAPI
+from pydantic import BaseModel
 
 # ✅ Configuration
-BASE_MODEL = "microsoft/phi-2"
-REPO_ID = "fcp2207/Fusion_modelo_Phi2"
-MODEL_FILENAME = "phi2_finetuned.pth"
 SAVE_PATH = "phi2_full_model"
+MODEL_REPO = "fcp2207/Fusion_modelo_Phi2"
 
-# ✅
+# ✅ Run `fusion_modelo.py` if the model does not exist
 if not os.path.exists(SAVE_PATH):
     print("🔄 Running fusion_modelo.py to merge the model...")
     try:
-        # Run fusion_modelo.py before the API
         subprocess.run(["python", "fusion_modelo.py"], check=True)
-        print("✅ `fusion_modelo.py` ran successfully.")
     except subprocess.CalledProcessError as e:
         print(f"❌ Error running fusion_modelo.py: {e}")
-        exit(1)
-
+        exit(1)
 else:
     print(f"✅ Model already merged at {SAVE_PATH}.")
 
-# ✅ Initialize
+# ✅ Initialize FastAPI
 app = FastAPI()
 
+# ✅ Load model and tokenizer
+print("🔄 Loading merged model into memory...")
+try:
+    model = AutoModelForCausalLM.from_pretrained(SAVE_PATH, torch_dtype=torch.float16, device_map="cpu")
+    tokenizer = AutoTokenizer.from_pretrained(SAVE_PATH)
+    print("✅ Model and tokenizer loaded successfully.")
+except Exception as e:
+    print(f"❌ Error loading the merged model: {e}")
+    exit(1)
+
+# ✅ Define the request schema
+class InputText(BaseModel):
+    input_text: str
+
 @app.get("/")
 def home():
-    return {"message": "
+    return {"message": "🚀 Phi-2 chatbot API up and running"}
+
+@app.post("/predict/")
+def predict(request: InputText):
+    """Generate a response based on the user's input."""
+    inputs = tokenizer(request.input_text, return_tensors="pt", padding=True, truncation=True, max_length=512)
+
+    with torch.no_grad():
+        outputs = model.generate(**inputs, max_length=150)
+
+    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return {"response": response_text}
 
-@app.get("/status")
-def status():
-    """Return the model's status."""
-    return {"status": "Model merged and ready to use", "model_path": SAVE_PATH}
+# ✅ Run the server
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8000)
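For a quick check of the new endpoint, here is a minimal client sketch (not part of the commit): it assumes the app is reachable on the host/port from the `uvicorn.run(...)` call above, uses the third-party `requests` package, and the prompt string is purely illustrative.

# Client-side sketch for the new POST /predict/ endpoint (assumption:
# the app is reachable at localhost:8000, per the uvicorn.run call above).
import requests

payload = {"input_text": "Explain what a language model is in one sentence."}
resp = requests.post("http://localhost:8000/predict/", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json()["response"])  # decoded text returned by the endpoint

Note that `model.generate(**inputs, max_length=150)` counts the prompt tokens toward the 150-token limit, so long prompts leave little room for the generated reply.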
|
fusion_modelo.py
CHANGED
@@ -5,18 +5,19 @@ import os
 
 # ✅ Configuration
 BASE_MODEL = "microsoft/phi-2"
-
-
+FINE_TUNED_REPO = "fcp2207/Phi-2"  # Repository holding the fine-tuned model
+FINE_TUNED_FILENAME = "phi2_finetuned.pth"
 SAVE_PATH = "phi2_full_model"
+UPLOAD_REPO = "fcp2207/Fusion_modelo_Phi2"  # Where the merged model will be uploaded
 
 # ✅ Download fine-tuned weights from Hugging Face
-print("🔄 Downloading fine-tuned weights...")
+print("🔄 Downloading fine-tuned weights from Hugging Face...")
 try:
-    model_path = hf_hub_download(
-    print(f"✅ Weights downloaded successfully
+    model_path = hf_hub_download(repo_id=FINE_TUNED_REPO, filename=FINE_TUNED_FILENAME)
+    print(f"✅ Weights downloaded successfully: {model_path}")
 except Exception as e:
     print(f"❌ Error downloading the weights: {str(e)}")
-    exit(1)
+    exit(1)
 
 # ✅ Load base model optimized for CPU
 print("🔄 Loading base model on CPU...")
@@ -60,12 +61,13 @@ else:
 print("🚀 Uploading merged model to Hugging Face...")
 try:
     upload_folder(
-        repo_id=
-        folder_path=SAVE_PATH,
+        repo_id=UPLOAD_REPO,
+        folder_path=SAVE_PATH,
         commit_message="Uploading merged model",
-        use_auth_token=True  # 🔹 Force authentication
+        use_auth_token=True  # 🔹 Force authentication if needed
     )
-    print(f"✅ Model uploaded successfully to https://huggingface.co/{
+    print(f"✅ Model uploaded successfully to https://huggingface.co/{UPLOAD_REPO}!")
 except Exception as e:
     print(f"❌ Error uploading the model to Hugging Face: {str(e)}")
 
+
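The hunks above only touch the download and upload ends of `fusion_modelo.py`; the merge step in between (roughly lines 23-59) is unchanged and not shown in this commit. Purely as a hedged sketch of what that middle plausibly does, assuming `phi2_finetuned.pth` holds a state dict compatible with the `microsoft/phi-2` architecture:

# Hypothetical sketch of the unchanged merge step (not shown in this
# commit). Assumes phi2_finetuned.pth is a state dict that overlays the
# base weights; strict=False is an assumption, not confirmed by the diff.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE_MODEL = "microsoft/phi-2"
SAVE_PATH = "phi2_full_model"
model_path = "phi2_finetuned.pth"   # in the script, the hf_hub_download result

model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.float16)
model.load_state_dict(torch.load(model_path, map_location="cpu"), strict=False)
model.save_pretrained(SAVE_PATH)    # produces the folder that upload_folder() sends
AutoTokenizer.from_pretrained(BASE_MODEL).save_pretrained(SAVE_PATH)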
|
requirements.txt
CHANGED
@@ -1,6 +1,6 @@
-torch
-transformers
-huggingface_hub
 fastapi
 uvicorn
+transformers
+torch
+huggingface_hub
 accelerate>=0.26.0
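The requirements change is a pure reorder that keeps the `accelerate>=0.26.0` pin. As a small sketch (not part of the commit), a smoke test that the installed environment resolves; import names match the package names here:

# Smoke test: confirm every dependency in requirements.txt imports and
# report its version (accelerate must satisfy the >=0.26.0 pin).
import fastapi, uvicorn, transformers, torch, huggingface_hub, accelerate

for mod in (fastapi, uvicorn, transformers, torch, huggingface_hub, accelerate):
    print(f"{mod.__name__:<16} {getattr(mod, '__version__', 'unknown')}")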