Spaces:
Paused
Paused
Commit
·
fa720ef
1
Parent(s):
84ab11b
Corregido error de permisos en Hugging Face (caché personalizada)
Browse files
api.py
CHANGED
|
@@ -1,26 +1,31 @@
|
|
# api.py — FastAPI service that serves a fine-tuned Phi-2 model.
# (Forced update to push to Hugging Face.)
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from huggingface_hub import hf_hub_download

# FastAPI application instance (endpoints are registered further down the file).
app = FastAPI()

# Location of the fine-tuned weights on the Hugging Face Hub.
HUGGING_FACE_REPO = "fcp2207/Phi-2"  # repo id — make sure it matches the Hub
MODEL_FILENAME = "phi2_finetuned.pth"  # weight file inside that repo

# Fetch the fine-tuned weight file from the Hub (default cache location).
model_path = hf_hub_download(repo_id=HUGGING_FACE_REPO, filename=MODEL_FILENAME)

# Base tokenizer and model, then overlay the fine-tuned weights on CPU.
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2")
model.load_state_dict(torch.load(model_path, map_location="cpu"))
|
@@ -60,3 +65,6 @@ if __name__ == "__main__":
|
|
| 60 |
|
| 61 |
|
| 62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# api.py — FastAPI inference service for a fine-tuned Phi-2 model.
#
# Module-level setup: create the app, download the fine-tuned weights from
# the Hugging Face Hub into a local cache directory (a custom, writable
# cache avoids permission errors on Spaces where the default HF cache path
# may not be writable), then load the base model and apply the weights.
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from huggingface_hub import hf_hub_download
import os

# Initialize FastAPI (endpoints are registered further down the file).
app = FastAPI()

# Create a local cache directory up front to avoid permission problems
# with the default Hugging Face cache location.
CACHE_DIR = "./cache"
os.makedirs(CACHE_DIR, exist_ok=True)

# Location of the fine-tuned weights on the Hugging Face Hub.
HUGGING_FACE_REPO = "fcp2207/Phi-2"  # repo id — make sure it matches the Hub
MODEL_FILENAME = "phi2_finetuned.pth"  # weight file inside that repo

# Download the fine-tuned weights, pinning the writable cache directory.
model_path = hf_hub_download(
    repo_id=HUGGING_FACE_REPO,
    filename=MODEL_FILENAME,
    cache_dir=CACHE_DIR,  # allowed (writable) cache path
)

# Base tokenizer and model, cached in the same writable directory.
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", cache_dir=CACHE_DIR)
model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", cache_dir=CACHE_DIR)

# Overlay the fine-tuned weights on CPU. weights_only=True restricts
# torch.load to tensor data — the file is consumed as a state dict, so this
# is safe, and it avoids arbitrary-code execution via pickle on a file
# fetched from the Hub.
model.load_state_dict(torch.load(model_path, map_location="cpu", weights_only=True))
# Serve in inference mode: disables dropout so generations are not degraded
# by training-mode randomness.
model.eval()
|
|
|
|
| 65 |
|
| 66 |
|
| 67 |
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|