Rx Codex AI committed
Update app.py
app.py CHANGED
@@ -12,7 +12,7 @@ import os
 class ImageRequest(BaseModel):
     prompt: str
     negative_prompt: str = ""
-    steps: int =
+    steps: int = 20 # Lowered for faster CPU inference
 
 class ImageResponse(BaseModel):
     image_base64: str
@@ -23,30 +23,26 @@ app_state = {}
 @asynccontextmanager
 async def lifespan(app: FastAPI):
     # Load the model on startup
-    hf_token = os.getenv("HF_TOKEN")
+    hf_token = os.getenv("HF_TOKEN") # Still needed to accept terms
     if not hf_token:
         raise RuntimeError("HF_TOKEN environment variable not set! Please add it in the Space settings.")
 
-    # --- ***
-    #
-    model_id = "
-    # --- ****************************************** ---
+    # --- *** THESE ARE THE CHANGES FOR CPU *** ---
+    # 1. Use the smaller Stable Diffusion v1.5 model
+    model_id = "runwayml/stable-diffusion-v1-5"
 
-    print(f"Loading model: {model_id}")
+    print(f"Loading model: {model_id} for CPU...")
 
+    # 2. Load the pipeline without GPU-specific settings
     pipe = AutoPipelineForText2Image.from_pretrained(
         model_id,
-        torch_dtype=torch.float16,
-        variant="fp16", # Use the optimized fp16 variant
-        use_safetensors=True,
         token=hf_token
-    )
-
-    #
-    pipe.enable_model_cpu_offload()
+    )
+    # Note: We do not use .to("cuda")
+    # --- ************************************** ---
 
     app_state["pipe"] = pipe
-    print("Model loaded successfully.")
+    print("Model loaded successfully onto CPU.")
     yield
     # Clean up on shutdown
     app_state.clear()
@@ -57,7 +53,7 @@ app = FastAPI(lifespan=lifespan)
 
 @app.get("/")
 def root():
-    return {"status": "Text-to-Image API is running"}
+    return {"status": "Text-to-Image CPU API is running"}
 
 @app.post("/generate-image", response_model=ImageResponse)
 def generate_image(request: ImageRequest):
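The diff ends at the generate_image signature, so the handler body is not part of this commit view. For orientation, here is a minimal sketch of what such a handler could look like, assuming the standard diffusers calling convention (the pipeline call returns an object whose .images list holds PIL images) and PNG/base64 encoding to match the ImageResponse schema; this is an illustrative reconstruction, not the Space's actual code:

import base64
import io

@app.post("/generate-image", response_model=ImageResponse)
def generate_image(request: ImageRequest):
    pipe = app_state["pipe"]
    # Run the text-to-image pipeline; the request's steps field maps onto num_inference_steps
    image = pipe(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        num_inference_steps=request.steps,
    ).images[0]
    # Serialize the PIL image to PNG and base64-encode it for the JSON response
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    return ImageResponse(image_base64=base64.b64encode(buf.getvalue()).decode("utf-8"))

Because the handler is a plain def rather than async def, FastAPI runs it in a threadpool, which keeps the event loop responsive during the slow CPU inference.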
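For completeness, a client of the updated endpoint would POST JSON matching ImageRequest and decode the base64 payload from ImageResponse. A usage sketch, with a placeholder Space URL and a generous timeout (SD v1.5 on a CPU Space can take minutes per image):

import base64
import requests

API_URL = "https://your-space.hf.space"  # placeholder; substitute the real Space endpoint

resp = requests.post(
    f"{API_URL}/generate-image",
    json={
        "prompt": "a watercolor fox in a snowy forest",
        "negative_prompt": "blurry, low quality",
        "steps": 20,  # the new default; keep low for CPU inference
    },
    timeout=600,  # CPU generation is slow
)
resp.raise_for_status()

# Decode the base64 PNG returned in the image_base64 field
with open("output.png", "wb") as f:
    f.write(base64.b64decode(resp.json()["image_base64"]))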