Rx Codex AI committed on
Commit
7d398da
·
verified ·
1 Parent(s): 33d3090

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -25,20 +25,22 @@ async def lifespan(app: FastAPI):
25
  # Load the model on startup
26
  hf_token = os.getenv("HF_TOKEN")
27
  if not hf_token:
28
- raise RuntimeError("HF_TOKEN environment variable not set!")
29
 
30
- model_id = "rxmha125/sdxl-base-1.0-private" # Your private model ID
 
 
 
 
31
  print(f"Loading model: {model_id}")
32
 
33
- # --- *** THIS IS THE CORRECTED PART *** ---
34
- # We removed variant="fp16" and use_safetensors=True
35
- # to load the available .bin files instead of the missing .safetensors.
36
  pipe = AutoPipelineForText2Image.from_pretrained(
37
  model_id,
38
- torch_dtype=torch.float16, # Keep for memory optimization
 
 
39
  token=hf_token
40
  ).to("cuda")
41
- # --- *********************************** ---
42
 
43
  # Optimization for speed and memory
44
  pipe.enable_model_cpu_offload()
@@ -66,14 +68,12 @@ def generate_image(request: ImageRequest):
66
 
67
  print(f"Generating image for prompt: '{request.prompt}'")
68
  try:
69
- # Generate the image
70
  image = pipe(
71
  prompt=request.prompt,
72
  negative_prompt=request.negative_prompt,
73
  num_inference_steps=request.steps
74
  ).images[0]
75
 
76
- # Convert image to Base64
77
  buffer = io.BytesIO()
78
  image.save(buffer, format="PNG")
79
  img_str = base64.b64encode(buffer.getvalue()).decode("utf-8")
 
25
  # Load the model on startup
26
  hf_token = os.getenv("HF_TOKEN")
27
  if not hf_token:
28
+ raise RuntimeError("HF_TOKEN environment variable not set! Please add it in the Space settings.")
29
 
30
+ # --- *** THIS IS THE ONLY LINE THAT CHANGES *** ---
31
+ # We now point directly to the original, public model repository.
32
+ model_id = "stabilityai/stable-diffusion-xl-base-1.0"
33
+ # --- ****************************************** ---
34
+
35
  print(f"Loading model: {model_id}")
36
 
 
 
 
37
  pipe = AutoPipelineForText2Image.from_pretrained(
38
  model_id,
39
+ torch_dtype=torch.float16,
40
+ variant="fp16", # Use the optimized fp16 variant
41
+ use_safetensors=True,
42
  token=hf_token
43
  ).to("cuda")
 
44
 
45
  # Optimization for speed and memory
46
  pipe.enable_model_cpu_offload()
 
68
 
69
  print(f"Generating image for prompt: '{request.prompt}'")
70
  try:
 
71
  image = pipe(
72
  prompt=request.prompt,
73
  negative_prompt=request.negative_prompt,
74
  num_inference_steps=request.steps
75
  ).images[0]
76
 
 
77
  buffer = io.BytesIO()
78
  image.save(buffer, format="PNG")
79
  img_str = base64.b64encode(buffer.getvalue()).decode("utf-8")