Tahereh committed f9d3bad (parent: 420f791)

Fix 500 error: add error handling, lazy initialization, and fix Gradio launch config for Hugging Face Spaces

Files changed:
- app.py (+27 -8)
- inference.py (+1 -1)
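For reference, a minimal standalone sketch of the pattern this commit applies: lazy model initialization behind a module-level global, plus environment-based launch config. The model class below is a hypothetical stand-in, not the repo's GenerativeInferenceModel, and the Interface wiring is illustrative only; SPACE_ID is the environment variable Hugging Face Spaces sets in its containers, and server_name/server_port/share are standard gradio launch() arguments.

import os
import gradio as gr

# Hypothetical stand-in for the Space's real model class (assumption, not repo code).
class DummyModel:
    def predict(self, text):
        return text.upper()

# Build the model at import time, but fall back to None so the app still starts
# (and can report a friendly error) if initialization fails.
try:
    model = DummyModel()
except Exception as e:
    print(f"Warning: model init failed: {e}")
    model = None

def run(text):
    global model
    if model is None:  # lazy retry on first request
        try:
            model = DummyModel()
        except Exception as e:
            return f"Error initializing model: {e}"
    return model.predict(text)

demo = gr.Interface(fn=run, inputs="text", outputs="text")

if __name__ == "__main__":
    if "SPACE_ID" in os.environ:   # set by Hugging Face Spaces
        demo.launch()              # let Spaces pick host and port
    else:
        demo.launch(server_name="0.0.0.0", server_port=7860)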
app.py
CHANGED
@@ -34,8 +34,14 @@ else:
 # Use command line port if provided, otherwise use default
 server_port = args.port if args.port is not None else default_port
 
-# Initialize model
-model = GenerativeInferenceModel()
+# Initialize model (lazy initialization to avoid startup failures)
+try:
+    model = GenerativeInferenceModel()
+    print("Model manager initialized successfully")
+except Exception as e:
+    print(f"Warning: Error initializing model manager: {e}")
+    print("Will attempt to initialize on first use")
+    model = None
 
 # Define example images and their parameters with updated values from the research
 examples = [
@@ -216,6 +222,15 @@ examples = [
 @GPU
 def run_inference(image, model_type, inference_type, eps_value, num_iterations,
                   initial_noise=0.05, diffusion_noise=0.3, step_size=0.8, model_layer="layer3"):
+    # Initialize model if not already initialized
+    global model
+    if model is None:
+        try:
+            model = GenerativeInferenceModel()
+            print("Model manager initialized on first use")
+        except Exception as e:
+            return None, f"Error initializing model: {str(e)}. Please try again."
+
     # Check if image is provided
     if image is None:
         return None, "Please upload an image before running inference."
@@ -486,9 +501,13 @@ with gr.Blocks(title="Generative Inference for Psychiatry Demo", css="""
 # Launch the demo
 if __name__ == "__main__":
     print(f"Starting server on port {server_port}")
-    demo.launch(
-        server_name="0.0.0.0",
-        server_port=server_port,
-        share=False,
-        debug=True
-    )
+    # On Hugging Face Spaces, don't specify server_name/server_port
+    if "SPACE_ID" in os.environ:
+        demo.launch(share=False, debug=False)
+    else:
+        demo.launch(
+            server_name="0.0.0.0",
+            server_port=server_port,
+            share=False,
+            debug=True
+        )
inference.py
CHANGED
@@ -229,7 +229,7 @@ class GenerativeInferenceModel:
         """Get ImageNet labels."""
         url = "https://raw.githubusercontent.com/anishathalye/imagenet-simple-labels/master/imagenet-simple-labels.json"
         try:
-            response = requests.get(url)
+            response = requests.get(url, timeout=10)  # Add timeout to prevent hanging
             if response.status_code == 200:
                 return response.json()
             else: