Meet2304 committed on
Commit
ca4908b
·
1 Parent(s): 9c62e8c

Add api_name to Gradio endpoints for API access

Browse files
Files changed (1) hide show
  1. app.py +12 -10
app.py CHANGED
@@ -32,8 +32,8 @@ import gradio as gr
32
 
33
  # ========== CONFIGURATION ==========
34
 
35
- # Model path (local files in the Space)
36
- MODEL_PATH = os.path.dirname(os.path.abspath(__file__))
37
 
38
  # Class names
39
  CLASS_NAMES = [
@@ -58,16 +58,16 @@ DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
58
 
59
  # ========== MODEL LOADING ==========
60
 
61
- print("Loading model from local files...")
62
- print(f"Model path: {MODEL_PATH}")
63
  print(f"Device: {DEVICE}")
64
 
65
- # Load image processor from local files
66
- processor = AutoImageProcessor.from_pretrained(MODEL_PATH)
67
  print("✓ Processor loaded")
68
 
69
- # Load model from local files
70
- model = ConvNextV2ForImageClassification.from_pretrained(MODEL_PATH)
71
  model = model.to(DEVICE)
72
  model.eval()
73
  print("✓ Model loaded and set to evaluation mode")
@@ -300,7 +300,8 @@ with gr.Blocks(css=custom_css, title="Project Phoenix - Cervical Cancer Cell Cla
300
  predict_btn_basic.click(
301
  fn=predict_basic,
302
  inputs=input_image_basic,
303
- outputs=output_label_basic
 
304
  )
305
 
306
  # Tab 2: Prediction with Explainability
@@ -320,7 +321,8 @@ with gr.Blocks(css=custom_css, title="Project Phoenix - Cervical Cancer Cell Cla
320
  predict_btn_explain.click(
321
  fn=predict_with_explainability,
322
  inputs=input_image_explain,
323
- outputs=[output_label_explain, output_gradcam, output_info]
 
324
  )
325
 
326
  # Footer
 
32
 
33
  # ========== CONFIGURATION ==========
34
 
35
+ # Hugging Face model ID
36
+ HF_MODEL_ID = os.getenv("HF_MODEL_ID", "Meet2304/convnextv2-cervical-cell-classification")
37
 
38
  # Class names
39
  CLASS_NAMES = [
 
58
 
59
  # ========== MODEL LOADING ==========
60
 
61
+ print("Loading model from Hugging Face...")
62
+ print(f"Model ID: {HF_MODEL_ID}")
63
  print(f"Device: {DEVICE}")
64
 
65
+ # Load image processor
66
+ processor = AutoImageProcessor.from_pretrained(HF_MODEL_ID)
67
  print("✓ Processor loaded")
68
 
69
+ # Load model
70
+ model = ConvNextV2ForImageClassification.from_pretrained(HF_MODEL_ID)
71
  model = model.to(DEVICE)
72
  model.eval()
73
  print("✓ Model loaded and set to evaluation mode")
 
300
  predict_btn_basic.click(
301
  fn=predict_basic,
302
  inputs=input_image_basic,
303
+ outputs=output_label_basic,
304
+ api_name="predict_basic"
305
  )
306
 
307
  # Tab 2: Prediction with Explainability
 
321
  predict_btn_explain.click(
322
  fn=predict_with_explainability,
323
  inputs=input_image_explain,
324
+ outputs=[output_label_explain, output_gradcam, output_info],
325
+ api_name="predict_with_explainability"
326
  )
327
 
328
  # Footer