Suhani-2407 committed on
Commit
d16e9f9
·
verified ·
1 Parent(s): 0962fd1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -7
app.py CHANGED
@@ -4,31 +4,53 @@ import gradio as gr
4
  import tensorflow as tf
5
  import numpy as np
6
  from PIL import Image
 
7
 
8
# Load the trained classifier once at module import time.
model = tf.keras.models.load_model("MobileNet_model.h5")
# Class labels, index-aligned with the model's output units.
class_names = ["Fake", "Low", "Medium", "High"]
11
 
12
def predict_image(img):
    """Classify a PIL image with the fire-intensity model.

    Returns a dict with the predicted class and per-class confidence
    scores, or a dict with an "error" key on failure.
    """
    if img is None:
        return {"error": "No image provided"}
    try:
        # Match the model's expected input: 128x128 pixels, values scaled
        # to [0, 1], with a leading batch axis.
        resized = img.resize((128, 128))
        batch = np.expand_dims(np.array(resized) / 255.0, axis=0)
        predictions = model.predict(batch)
        top = np.argmax(predictions, axis=1)[0]
        scores = {
            name: float(predictions[0][idx])
            for idx, name in enumerate(class_names)
        }
        return {"Predicted Class": class_names[top], "Confidence Scores": scores}
    except Exception as e:
        # Surface the failure to the caller instead of raising.
        return {"error": str(e)}
 
 
26
# Wire the classifier into a minimal Gradio UI: PIL image in, JSON out.
iface = gr.Interface(
    fn=predict_image,
    inputs=gr.Image(type="pil"),
    outputs="json",
)

# show_error=True enables verbose error reporting in the hosted UI.
iface.launch(server_name="0.0.0.0", server_port=7860, show_error=True)
34
 
 
4
  import tensorflow as tf
5
  import numpy as np
6
  from PIL import Image
7
+ import traceback
8
 
9
# Load the trained classifier once at module import time.
model = tf.keras.models.load_model("MobileNet_model.h5")
# Class labels, index-aligned with the model's output units.
class_names = ["Fake", "Low", "Medium", "High"]
12
 
13
def predict_image(img):
    """Run the fire-intensity classifier on one uploaded image.

    Args:
        img: A PIL image, a filesystem path to an image, or None.

    Returns:
        On success: {"Predicted Class": str, "Confidence Scores": {label: float}}.
        On failure: a dict with an "error" key (plus a "traceback" key for
        unexpected exceptions).
    """
    try:
        if img is None:
            return {"error": "No image provided"}

        # Gradio (or the examples list) may hand us a file path instead
        # of a PIL image.
        if isinstance(img, str):
            img = Image.open(img)

        if not isinstance(img, Image.Image):
            return {"error": f"Expected PIL Image, got {type(img)}"}

        # Normalize every input mode (L, LA, P, RGBA, CMYK, ...) to 3-channel
        # RGB in one step. This subsumes the previous manual grayscale-stack
        # and RGBA-slice branches and also covers palette/LA images that they
        # missed, which would otherwise reach the model with the wrong
        # channel count.
        img = img.convert("RGB").resize((128, 128))
        img_array = np.array(img) / 255.0               # scale pixels to [0, 1]
        img_array = np.expand_dims(img_array, axis=0)   # add batch axis

        predictions = model.predict(img_array)
        class_index = np.argmax(predictions, axis=1)[0]
        confidence_scores = {
            class_names[i]: float(predictions[0][i])
            for i in range(len(class_names))
        }
        return {"Predicted Class": class_names[class_index], "Confidence Scores": confidence_scores}
    except Exception as e:
        # Report the failure (with traceback) through the JSON response
        # rather than crashing the Gradio worker.
        return {"error": str(e), "traceback": traceback.format_exc()}
43
 
44
# Build the web UI around the prediction function.
# NOTE(review): assumes example1.jpg / example2.jpg ship with the app —
# confirm they exist, or Gradio will fail to load the examples.
iface = gr.Interface(
    fn=predict_image,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=gr.JSON(),
    examples=[["example1.jpg"], ["example2.jpg"]],
    title="Fire Detection API",
    description="Upload an image to detect fire presence and intensity",
)

# Serve on all interfaces at the port Hugging Face Spaces expects,
# with debugging and verbose error reporting enabled.
iface.launch(server_name="0.0.0.0", server_port=7860, share=True, debug=True, show_error=True)
56