foreversheikh committed on
Commit
ae378d9
·
verified ·
1 Parent(s): 63d98ed

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +38 -72
  2. requirements.txt +3 -2
app.py CHANGED
@@ -75,20 +75,19 @@
75
 
76
  # if cap:
77
  # cap.release()
78
- import streamlit as st
 
 
 
79
  import numpy as np
80
  import tensorflow as tf
81
  from tensorflow.keras.models import load_model
82
  from PIL import Image
83
- import os
84
-
85
- # Check if running on Hugging Face Spaces
86
- on_huggingface = os.environ.get("SPACE_ID") is not None
87
 
88
- # Load model
89
  model = tf.keras.models.load_model("model_n.keras")
90
 
91
- # Class names
92
  class_names = [
93
  'Bush Clock Vine', 'Common Lanthana', 'Datura', 'Hibiscus', 'Jatropha', 'Marigold',
94
  'Nityakalyani', 'Rose', 'Yellow_Daisy', 'adathoda', 'banana', 'champaka', 'chitrak',
@@ -97,68 +96,35 @@ class_names = [
97
  'thumba', 'touch me not', 'tridax procumbens', 'wild_potato_vine'
98
  ]
99
 
100
- # Title
101
- st.title("🌼 Flower Identifier")
102
-
103
- # Choose mode
104
- if on_huggingface:
105
- st.warning("Real-time camera is not supported on Hugging Face. Please upload an image.")
106
- mode = "Upload Image"
107
- else:
108
- mode = st.radio("Choose input method:", ["Upload Image", "Real-Time Camera"])
109
-
110
- # Upload image mode
111
- if mode == "Upload Image":
112
- st.markdown("### Upload an image of a flower")
113
- img = st.file_uploader("Choose an image", type=["jpg", "jpeg", "png"])
114
-
115
- if img is not None:
116
- st.image(img, caption="Uploaded Image", use_column_width=True)
117
-
118
- image = Image.open(img).convert("RGB")
119
- image = tf.keras.preprocessing.image.img_to_array(image)
120
- image = tf.cast(image, tf.float32)
121
- image = tf.expand_dims(image, 0)
122
-
123
- if st.button("Identify Flower"):
124
- prediction = model.predict(image)
125
- predicted_class = np.argmax(prediction[0])
126
- confidence = round(100 * np.max(prediction[0]), 2)
127
- flower_name = class_names[predicted_class]
128
-
129
- st.success(f"🌸 Predicted Flower: **{flower_name}**")
130
- st.info(f"🔍 Confidence: **{confidence}%**")
131
-
132
- # Real-time camera mode (local only)
133
- elif mode == "Real-Time Camera":
134
- import cv2 # <- import only if needed
135
- st.markdown("### Real-Time Flower Recognition")
136
- run = st.checkbox('Start Camera')
137
- FRAME_WINDOW = st.image([])
138
-
139
- cap = None
140
- if run:
141
- cap = cv2.VideoCapture(0)
142
-
143
- while run:
144
- ret, frame = cap.read()
145
- if not ret:
146
- st.warning("⚠️ Failed to access camera.")
147
- break
148
-
149
- img_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
150
- img_array = tf.keras.preprocessing.image.img_to_array(img_rgb)
151
- img_array = tf.expand_dims(tf.cast(img_array, tf.float32), 0)
152
-
153
- predictions = model.predict(img_array)
154
- predicted_class = np.argmax(predictions[0])
155
- confidence = round(100 * np.max(predictions[0]), 2)
156
- flower_name = class_names[predicted_class]
157
-
158
- cv2.putText(frame, f"{flower_name} ({confidence}%)", (10, 30),
159
- cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
160
-
161
- FRAME_WINDOW.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
162
-
163
- if cap:
164
- cap.release()
 
75
 
76
  # if cap:
77
  # cap.release()
78
+
79
+
80
+
81
+ import gradio as gr
82
  import numpy as np
83
  import tensorflow as tf
84
  from tensorflow.keras.models import load_model
85
  from PIL import Image
 
 
 
 
86
 
87
+ # Load the model
88
  model = tf.keras.models.load_model("model_n.keras")
89
 
90
+ # Define class names
91
  class_names = [
92
  'Bush Clock Vine', 'Common Lanthana', 'Datura', 'Hibiscus', 'Jatropha', 'Marigold',
93
  'Nityakalyani', 'Rose', 'Yellow_Daisy', 'adathoda', 'banana', 'champaka', 'chitrak',
 
96
  'thumba', 'touch me not', 'tridax procumbens', 'wild_potato_vine'
97
  ]
98
 
99
+ # Prediction function
100
+ def predict_flower(img):
101
+ image = img.convert("RGB")
102
+ image = tf.keras.preprocessing.image.img_to_array(image)
103
+ image = tf.cast(image, tf.float32)
104
+ image = tf.expand_dims(image, 0)
105
+
106
+ prediction = model.predict(image)
107
+ predicted_class = np.argmax(prediction[0])
108
+ confidence = round(100 * np.max(prediction[0]), 2)
109
+ flower_name = class_names[predicted_class]
110
+
111
+ return f"🌼 Predicted Flower: {flower_name} ({confidence}%)"
112
+
113
+ # Gradio interface
114
+ title = "🌸 Flower Identifier using Deep Learning"
115
+ description = "Upload an image or use your camera to identify a flower from 28 known classes."
116
+
117
+ iface = gr.Interface(
118
+ fn=predict_flower,
119
+ inputs=[
120
+ gr.Image(type="pil", label="Upload or Capture Flower Image", source="upload", tool="editor")
121
+ ],
122
+ outputs="text",
123
+ title=title,
124
+ description=description,
125
+ live=False,
126
+ examples=None,
127
+ )
128
+
129
+ if __name__ == "__main__":
130
+ iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,6 +1,7 @@
 
1
  streamlit>=1.30.0
 
2
  numpy>=1.22.0
3
- tensorflow>=2.19.0
4
  Pillow>=9.0.0
5
- opencv-python>=4.5.5
6
  keras
 
1
+ gradio>=4.0.0
2
  streamlit>=1.30.0
3
+ tensorflow>=2.10.0
4
  numpy>=1.22.0
 
5
  Pillow>=9.0.0
6
+ opencv-python-headless>=4.5.5
7
  keras