onlyshrey98 committed on
Commit
66b4b26
·
1 Parent(s): e52ee72

Fixed: Re-committing model using Git LFS

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -0
  2. Model.hdf5 +3 -0
  3. app.py +74 -0
  4. requirements.txt +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.hdf5 filter=lfs diff=lfs merge=lfs -text
Model.hdf5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfd13975286ffab8fa97a7fe189d47190f1e67a89e65ee89d0af313e53e80f59
3
+ size 129105944
app.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import tensorflow as tf
import numpy as np
from PIL import Image

# Load the trained classifier.
# BUG FIX: this commit adds the weights file as "Model.hdf5" (capital M).
# Loading "model.hdf5" raises FileNotFoundError on case-sensitive
# filesystems, such as the Linux hosts that Spaces run on.
model = tf.keras.models.load_model('Model.hdf5')

# The 38 class labels, in the index order the model was trained with:
# index i corresponds to the model's i-th output unit.
class_names = [
    'Apple___Apple_scab', 'Apple___Black_rot', 'Apple___Cedar_apple_rust', 'Apple___healthy',
    'Blueberry___healthy', 'Cherry_(including_sour)___Powdery_mildew', 'Cherry_(including_sour)___healthy',
    'Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot', 'Corn_(maize)___Common_rust', 'Corn_(maize)___Northern_Leaf_Blight',
    'Corn_(maize)___healthy', 'Grape___Black_rot', 'Grape___Esca_(Black_Measles)', 'Grape___leaf_blight_(Isariopsis_Leaf_Spot)',
    'Grape___healthy', 'Orange___Haunglongbing_(Citrus_greening)', 'Peach___Bacterial_spot', 'Peach___healthy',
    'Pepper,_bell___Bacterial_spot', 'Pepper,_bell___healthy', 'Potato___Early_blight', 'Potato___Late_blight',
    'Potato___healthy', 'Raspberry___healthy', 'Soybean___healthy', 'Squash___Powdery_mildew', 'Strawberry___Leaf_scorch',
    'Strawberry___healthy', 'Tomato___Bacterial_spot', 'Tomato___Early_blight', 'Tomato___Late_blight',
    'Tomato___Leaf_Mold', 'Tomato___Septoria_leaf_spot', 'Tomato___Spider_mites Two-spotted_spider_mite',
    'Tomato___Target_Spot', 'Tomato___Yellow_Leaf_Curl_Virus', 'Tomato___mosaic_virus', 'Tomato___healthy'
]
# Prediction function
def predict(image):
    """Classify a crop-leaf photo with the loaded Keras model.

    Parameters
    ----------
    image : numpy.ndarray
        Pixel array supplied by the Gradio ``Image`` component.

    Returns
    -------
    dict
        Single-entry mapping of the predicted class name to its
        confidence score, the format ``gr.Label`` expects.
    """
    # --- PREPROCESSING STEP ---
    # ROBUSTNESS FIX: force 3-channel RGB so grayscale or RGBA uploads
    # don't reach the model with the wrong channel count (the original
    # passed the array through unconverted).
    # NOTE(review): (256, 256) is assumed to match the model's input
    # shape — confirm against the training pipeline (224x224 is also
    # common for these models).
    img = Image.fromarray(image).convert('RGB').resize((256, 256))
    img_array = np.array(img)

    # Scale pixel values to [0, 1]; assumes the same normalization was
    # applied during training — TODO confirm.
    img_array = img_array / 255.0

    # Add the batch dimension the model expects: (1, 256, 256, 3).
    img_batch = np.expand_dims(img_array, axis=0)

    # --- PREDICTION STEP ---
    prediction = model.predict(img_batch)

    # Index of the highest-probability class in the first (only) row.
    predicted_class_index = np.argmax(prediction[0])

    # Map the index back to its human-readable label.
    predicted_class_name = class_names[predicted_class_index]

    # Confidence of the winning class, as a plain Python float.
    confidence = float(np.max(prediction[0]))

    # Return a {label: confidence} dict for gr.Label to render.
    return {predicted_class_name: confidence}
# --- GRADIO INTERFACE ---
# Wire the classifier into a simple upload-and-predict web UI.
leaf_input = gr.Image(label="Upload a photo of a crop leaf 🌿")
label_output = gr.Label(num_top_classes=1, label="Prediction Result")

iface = gr.Interface(
    fn=predict,
    inputs=leaf_input,
    outputs=label_output,
    title="Crop Disease Prediction",
    description="Upload an image of a crop leaf to predict its disease. This model can identify 38 different conditions.",
    # Add paths to sample images here to show clickable examples,
    # e.g. ["path/to/your/example1.jpg"].
    examples=[],
)

# Launch the app
iface.launch()
requirements.txt ADDED
File without changes