khhamid commited on
Commit
e2a99cd
·
verified ·
1 Parent(s): a5d9b1d

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ models/mobileNet_1.keras filter=lfs diff=lfs merge=lfs -text
37
+ models/mobileNet_10.keras filter=lfs diff=lfs merge=lfs -text
.github/workflows/dockerhub_push.yml ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# CI/CD: test the code, then build & push the Docker image to Docker Hub.
name: CI/CD - DockerHub

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]
  workflow_dispatch:

jobs:
  build-test-deploy:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.11"

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install pytest

      # FIX: pytest was installed but never executed, so the "test" stage of
      # build-test-deploy was a no-op and broken code could be deployed.
      - name: Run tests
        run: pytest

      # FIX: only log in and push on real branch pushes / manual runs —
      # pull_request runs (especially from forks) must not publish images.
      - name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push Docker image
        if: github.event_name != 'pull_request'
        run: |
          docker build -t ${{ secrets.DOCKERHUB_USERNAME }}/plants-diseases-lite:latest .
          docker push ${{ secrets.DOCKERHUB_USERNAME }}/plants-diseases-lite:latest
.github/workflows/hf_deploy.yml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# CI/CD: upload the TFLite model + labels to the HF model repo, then sync the
# working tree to the HF Space.
name: CI/CD - Hugging Face Spaces & Models

on:
  push:
    branches: ["main"]
  workflow_dispatch:

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install huggingface_hub

      # FIX: previously `${{ secrets.* }}` was interpolated directly into the
      # inline Python source. That can leak secrets into the rendered script,
      # and breaks if a value contains a quote character. Pass secrets through
      # the environment and read them with os.environ instead.
      - name: Upload Model to Hugging Face Hub
        shell: python
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          HF_USERNAME: ${{ secrets.HF_USERNAME }}
        run: |
          import os
          from huggingface_hub import HfApi

          api = HfApi(token=os.environ["HF_TOKEN"])
          repo_id = f"{os.environ['HF_USERNAME']}/plants-diseases-lite-model"
          for local_path, remote_path in [
              ("models/mobilenet_int8.tflite", "mobilenet_int8.tflite"),
              ("class_names.txt", "class_names.txt"),
          ]:
              api.upload_file(
                  path_or_fileobj=local_path,
                  path_in_repo=remote_path,
                  repo_id=repo_id,
                  repo_type="model",
              )

      - name: Deploy to Hugging Face Space
        shell: python
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          HF_USERNAME: ${{ secrets.HF_USERNAME }}
        run: |
          import os
          from huggingface_hub import HfApi

          HfApi(token=os.environ["HF_TOKEN"]).upload_folder(
              folder_path=".",
              repo_id=f"{os.environ['HF_USERNAME']}/plants-diseases-detection",
              repo_type="space",
          )
Dockerfile ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.11-slim

WORKDIR /app

# FIX: install dependencies BEFORE copying the app/model files so Docker layer
# caching skips the (slow) pip install when only the code or model changes.
# The resulting image is functionally identical.
RUN pip install --no-cache-dir streamlit ai-edge-litert pillow numpy

COPY ./models/mobilenet_int8.tflite /app/models/mobilenet_int8.tflite
COPY ./src/app.py /app/app.py
COPY ./class_names.txt /app/class_names.txt

# Streamlit serves on 7860 (the Hugging Face Spaces convention).
EXPOSE 7860

CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]
class_names.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Dump the training-set class names, one per line, to class_names.txt."""
import tensorflow as tf

from keras.utils import image_dataset_from_directory

train_dir = "/media/data/plants_diseases_dataset/train"

img_size = (224, 224)
batch_size = 32

# Keras infers one class per sub-directory of train_dir; we only need the
# resulting class_names attribute, not the image tensors themselves.
train_ds = image_dataset_from_directory(
    train_dir,
    image_size=img_size,
    batch_size=batch_size,
    label_mode="categorical",
)

class_names = train_ds.class_names
print(class_names)

# Persist the label order so inference code can map logits back to names.
with open("class_names.txt", "w") as f:
    f.write("".join(name + "\n" for name in class_names))
class_names.txt ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apple___Apple_scab
2
+ Apple___Black_rot
3
+ Apple___Cedar_apple_rust
4
+ Apple___healthy
5
+ Blueberry___healthy
6
+ Cherry_(including_sour)___Powdery_mildew
7
+ Cherry_(including_sour)___healthy
8
+ Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot
9
+ Corn_(maize)___Common_rust_
10
+ Corn_(maize)___Northern_Leaf_Blight
11
+ Corn_(maize)___healthy
12
+ Grape___Black_rot
13
+ Grape___Esca_(Black_Measles)
14
+ Grape___Leaf_blight_(Isariopsis_Leaf_Spot)
15
+ Grape___healthy
16
+ Orange___Haunglongbing_(Citrus_greening)
17
+ Peach___Bacterial_spot
18
+ Peach___healthy
19
+ Pepper,_bell___Bacterial_spot
20
+ Pepper,_bell___healthy
21
+ Potato___Early_blight
22
+ Potato___Late_blight
23
+ Potato___healthy
24
+ Raspberry___healthy
25
+ Soybean___healthy
26
+ Squash___Powdery_mildew
27
+ Strawberry___Leaf_scorch
28
+ Strawberry___healthy
29
+ Tomato___Bacterial_spot
30
+ Tomato___Early_blight
31
+ Tomato___Late_blight
32
+ Tomato___Leaf_Mold
33
+ Tomato___Septoria_leaf_spot
34
+ Tomato___Spider_mites Two-spotted_spider_mite
35
+ Tomato___Target_Spot
36
+ Tomato___Tomato_Yellow_Leaf_Curl_Virus
37
+ Tomato___Tomato_mosaic_virus
38
+ Tomato___healthy
models/mobileNet_1.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47baecaa06b437ca30ef46621694d5cd53939257839c4b59e84624a45b4a5862
3
+ size 12104897
models/mobileNet_10.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4cc79bad5bc518bb508017eb7831a5e2a75e9093168e7b9273aa3e2036cb9d03
3
+ size 12104897
models/mobilenet_int8.tflite ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4db78869c94c58dc55ffcada595f44d263cde56b3d376370202e04c24459996d
3
+ size 1126424
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
# Training + app dependencies (CI installs this file before running pytest).
tensorflow
streamlit
keras
ai-edge-litert
matplotlib
scikit-learn
seaborn
# FIX: src/app.py imports PIL and numpy directly; previously they were only
# installed inside the Dockerfile, so a plain `pip install -r requirements.txt`
# (as done in CI) could not import the app.
pillow
numpy
src/__pycache__/train.cpython-311.pyc ADDED
Binary file (5.45 kB). View file
 
src/app.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Streamlit front-end: classify an uploaded leaf image with the INT8 TFLite model."""
import streamlit as st
import numpy as np
from PIL import Image
from ai_edge_litert.interpreter import Interpreter
import time


st.title("🌿 MobileNet TFLite Image Classifier")
st.write("Upload an image to test your quantized MobileNet model.")


def load_labels(path):
    """Return the class labels from *path* (one per line), or None if the file is missing."""
    try:
        with open(path, "r") as f:
            return [line.strip() for line in f.readlines()]
    except FileNotFoundError:
        return None

labels = load_labels("class_names.txt")

def preprocess(image_array: np.ndarray) -> np.ndarray:
    """Replicate keras.applications.mobilenet_v3.preprocess_input"""
    image_array = image_array.astype(np.float32)
    image_array = image_array / 127.5 - 1.0  # scale to [-1, 1]
    return image_array

@st.cache_resource
def load_tflite_model():
    """Load and tensor-allocate the TFLite model once per session (Streamlit-cached)."""
    interpreter = Interpreter(model_path="models/mobilenet_int8.tflite")
    interpreter.allocate_tensors()
    return interpreter

interpreter = load_tflite_model()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()


uploaded_file = st.file_uploader("📸 Choose an image...", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    image = Image.open(uploaded_file).convert("RGB")
    st.image(image, caption="Uploaded Image", width="stretch")

    img = image.resize((224, 224))
    img = np.array(img)
    # FIX: the preprocessed tensor was previously discarded — the batch was
    # rebuilt from the raw uint8 `img`, so the model received [0, 255] pixels
    # instead of the [-1, 1] range it was trained on.
    input_data = preprocess(img)
    input_data = np.expand_dims(input_data, axis=0).astype(np.float32)

    start = time.time()
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    preds = interpreter.get_tensor(output_details[0]['index'])[0]
    inference_time = (time.time() - start) * 1000  # ms

    # Show the top-2 predictions, best first.
    top_k = preds.argsort()[-2:][::-1]
    st.markdown("### 🔍 Predictions:")
    for i in top_k:
        label = labels[i] if labels else f"Class {i}"
        st.write(f"**{label}** — {preds[i]*100:.2f}%")
        # NOTE(review): exact float equality rarely fires; kept to preserve the
        # original "stop after a fully-saturated prediction" behavior — confirm
        # whether a threshold (e.g. >= 0.999) was intended.
        if preds[i] == 1:
            break

    st.write(f" Inference Time: {inference_time:.2f} ms")
src/convert.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Convert the trained Keras model to a quantized TFLite model and compare the two."""
import tensorflow as tf
import numpy as np
import time
import os
from keras.utils import image_dataset_from_directory
from keras.applications import mobilenet_v3

val_dir = "/media/data/plants_diseases_dataset/valid"
path = "/media/data/plants_diseases_dataset/"
img_size = (224, 224)
batch_size = 32
INPUT_SHAPE = (224, 224, 3)

KERAS_MODEL_PATH = "models/mobileNet_10.keras"
keras_model = tf.keras.models.load_model(KERAS_MODEL_PATH)

# Post-training dynamic-range quantization.
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
with open("models/mobilenet_int8.tflite", "wb") as f:
    f.write(tflite_model)


def evaluate_models(keras_model, tflite_model_path, dataset,
                    keras_model_path=KERAS_MODEL_PATH):
    """Compare size, speed, and accuracy of Keras vs TFLite model.

    Args:
        keras_model: the loaded Keras model.
        tflite_model_path: path of the converted .tflite file.
        dataset: tf.data.Dataset of (images, one-hot labels); images must be
            preprocessed the same way as at training time.
        keras_model_path: on-disk path of the Keras model, used only for the
            size report (FIX: was hardcoded inside the function).
    """
    keras_size = os.path.getsize(keras_model_path) / 1024**2
    tflite_size = os.path.getsize(tflite_model_path) / 1024**2

    interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    correct = 0
    total = 0
    times = []

    # TFLite has no batch evaluate(); run image-by-image over at most 50 batches.
    for batch_images, batch_labels in dataset.take(50):
        batch_images = batch_images.numpy()
        batch_labels = tf.argmax(batch_labels, axis=1).numpy()

        for i in range(len(batch_images)):
            input_data = np.expand_dims(batch_images[i], axis=0).astype(np.float32)
            start = time.time()
            interpreter.set_tensor(input_details[0]['index'], input_data)
            interpreter.invoke()
            output = interpreter.get_tensor(output_details[0]['index'])
            times.append(time.time() - start)

            pred = np.argmax(output)
            if pred == batch_labels[i]:
                correct += 1
            total += 1

    tflite_acc = correct / total
    avg_time = np.mean(times) * 1000  # ms per image
    print(f"\nModel sizes:")
    print(f" - Keras: {keras_size:.2f} MB")
    print(f" - TFLite INT8: {tflite_size:.2f} MB")
    print("---------------------------------------------")
    _, keras_acc = keras_model.evaluate(dataset, verbose=0)
    print(f"\n Keras Model Accuracy: {keras_acc*100:.2f}%")

    print(f" TFLite Model Accuracy: {tflite_acc*100:.2f}%")
    print(f" Avg Inference Time (1 image): {avg_time:.2f} ms")

    print("\n Summary:")
    print(f" - Size reduction: {(1 - tflite_size/keras_size)*100:.1f}%")
    print(f" - Accuracy drop: {(keras_acc - tflite_acc)*100:.2f}%")


val_ds = image_dataset_from_directory(val_dir,
                                      image_size=img_size,
                                      batch_size=batch_size,
                                      label_mode="categorical")
# FIX: training (src/train.py) maps mobilenet_v3.preprocess_input over the
# dataset; evaluating both models on raw [0, 255] pixels under-reports their
# accuracy. Apply the identical preprocessing here.
val_ds = val_ds.map(lambda x, y: (mobilenet_v3.preprocess_input(x), y))
evaluate_models(keras_model, "models/mobilenet_int8.tflite", val_ds)
src/eval.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Evaluate a saved Keras classifier on the validation set (report + confusion matrix)."""
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
from keras.utils import image_dataset_from_directory
from keras.models import load_model
from keras.applications import mobilenet_v3

train_dir = "/media/data/plants_diseases_dataset/train"
val_dir = "/media/data/plants_diseases_dataset/valid"
img_size = (224, 224)
batch_size = 32

# train_ds is only needed for its class_names (the label ordering).
train_ds = image_dataset_from_directory(train_dir,
                                        image_size=img_size,
                                        batch_size=batch_size,
                                        label_mode="categorical")

val_ds = image_dataset_from_directory(val_dir,
                                      image_size=img_size,
                                      batch_size=batch_size,
                                      label_mode="categorical")


def evaluate_model(model, dataset, class_names):
    """
    Evaluate a trained Keras model on a dataset.

    Prints a classification report and overall accuracy, and shows a
    confusion-matrix heatmap.

    Args:
        model: Trained Keras model
        dataset: tf.data.Dataset (e.g. val_ds), preprocessed like training data
        class_names: list of class names
    """
    # Collect predicted and true class indices batch by batch.
    y_true = []
    y_pred = []

    for batch in dataset:
        images, labels = batch
        preds = model.predict(images)
        y_true.extend(np.argmax(labels.numpy(), axis=1))
        y_pred.extend(np.argmax(preds, axis=1))

    # Per-class precision/recall/F1.
    print("\n--- Classification Report ---")
    print(classification_report(y_true, y_pred, target_names=class_names))

    # Confusion matrix (annot=False: 38 classes make cell labels unreadable).
    cm = confusion_matrix(y_true, y_pred)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=False, cmap="Blues", xticklabels=class_names, yticklabels=class_names)
    plt.title("Confusion Matrix")
    plt.xlabel("Predicted")
    plt.ylabel("True")
    plt.show()

    # Overall accuracy
    acc = np.mean(np.array(y_true) == np.array(y_pred))
    print(f"\n Accuracy: {acc*100:.2f}%")


model = load_model("models/mobileNet_1.keras")
# FIX: the model was trained on mobilenet_v3.preprocess_input-scaled images
# (see src/train.py); previously mobilenet_v3 was imported here but never
# applied, so metrics were computed on raw [0, 255] pixels.
val_ds = val_ds.map(lambda x, y: (mobilenet_v3.preprocess_input(x), y))
evaluate_model(model, val_ds, class_names=train_ds.class_names)
src/train.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Train a MobileNetV3Small transfer-learning classifier on the plant-disease dataset."""
import tensorflow as tf
import keras
from keras.applications import MobileNetV3Small, mobilenet_v3
from keras import layers, models
from keras.utils import image_dataset_from_directory
import matplotlib.pyplot as plt


def preprocess(x):
    """Scale raw pixels the way MobileNetV3 expects."""
    return mobilenet_v3.preprocess_input(x)


train_dir = "/media/data/plants_diseases_dataset/train"
val_dir = "/media/data/plants_diseases_dataset/valid"
path = "/media/data/plants_diseases_dataset/"
img_size = (224, 224)
batch_size = 32
INPUT_SHAPE = (224, 224, 3)
num_epochs = 10
print(INPUT_SHAPE)


def get_datasets(path):
    """Build preprocessed, prefetched train/val datasets rooted at *path*.

    Returns:
        (train_ds, val_ds, num_classes)
    """
    train_dir = path + "/train"
    val_dir = path + "/valid"

    train_ds = image_dataset_from_directory(
        train_dir,
        image_size=img_size,
        batch_size=batch_size,
        label_mode="categorical",
    )
    val_ds = image_dataset_from_directory(
        val_dir,
        image_size=img_size,
        batch_size=batch_size,
        label_mode="categorical",
    )
    num_classes = len(train_ds.class_names)

    # MobileNetV3 pixel scaling, applied inside the tf.data pipeline.
    scale = lambda x, y: (mobilenet_v3.preprocess_input(x), y)
    train_ds = train_ds.map(scale)
    val_ds = val_ds.map(scale)

    # Prefetch so input prep overlaps with GPU training.
    autotune = tf.data.AUTOTUNE
    train_ds = train_ds.prefetch(buffer_size=autotune)
    val_ds = val_ds.prefetch(buffer_size=autotune)
    return train_ds, val_ds, num_classes


def create_MobileNet(INPUT_SHAPE, NUM_CLASSES):
    """Return a compiled classifier: ImageNet MobileNetV3Small + pooled softmax head."""
    base_model = MobileNetV3Small(
        input_shape=INPUT_SHAPE,
        include_top=False,
        weights='imagenet',
    )

    model = models.Sequential([
        keras.Input(shape=INPUT_SHAPE),
        base_model,
        layers.GlobalAveragePooling2D(),
        layers.Dropout(0.5),
        layers.Dense(NUM_CLASSES, activation='softmax'),
    ])

    model.compile(
        optimizer=keras.optimizers.Adam(0.001),
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model


def plot_hist(history):
    """Plot training/validation loss and accuracy curves side by side."""
    plt.figure(figsize=(12, 5))

    plt.subplot(1, 2, 1)
    plt.plot(history.history["loss"], label="Train Loss")
    plt.plot(history.history["val_loss"], label="Val Loss")
    plt.title("Training and Validation Loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(history.history["accuracy"], label="Train Accuracy")
    plt.plot(history.history["val_accuracy"], label="Val Accuracy")
    plt.title("Training and Validation Accuracy")
    plt.xlabel("Epochs")
    plt.ylabel("Accuracy")
    plt.legend()

    plt.tight_layout()
    plt.show()


def main():
    """Train for num_epochs, save the model under models/, and plot the history."""
    train_ds, val_ds, num_classes = get_datasets(path)
    model = create_MobileNet(INPUT_SHAPE, num_classes)
    history = model.fit(train_ds, validation_data=val_ds, epochs=num_epochs)
    model.save(f"models/mobileNet_{num_epochs}.keras")
    plot_hist(history)


if __name__ == "__main__":
    main()