samithcs committed on
Commit
25c5985
·
1 Parent(s): 038fdc6

update project

Browse files
artifacts/models/baseline_history.json ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "accuracy": [
3
+ 0.8880633115768433,
4
+ 0.9394962787628174,
5
+ 0.9497249722480774,
6
+ 0.9571552872657776,
7
+ 0.9619801044464111,
8
+ 0.9652610421180725,
9
+ 0.9676734805107117,
10
+ 0.9698928594589233,
11
+ 0.9725947976112366,
12
+ 0.9738492965698242,
13
+ 0.9725947976112366,
14
+ 0.9744282364845276,
15
+ 0.9767441749572754,
16
+ 0.976647675037384,
17
+ 0.9782881140708923,
18
+ 0.9805075526237488,
19
+ 0.97954261302948,
20
+ 0.9807970523834229,
21
+ 0.9814725518226624,
22
+ 0.983209490776062,
23
+ 0.9830164909362793,
24
+ 0.9819550514221191,
25
+ 0.9837884902954102,
26
+ 0.9834989905357361,
27
+ 0.9827269911766052,
28
+ 0.9844639301300049,
29
+ 0.9837884902954102,
30
+ 0.9862009286880493,
31
+ 0.9859114289283752,
32
+ 0.985042929649353,
33
+ 0.9880343675613403,
34
+ 0.9875518679618835,
35
+ 0.9875518679618835,
36
+ 0.9874553680419922,
37
+ 0.9886133074760437,
38
+ 0.9862009286880493
39
+ ],
40
+ "loss": [
41
+ 0.2664412558078766,
42
+ 0.15920524299144745,
43
+ 0.13362552225589752,
44
+ 0.11977039277553558,
45
+ 0.10575573146343231,
46
+ 0.09759275615215302,
47
+ 0.09332476556301117,
48
+ 0.08601260185241699,
49
+ 0.08296018838882446,
50
+ 0.078891821205616,
51
+ 0.07702494412660599,
52
+ 0.07292535156011581,
53
+ 0.06834259629249573,
54
+ 0.06703294813632965,
55
+ 0.06486733257770538,
56
+ 0.06227454915642738,
57
+ 0.061870187520980835,
58
+ 0.05987834930419922,
59
+ 0.056517522782087326,
60
+ 0.055569957941770554,
61
+ 0.053644366562366486,
62
+ 0.054151490330696106,
63
+ 0.05065951496362686,
64
+ 0.05058245360851288,
65
+ 0.050053149461746216,
66
+ 0.04905802756547928,
67
+ 0.048753850162029266,
68
+ 0.04615778475999832,
69
+ 0.04582696408033371,
70
+ 0.04523399844765663,
71
+ 0.04218636080622673,
72
+ 0.043016765266656876,
73
+ 0.041500113904476166,
74
+ 0.04166035354137421,
75
+ 0.040624041110277176,
76
+ 0.04107125103473663
77
+ ],
78
+ "val_accuracy": [
79
+ 0.9391827583312988,
80
+ 0.9502692222595215,
81
+ 0.9550206065177917,
82
+ 0.9543870687484741,
83
+ 0.9645232558250427,
84
+ 0.9623059630393982,
85
+ 0.9680076241493225,
86
+ 0.9695913791656494,
87
+ 0.9680076241493225,
88
+ 0.972758948802948,
89
+ 0.9759265184402466,
90
+ 0.9746595025062561,
91
+ 0.9787773489952087,
92
+ 0.9787773489952087,
93
+ 0.9794108271598816,
94
+ 0.9794108271598816,
95
+ 0.9806778430938721,
96
+ 0.9778270721435547,
97
+ 0.9835286736488342,
98
+ 0.9816281199455261,
99
+ 0.9832119345664978,
100
+ 0.9819448590278625,
101
+ 0.9819448590278625,
102
+ 0.9851124286651611,
103
+ 0.9771935343742371,
104
+ 0.9822616577148438,
105
+ 0.9832119345664978,
106
+ 0.9857459664344788,
107
+ 0.9841621518135071,
108
+ 0.9844789505004883,
109
+ 0.9863794445991516,
110
+ 0.9835286736488342,
111
+ 0.9844789505004883,
112
+ 0.9860627055168152,
113
+ 0.9828951358795166,
114
+ 0.9809946417808533
115
+ ],
116
+ "val_loss": [
117
+ 0.17172108590602875,
118
+ 0.13897046446800232,
119
+ 0.12103567272424698,
120
+ 0.11579054594039917,
121
+ 0.10252440720796585,
122
+ 0.10001140832901001,
123
+ 0.08984819054603577,
124
+ 0.08655385673046112,
125
+ 0.08577841520309448,
126
+ 0.07951025664806366,
127
+ 0.07641823589801788,
128
+ 0.07423911988735199,
129
+ 0.07199306041002274,
130
+ 0.07009323686361313,
131
+ 0.06831575930118561,
132
+ 0.06736170500516891,
133
+ 0.06620226800441742,
134
+ 0.06701282411813736,
135
+ 0.06184401735663414,
136
+ 0.06242649629712105,
137
+ 0.06026426702737808,
138
+ 0.05989912897348404,
139
+ 0.06237339600920677,
140
+ 0.05732772499322891,
141
+ 0.06300686299800873,
142
+ 0.059069763869047165,
143
+ 0.05765041336417198,
144
+ 0.05478701367974281,
145
+ 0.05648797005414963,
146
+ 0.055511001497507095,
147
+ 0.0535995289683342,
148
+ 0.0559844933450222,
149
+ 0.05646961182355881,
150
+ 0.053657740354537964,
151
+ 0.058711983263492584,
152
+ 0.05925580859184265
153
+ ]
154
+ }
artifacts/models/category_history.json ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "accuracy": [
3
+ 0.8836225271224976,
4
+ 0.9570059776306152,
5
+ 0.9707329869270325,
6
+ 0.9783303141593933,
7
+ 0.9834240078926086,
8
+ 0.9850643277168274,
9
+ 0.9886903166770935,
10
+ 0.9917983412742615,
11
+ 0.9954243302345276,
12
+ 0.9938703179359436,
13
+ 0.9939566850662231,
14
+ 0.995078980922699,
15
+ 0.9947336316108704,
16
+ 0.9940429925918579,
17
+ 0.9933523535728455,
18
+ 0.9969783425331116,
19
+ 0.9955106377601624,
20
+ 0.996891975402832,
21
+ 0.9987049698829651,
22
+ 0.9983596801757812,
23
+ 0.9986186623573303
24
+ ],
25
+ "loss": [
26
+ 0.37353599071502686,
27
+ 0.14305350184440613,
28
+ 0.093997523188591,
29
+ 0.06734441965818405,
30
+ 0.049412257969379425,
31
+ 0.04349873214960098,
32
+ 0.03429267182946205,
33
+ 0.02595502696931362,
34
+ 0.017324281856417656,
35
+ 0.017613478004932404,
36
+ 0.018058663234114647,
37
+ 0.016504919156432152,
38
+ 0.01680760085582733,
39
+ 0.01652861014008522,
40
+ 0.019517360255122185,
41
+ 0.010544461198151112,
42
+ 0.01264826487749815,
43
+ 0.009982614777982235,
44
+ 0.005640930961817503,
45
+ 0.0056995246559381485,
46
+ 0.005192732438445091
47
+ ],
48
+ "val_accuracy": [
49
+ 0.9691011309623718,
50
+ 0.9846585988998413,
51
+ 0.9889801144599915,
52
+ 0.9950302243232727,
53
+ 0.9967588782310486,
54
+ 0.9958945512771606,
55
+ 0.9924373626708984,
56
+ 0.9987035393714905,
57
+ 0.9982714056968689,
58
+ 0.9989196062088013,
59
+ 0.9969749450683594,
60
+ 0.9984874725341797,
61
+ 0.9989196062088013,
62
+ 0.9984874725341797,
63
+ 0.9978392124176025,
64
+ 0.9997839331626892,
65
+ 0.9987035393714905,
66
+ 0.9991356730461121,
67
+ 0.9991356730461121,
68
+ 0.9993517994880676,
69
+ 0.9993517994880676
70
+ ],
71
+ "val_loss": [
72
+ 0.1030760183930397,
73
+ 0.058829668909311295,
74
+ 0.0396256297826767,
75
+ 0.024300476536154747,
76
+ 0.0160489771515131,
77
+ 0.016302524134516716,
78
+ 0.026506498456001282,
79
+ 0.009227240458130836,
80
+ 0.007496202364563942,
81
+ 0.00561791704967618,
82
+ 0.008364077657461166,
83
+ 0.005430787336081266,
84
+ 0.005307900719344616,
85
+ 0.00564191909506917,
86
+ 0.007319050841033459,
87
+ 0.0017467719735577703,
88
+ 0.004918231628835201,
89
+ 0.0028124600648880005,
90
+ 0.0021249386481940746,
91
+ 0.002299233339726925,
92
+ 0.0022428531665354967
93
+ ],
94
+ "learning_rate": [
95
+ 0.0010000000474974513,
96
+ 0.0010000000474974513,
97
+ 0.0010000000474974513,
98
+ 0.0010000000474974513,
99
+ 0.0010000000474974513,
100
+ 0.0010000000474974513,
101
+ 0.0010000000474974513,
102
+ 0.000800000037997961,
103
+ 0.000800000037997961,
104
+ 0.000800000037997961,
105
+ 0.000800000037997961,
106
+ 0.000800000037997961,
107
+ 0.000800000037997961,
108
+ 0.000800000037997961,
109
+ 0.000800000037997961,
110
+ 0.000640000042039901,
111
+ 0.000640000042039901,
112
+ 0.000640000042039901,
113
+ 0.0005120000569149852,
114
+ 0.0005120000569149852,
115
+ 0.00040960003389045596
116
+ ]
117
+ }
artifacts/models/category_labels.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ "Apple",
3
+ "Banana",
4
+ "Bellpepper",
5
+ "Carrot",
6
+ "Cucumber",
7
+ "Mango",
8
+ "Orange",
9
+ "Potato",
10
+ "Strawberry",
11
+ "Tomato"
12
+ ]
requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ numpy
2
+ pandas
3
+ matplotlib
4
+ seaborn
5
+ tensorflow
6
+ opencv-python
7
+ fastapi
8
+ uvicorn
9
+ python-dotenv
10
+ Pillow
11
+ scikit-learn
12
+ ultralytics
13
+ gradio
src/app/app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from src.pipeline.prediction_pipeline import PredictionPipeline
3
+ import numpy as np
4
+ from PIL import Image
5
+
6
+ pipeline = PredictionPipeline()
7
+
8
def predict_single(image):
    """Run the freshness pipeline on *image*.

    Returns (annotated image, category text, freshness text) suitable for the
    gr.Image / gr.Textbox / gr.Textbox outputs it is wired to.
    """
    if image is None:
        return None, "No image detected!", "No image detected!"
    # Gradio delivers upload/webcam frames as numpy arrays; normalize to PIL.
    img = Image.fromarray(image) if isinstance(image, np.ndarray) else image
    result = pipeline.predict(img)
    annotated_img = pipeline.annotate(img, result)
    # predict() returns dicts ({"label", "idx", "score"}) for "category" and
    # "freshness"; format them as readable text instead of letting the raw
    # dict repr land in the Textbox components.
    category_text = f"{result['category']['label']} ({result['category']['score']:.2f})"
    freshness_text = f"{result['freshness']['label']} ({result['freshness']['score']:.2f})"
    return annotated_img, category_text, freshness_text
16
+
17
# Gradio UI: two tabs (file upload and live webcam) sharing one predict handler.
with gr.Blocks() as demo:
    gr.Markdown("# Food Freshness Detection")

    with gr.Tab("Image Upload"):
        upload_input = gr.Image(sources=["upload"], label="Upload an Image")
        upload_output = gr.Image()
        upload_category = gr.Textbox(label="Category")
        upload_freshness = gr.Textbox(label="Freshness")
        upload_btn = gr.Button("Predict on Image")
        upload_btn.click(
            predict_single,
            inputs=upload_input,
            outputs=[upload_output, upload_category, upload_freshness],
        )

    with gr.Tab("Live Webcam"):
        webcam_input = gr.Image(sources=["webcam"], label="Webcam")
        webcam_output = gr.Image()
        webcam_category = gr.Textbox(label="Category")
        webcam_freshness = gr.Textbox(label="Freshness")
        webcam_btn = gr.Button("Predict")
        webcam_btn.click(
            predict_single,
            inputs=webcam_input,
            outputs=[webcam_output, webcam_category, webcam_freshness],
        )


if __name__ == "__main__":
    # Bind on all interfaces, port 7860 (the conventional Gradio/Spaces port).
    demo.launch(server_name="0.0.0.0", server_port=7860)
src/config.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os


class Config:
    """Central project paths and label constants."""

    # Repository root: two directory levels above this file (src/config.py).
    PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    # Artifact layout under <root>/artifacts.
    DATA_DIR = os.path.join(PROJECT_ROOT, 'artifacts', 'data')
    RAW_DATA_DIR = os.path.join(DATA_DIR, 'raw')
    PROCESSED_DATA_DIR = os.path.join(DATA_DIR, 'processed')
    MODEL_DIR = os.path.join(PROJECT_ROOT, 'artifacts', 'models')
    RESULTS_DIR = os.path.join(PROJECT_ROOT, 'artifacts', 'results')

    METADATA_FILE = os.path.join(DATA_DIR, 'metadata.json')

    # Label vocabularies used by the classifiers.
    CATEGORY_NAMES = ['Fruits', 'Vegetables']
    FRUIT_NAMES = ['Apple', 'Banana', 'Mango', 'Orange', 'Strawberry']
    VEGETABLE_NAMES = ['Bellpepper', 'Carrot', 'Cucumber', 'Potato', 'Tomato']
    CLASS_NAMES = ['Fresh', 'Rotten']
src/pipeline/__init__.py ADDED
File without changes
src/pipeline/prediction_pipeline.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from pathlib import Path
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import tensorflow as tf
import json

from src.config import Config

# Trained Keras models live in the shared artifacts/models directory.
_MODEL_DIR = Path(Config.MODEL_DIR)
CATEGORY_MODEL_PATH = _MODEL_DIR / "category_classifier.keras"
FRESHNESS_MODEL_PATH = _MODEL_DIR / "mobilenetv2_baseline.keras"

# Category label names are persisted next to the model at training time.
CATEGORY_LABELS_PATH = _MODEL_DIR / "category_labels.json"
CATEGORY_LABELS = json.loads(CATEGORY_LABELS_PATH.read_text())

FRESHNESS_LABELS = ["Fresh", "Rotten"]

# Both models take 224x224 RGB inputs.
CATEGORY_IMG_SIZE = (224, 224)
FRESHNESS_IMG_SIZE = (224, 224)
22
class PredictionPipeline:
    """Two-stage inference pipeline for produce images.

    Runs a category classifier (which fruit/vegetable) and a freshness
    classifier (Fresh/Rotten) on a single image and can render the combined
    prediction onto the image.
    """

    def __init__(self):
        # Load both Keras models once up front so predict() is cheap per call.
        self.category_model = tf.keras.models.load_model(CATEGORY_MODEL_PATH)
        self.freshness_model = tf.keras.models.load_model(FRESHNESS_MODEL_PATH)
        self.category_labels = CATEGORY_LABELS
        self.freshness_labels = FRESHNESS_LABELS

    def _preprocess_image(self, img, target_size, normalize=True):
        """Convert *img* into a batched model input.

        Args:
            img: a filesystem path (str/Path), a PIL image, or an (H, W, 3)
                uint8-style numpy array.
            target_size: (width, height) to resize to.
            normalize: scale pixel values into [0, 1] when True.

        Returns:
            (array of shape (1, H, W, 3) float32, the PIL image used).

        Raises:
            FileNotFoundError: the path does not exist.
            ValueError: a numpy input is not shaped (H, W, 3).
        """
        if isinstance(img, (str, Path)):
            img_path = Path(img)
            if not img_path.exists():
                raise FileNotFoundError(f"Image not found: {img_path.resolve()}")
            img = Image.open(str(img_path)).convert("RGB")
        elif isinstance(img, np.ndarray):
            if img.ndim == 3 and img.shape[2] == 3:
                img = Image.fromarray(img)
            else:
                raise ValueError("NumPy input must be shape (H, W, 3)")
        img_resized = img.resize(target_size)
        img_array = np.array(img_resized).astype("float32")
        if normalize:
            img_array = img_array / 255.0  # models expect [0, 1] inputs
        img_array = np.expand_dims(img_array, axis=0)  # add batch dimension
        return img_array, img

    def predict(self, img):
        """Run both classifiers on *img*.

        Returns a dict with keys "category" and "freshness" (each a dict
        of {"label", "idx", "score"}) plus "pil_img", the PIL image used
        for inference (handy for annotate()).
        """
        cat_img_array, pil_img = self._preprocess_image(img, CATEGORY_IMG_SIZE, normalize=True)
        cat_pred = self.category_model.predict(cat_img_array)
        cat_idx = int(np.argmax(cat_pred))
        cat_label = self.category_labels[cat_idx]
        cat_score = float(np.max(cat_pred))

        fresh_img_array, _ = self._preprocess_image(img, FRESHNESS_IMG_SIZE, normalize=True)
        fresh_pred = self.freshness_model.predict(fresh_img_array)
        fresh_idx = int(np.argmax(fresh_pred))
        fresh_label = self.freshness_labels[fresh_idx]
        fresh_score = float(np.max(fresh_pred))

        return {
            "category": {"label": cat_label, "idx": cat_idx, "score": cat_score},
            "freshness": {"label": fresh_label, "idx": fresh_idx, "score": fresh_score},
            "pil_img": pil_img
        }

    def annotate(self, img, result, font_size=28):
        """Draw the prediction summary as a banner on the predicted image.

        NOTE: the *img* argument is unused (kept for backward compatibility
        with existing callers); the image annotated is result["pil_img"],
        which is modified in place and also returned.
        """
        pil_img = result["pil_img"]
        draw = ImageDraw.Draw(pil_img)
        text = f"{result['category']['label']} ({result['category']['score']:.2f}) | " \
               f"{result['freshness']['label']} ({result['freshness']['score']:.2f})"
        try:
            font = ImageFont.truetype("arial.ttf", font_size)
        # Narrowed from a bare `except:` (which would also swallow
        # KeyboardInterrupt/SystemExit): truetype raises OSError when the
        # font file cannot be found or read.
        except OSError:
            font = ImageFont.load_default()
        draw.rectangle([0, 0, pil_img.width, font_size + 8], fill=(0, 0, 0, 160))
        draw.text((5, 2), text, fill=(255, 255, 255), font=font)
        return pil_img
+
82
if __name__ == "__main__":
    # Manual smoke test: run both models on one sample image and save the
    # annotated result.
    pipeline = PredictionPipeline()

    # os.path.join keeps the path portable; the original hard-coded
    # Windows-style backslash strings, which break on POSIX systems.
    img_path = os.path.join(
        "artifacts", "data", "category", "test", "Apple", "rottenApple (175).jpg"
    )

    result = pipeline.predict(img_path)

    print("Prediction:", result["category"], "|", result["freshness"])

    annotated_img = pipeline.annotate(img_path, result)

    out_dir = os.path.join("artifacts", "results")
    # Ensure the destination directory exists before saving.
    os.makedirs(out_dir, exist_ok=True)
    annotated_img.save(os.path.join(out_dir, "annotated_result.jpg"))