fahrnphi commited on
Commit
7f7a44f
·
verified ·
1 Parent(s): 03469bf

Upload 3 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ fahrnphi_exam_project.keras filter=lfs diff=lfs merge=lfs -text
fahrnphi_exam_project.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:817ff30404dab55d0679049aafc26be1e7f963def186972d49c5c7cc186edf0f
3
+ size 250707767
intelligent_recipe_finder.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import image
import cv2


@st.cache_resource
def _load_model():
    """Load the ingredient classifier once and reuse it across Streamlit reruns.

    Streamlit re-executes the whole script on every user interaction;
    ``st.cache_resource`` keeps the (~250 MB) model in memory instead of
    reloading it each time.
    """
    # The .keras file is uploaded to the repository root alongside this
    # script (this very commit adds it), so a relative path is used rather
    # than the original hard-coded "/mnt/data/..." notebook path, which
    # does not exist in the deployed environment.
    return tf.keras.models.load_model("fahrnphi_exam_project.keras")


model = _load_model()

# Input size (height, width) expected by the model.
img_height, img_width = 150, 150
12
+
13
+ # Define a function for prediction and returning labels and probabilities
14
def predict_labels_and_probabilities(image_path):
    """Classify the four quadrants of an uploaded ingredient image.

    Parameters
    ----------
    image_path : file-like object
        The uploaded image (e.g. a Streamlit ``UploadedFile``); its raw
        bytes are read and decoded with OpenCV. NOTE(review): despite the
        name, this is a file object, not a filesystem path.

    Returns
    -------
    list[tuple[str, float]]
        One ``(label, probability)`` pair per quadrant, in the order
        top-left, top-right, bottom-left, bottom-right.

    Raises
    ------
    ValueError
        If the uploaded bytes cannot be decoded as an image.
    """
    # Class-index -> name mapping. Hoisted out of the per-patch loop: it is
    # loop-invariant and was previously rebuilt once per quadrant.
    class_labels = {
        0: 'Bell Pepper', 1: 'Carrot', 2: 'Garlic', 3: 'Ginger',
        4: 'Jalapeno', 5: 'Onion', 6: 'Potato', 7: 'Sweetpotato', 8: 'Tomato'
    }

    # Decode the raw bytes with OpenCV (BGR order). IMREAD_COLOR replaces
    # the magic flag `1` used before. imdecode returns None on failure
    # rather than raising, so check explicitly.
    img = cv2.imdecode(np.frombuffer(image_path.read(), np.uint8), cv2.IMREAD_COLOR)
    if img is None:
        raise ValueError("Uploaded file could not be decoded as an image.")
    # Convert BGR -> RGB; presumably the model was trained on RGB input —
    # TODO confirm against the training pipeline.
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # The picture may contain several ingredients: split it into four
    # quadrants and classify each one independently.
    h, w, _ = img.shape
    patches = [
        img_rgb[0:h//2, 0:w//2],   # Top-left
        img_rgb[0:h//2, w//2:w],   # Top-right
        img_rgb[h//2:h, 0:w//2],   # Bottom-left
        img_rgb[h//2:h, w//2:w],   # Bottom-right
    ]

    predictions = []
    for patch in patches:
        # cv2.resize takes (width, height); both are 150 here so the order
        # is harmless, but keep that in mind if the dimensions ever differ.
        patch_resized = cv2.resize(patch, (img_height, img_width))
        patch_array = image.img_to_array(patch_resized)
        patch_array = np.expand_dims(patch_array, axis=0)
        patch_array /= 255.  # Scale pixel values to [0, 1]

        preds = model.predict(patch_array)
        class_idx = int(np.argmax(preds[0]))

        # Cast to plain Python types so callers get str/float, not numpy scalars.
        predictions.append((class_labels[class_idx], float(preds[0][class_idx])))

    return predictions
51
+
52
+ # Streamlit App
53
# ---------------------------------------------------------------------------
# Streamlit App
# ---------------------------------------------------------------------------
st.title("Intelligent Recipe Finder Classification")

# cv2.imdecode handles the common raster formats, so accept jpeg/png too
# instead of restricting the uploader to .jpg only.
uploaded_file = st.file_uploader(
    "Choose an ingredients image...", type=["jpg", "jpeg", "png"]
)

if uploaded_file is not None:
    # Echo the uploaded picture back to the user.
    # NOTE(review): use_column_width is deprecated in recent Streamlit
    # releases in favour of use_container_width — confirm installed version
    # before switching.
    st.image(uploaded_file, caption='Uploaded Ingredient Image.', use_column_width=True)

    # Classify each quadrant and report one label + probability per patch.
    predictions = predict_labels_and_probabilities(uploaded_file)
    st.write("Predictions for different patches of the image:")

    for i, (label, probability) in enumerate(predictions, start=1):
        st.write(f"Patch {i}:")
        st.write("Prediction:", label)
        st.write("Probability:", probability)
requirements.txt ADDED
@@ -0,0 +1,4 @@
1
+ streamlit
2
+ tensorflow
3
+ opencv-python-headless
4
+ numpy