Thiresh committed on
Commit
e951894
·
verified ·
1 Parent(s): cc4001d

Upload folder using huggingface_hub

Browse files
Files changed (6) hide show
  1. .gitattributes +1 -0
  2. Dockerfile +12 -12
  3. app.py +158 -0
  4. model/Model2_Transfer.keras +3 -0
  5. model/metadata.json +6 -0
  6. requirements.txt +5 -3
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ model/Model2_Transfer.keras filter=lfs diff=lfs merge=lfs -text
Dockerfile CHANGED
@@ -1,21 +1,21 @@
 
1
  FROM python:3.9-slim
2
 
 
3
  WORKDIR /app
4
 
5
- RUN apt-get update && apt-get install -y \
6
- build-essential \
7
- curl \
8
- software-properties-common \
9
- git \
10
- && rm -rf /var/lib/apt/lists/*
11
 
12
- COPY requirements.txt ./
13
- COPY src/ ./src/
 
14
 
15
- RUN pip3 install -r requirements.txt
 
16
 
 
17
  EXPOSE 8501
18
 
19
- HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
20
-
21
- ENTRYPOINT ["streamlit", "run", "src/streamlit_app.py", "--server.port=8501", "--server.address=0.0.0.0"]
 
# Use a minimal base image with Python
FROM python:3.9-slim

# Set working directory
WORKDIR /app

# Copy only requirements first for caching
COPY requirements.txt .

# Install dependencies; --no-cache-dir keeps the pip wheel cache out of the
# final image layer.
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy all files (including model folder) into container
COPY . .

# Expose port (Streamlit default)
EXPOSE 8501

# Probe Streamlit's built-in health endpoint. Uses Python's stdlib because
# python:3.9-slim does not ship curl.
HEALTHCHECK CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8501/_stcore/health')" || exit 1

# Run the Streamlit app
# NOTE(review): disabling XSRF protection weakens CSRF defenses — confirm it is
# actually required by the hosting platform before keeping this flag.
CMD ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0", "--server.enableXsrfProtection=false"]
 
app.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Streamlit app that classifies food images (Bread / Soup / Vegetable-Fruit)."""

import streamlit as st
import tensorflow as tf
from PIL import Image
import numpy as np
import json
import os
import io  # NOTE(review): imported but never used in this file — confirm before removing
import pandas as pd

# Must be the first Streamlit call in the script.
st.set_page_config(page_title="Food Image Classification", layout="centered")
@st.cache_resource
def load_model_and_metadata():
    """
    Load the saved Keras model and metadata (class names, input size) from the local 'model/' folder.

    Returns:
        (model, class_names, IMG_HEIGHT, IMG_WIDTH) — failed elements are None.
        Errors are reported to the UI via st.error rather than raised.
    """
    model_dir = os.path.join(os.getcwd(), "model")

    # The repo ships the model as a single .keras file (model/Model2_Transfer.keras),
    # not as a SavedModel directory. Passing the bare directory to load_model()
    # fails under TF 2.16 / Keras 3 because the directory is not a SavedModel,
    # so prefer an explicit .keras file when one exists; fall back to the
    # directory for a genuine SavedModel layout.
    model_path = model_dir
    if os.path.isdir(model_dir):
        keras_files = sorted(f for f in os.listdir(model_dir) if f.endswith(".keras"))
        if keras_files:
            model_path = os.path.join(model_dir, keras_files[0])

    try:
        model = tf.keras.models.load_model(model_path)
    except Exception as e:
        st.error(f"Failed to load model from {model_path}: {e}")
        return None, None, None, None

    # Load metadata.json (class names and expected input size).
    meta_path = os.path.join(model_dir, "metadata.json")
    if not os.path.exists(meta_path):
        st.error(f"metadata.json not found in {model_dir}")
        return model, None, None, None

    with open(meta_path, "r") as f:
        metadata = json.load(f)
    class_names = metadata.get("class_names", None)
    IMG_HEIGHT = metadata.get("IMG_HEIGHT", None)
    IMG_WIDTH = metadata.get("IMG_WIDTH", None)

    if class_names is None or IMG_HEIGHT is None or IMG_WIDTH is None:
        st.error("metadata.json must contain 'class_names', 'IMG_HEIGHT', and 'IMG_WIDTH'")
        return model, None, None, None
    return model, class_names, IMG_HEIGHT, IMG_WIDTH
model, class_names, IMG_HEIGHT, IMG_WIDTH = load_model_and_metadata()
if model is None or class_names is None:
    st.stop()


def _classify(pil_image):
    """Resize/scale a PIL image, run the model, and return (class, confidence, probs).

    Shared by the single-image and batch paths so preprocessing cannot drift
    between them. Pixels are scaled to [0, 1]; if the model was trained with a
    different preprocessing (e.g. VGG-style), that must be baked into the model.
    """
    resized = pil_image.resize((IMG_WIDTH, IMG_HEIGHT))
    arr = np.array(resized).astype("float32") / 255.0
    batch = np.expand_dims(arr, axis=0)  # shape (1, H, W, 3)
    probs = model.predict(batch)[0]
    idx = int(np.argmax(probs))
    return class_names[idx], float(probs[idx]), probs


st.title("Food Image Classification")
st.write("Upload one or more food images (Bread / Soup / Vegetables-Fruits) to classify.")

# Sidebar info
with st.sidebar:
    st.header("Instructions")
    st.write(
        """
        - Upload JPG/PNG images of food.
        - The model expects images resized to {}×{}.
        - The model was trained to classify into: {}
        - For best results, upload clear images of individual food item.
        """.format(IMG_HEIGHT, IMG_WIDTH, ", ".join(class_names))
    )

# Single image upload
st.subheader("Single Image Prediction")
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"], key="single")
if uploaded_file is not None:
    # Read image
    try:
        image = Image.open(uploaded_file).convert("RGB")
    except Exception as e:
        st.error(f"Cannot open image: {e}")
        image = None
    if image:
        # use_container_width replaces use_column_width, which is deprecated
        # in the pinned streamlit==1.43.2.
        st.image(image, caption="Uploaded Image", use_container_width=True)

        pred_class, confidence, probs = _classify(image)
        st.write(f"**Prediction:** {pred_class} \n**Confidence:** {confidence:.3f}")

        # Show full probability distribution
        prob_df = pd.DataFrame({
            "class": class_names,
            "probability": probs
        })
        st.bar_chart(data=prob_df.set_index("class"))

        # Offer the single result as a CSV download.
        result_df = pd.DataFrame([{
            "filename": uploaded_file.name,
            "predicted_class": pred_class,
            "confidence": confidence
        }])
        csv = result_df.to_csv(index=False).encode('utf-8')
        st.download_button(
            label="Download prediction as CSV",
            data=csv,
            file_name="prediction.csv",
            mime="text/csv"
        )

# Batch image upload
st.subheader("Batch Image Prediction")
uploaded_files = st.file_uploader(
    "Choose multiple images...", type=["jpg", "jpeg", "png"], accept_multiple_files=True, key="batch"
)
if uploaded_files:
    if st.button("Run Batch Prediction"):
        results = []
        cols = st.columns(3)
        # Loop through each uploaded image; unreadable files are skipped with
        # a warning instead of aborting the whole batch.
        for idx, up_file in enumerate(uploaded_files):
            try:
                img = Image.open(up_file).convert("RGB")
            except Exception:
                st.warning(f"Skipping file {up_file.name}: cannot open as image.")
                continue
            # Display thumbnails in a 3-column grid
            cols[idx % 3].image(img.resize((150, 150)), caption=up_file.name)
            pred_class, confidence, _ = _classify(img)
            results.append({
                "filename": up_file.name,
                "predicted_class": pred_class,
                "confidence": confidence
            })
        if results:
            results_df = pd.DataFrame(results)
            st.write("Batch Prediction Results:")
            st.dataframe(results_df)
            # Download button for the whole batch
            csv = results_df.to_csv(index=False).encode('utf-8')
            st.download_button(
                label="Download all predictions as CSV",
                data=csv,
                file_name="batch_predictions.csv",
                mime="text/csv"
            )
        else:
            st.info("No valid images to predict.")
model/Model2_Transfer.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d48b0f02e6d27bfa2cd0c57c52100ff8046bd684e8915a9cb21295199e4daf31
3
+ size 59744313
model/metadata.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+
2
+ {
3
+ "class_names": ["Bread", "Soup", "Vegetable-Fruit"],
4
+ "IMG_HEIGHT": 224,
5
+ "IMG_WIDTH": 224
6
+ }
requirements.txt CHANGED
@@ -1,3 +1,5 @@
1
- altair
2
- pandas
3
- streamlit
 
 
 
streamlit==1.43.2
Pillow==10.3.0
tensorflow==2.16.1  # Use the TensorFlow version your model was trained with
numpy==1.26.4
pandas==2.2.2       # required: app.py imports pandas for result tables/CSV export
scikit-learn==1.4.2 # Needed for LabelBinarizer