b3rian commited on
Commit
b235cc8
Β·
verified Β·
1 Parent(s): 2dfacd5

Upload folder using huggingface_hub

Browse files
.devcontainer/devcontainer.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Python 3",
3
+ // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
4
+ "image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye",
5
+ "customizations": {
6
+ "codespaces": {
7
+ "openFiles": [
8
+ "README.md",
9
+ "streamlit_app/main.py"
10
+ ]
11
+ },
12
+ "vscode": {
13
+ "settings": {},
14
+ "extensions": [
15
+ "ms-python.python",
16
+ "ms-python.vscode-pylance"
17
+ ]
18
+ }
19
+ },
20
+ "updateContentCommand": "[ -f packages.txt ] && sudo apt update && sudo apt upgrade -y && sudo xargs apt install -y <packages.txt; [ -f requirements.txt ] && pip3 install --user -r requirements.txt; pip3 install --user streamlit; echo 'βœ… Packages installed and Requirements met'",
21
+ "postAttachCommand": {
22
+ "server": "streamlit run streamlit_app/main.py --server.enableCORS false --server.enableXsrfProtection false"
23
+ },
24
+ "portsAttributes": {
25
+ "8501": {
26
+ "label": "Application",
27
+ "onAutoForward": "openPreview"
28
+ }
29
+ },
30
+ "forwardPorts": [
31
+ 8501
32
+ ]
33
+ }
.dockerignore ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ----------------------------------------
2
+ # Ignore system, cache, and temporary files
3
+ # ----------------------------------------
4
+ __pycache__/
5
+ *.pyc
6
+ *.pyo
7
+ *.pyd
8
+ *.log
9
+ *.DS_Store
10
+ *.swp
11
+ *.bak
12
+ *.tmp
13
+ *~
14
+
15
+ # ----------------------------------------
16
+ # Ignore virtual environments
17
+ # ----------------------------------------
18
+ venv/
19
+ .env/
20
+ env/
21
+ .venv/
22
+
23
+ # ----------------------------------------
24
+ # Ignore Git and version control files
25
+ # ----------------------------------------
26
+ .git/
27
+ .gitignore
28
+ .gitattributes
29
+
30
+ # ----------------------------------------
31
+ # Ignore notebook checkpoints or dev tools
32
+ # ----------------------------------------
33
+ .ipynb_checkpoints/
34
+ notebooks/
35
+ tests/
36
+ debug/
37
+ dev/
38
+
39
+ # ----------------------------------------
40
+ # Ignore unnecessary project folders (if applicable)
41
+ # ----------------------------------------
42
+ api_backend/
43
+ data/
44
+ models/
45
+ checkpoints/
46
+ experiments/
47
+
48
+ # ----------------------------------------
49
+ # Ignore local docker/compose or CI files (optional)
50
+ # ----------------------------------------
51
+ docker-compose.yml
52
+ docker-compose.override.yml
53
+ Dockerfile.dev
54
+ .env.dev
Dockerfile ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10-slim

# Don't write .pyc files; flush stdout/stderr immediately (container logs).
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

WORKDIR /app

# Copy only what the app needs: dependency manifest + the Streamlit app itself.
COPY requirements.txt .
COPY streamlit_app/ ./streamlit_app/

# System libraries required by Pillow/OpenCV-style imaging stacks, plus curl
# for the HEALTHCHECK below. Clean the apt cache to keep the layer small.
RUN apt-get update && apt-get install -y \
    build-essential \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender-dev \
    wget \
    curl \
    && rm -rf /var/lib/apt/lists/*

RUN pip install --no-cache-dir -r requirements.txt
RUN chmod a+x streamlit_app/main.py

EXPOSE 7860

# Fix: the original probed http://localhost:$PORT/ but $PORT is never defined
# in this image, so the check could never reach the app. Probe the port the
# CMD actually binds (7860) via Streamlit's built-in health endpoint.
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:7860/_stcore/health || exit 1

CMD ["streamlit", "run", "streamlit_app/main.py", "--server.port=7860", "--server.address=0.0.0.0"]
deploy.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Deploy this project folder to a Hugging Face Docker Space."""
from huggingface_hub import HfApi, upload_folder
from pathlib import Path

# Setup repo info
username = "b3rian"
repo_name = "streamlit-ui"
local_dir = Path(__file__).resolve().parent  # folder containing this script
repo_type = "space"
space_sdk = "docker"

# Fully-qualified Space id, used by both API calls below.
repo_id = f"{username}/{repo_name}"

# 1. Create the Space (exist_ok makes this a no-op on re-deploys).
api = HfApi()
api.create_repo(
    repo_id=repo_id,
    repo_type=repo_type,
    space_sdk=space_sdk,
    exist_ok=True,
)

# 2. Push the entire project folder to the Space.
upload_folder(
    repo_id=repo_id,
    folder_path=local_dir,
    repo_type=repo_type,
)

print(f"✅ Deployed to https://huggingface.co/spaces/{username}/{repo_name}")
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ streamlit
2
+ pillow
3
+ requests
4
+ numpy
5
+ pandas
6
+ python-dotenv
streamlit_app/main.py ADDED
@@ -0,0 +1,302 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import requests
3
+ import io
4
+ import base64
5
+ from PIL import Image, ImageOps
6
+ import numpy as np
7
+ import json
8
+ import time
9
+ import pandas as pd
10
+ from typing import List
11
+ from datetime import datetime
12
+
13
# =================== CONFIG ===================
# Remote classification endpoint (called by classify_image_with_retry with a
# multipart file upload and a `model_name` query parameter).
API_URL = "https://b3rian-image-classifier-api.hf.space/predict"
# File extensions accepted by the upload widget.
SUPPORTED_FORMATS = ["jpg", "jpeg", "png", "webp"]
# Upload size cap enforced in validate_image().
MAX_SIZE_MB = 10
MAX_SIZE_BYTES = MAX_SIZE_MB * 1024 * 1024
18
+
19
+ # =================== UTILITY FUNCTIONS ===================
20
def compress_image(image: Image.Image, quality: int = 85) -> bytes:
    """Encode *image* as JPEG bytes at the given quality (default 85)."""
    buffer = io.BytesIO()
    image.save(buffer, format='JPEG', quality=quality)
    return buffer.getvalue()
24
+
25
def create_thumbnail(image: Image.Image, size=(128, 128)) -> str:
    """Return a base64-encoded JPEG thumbnail of *image*.

    Fix: Image.thumbnail() resizes in place, so the original code shrank the
    caller's image (main() keeps using it for preview/history after this call).
    Work on a copy to leave the caller's image untouched.
    """
    image = image.copy()
    image.thumbnail(size)
    with io.BytesIO() as buffer:
        image.save(buffer, format="JPEG", quality=70)
        return base64.b64encode(buffer.getvalue()).decode()
30
+
31
def validate_image(file) -> Image.Image:
    """Validate an uploaded file-like object and return it as an RGB image.

    Checks the size cap, verifies the image is not truncated/corrupt, then
    reopens it for actual use. Returns None (after showing a Streamlit error)
    on any failure.
    """
    try:
        if hasattr(file, 'size') and file.size > MAX_SIZE_BYTES:
            st.error(f"File too large (max {MAX_SIZE_MB}MB)")
            return None
        image = Image.open(file)
        image.verify()
        # Fix: verify() consumes the stream, so reopening the same file-like
        # object without rewinding fails. Seek back to the start first.
        file.seek(0)
        image = Image.open(file)
        return image.convert("RGB")
    except Exception as e:
        st.error(f"Invalid image: {str(e)}")
        return None
43
+
44
def fetch_image_from_url(url: str) -> Image.Image:
    """Download an image from *url* and return it as an RGB PIL image.

    Returns None (after showing a Streamlit error) on any failure.
    """
    try:
        with st.spinner("Fetching image from URL..."):
            # Fix: the previous HEAD pre-check rejected servers that disallow
            # HEAD (403/405) even though GET works, and used a different
            # timeout (20s) than the GET (10s). A single GET with
            # raise_for_status() covers all error statuses consistently.
            response = requests.get(url, timeout=20)
            response.raise_for_status()
            return Image.open(io.BytesIO(response.content)).convert("RGB")
    except Exception as e:
        st.error(f"URL Error: {str(e)}")
        return None
56
+
57
def get_image_metadata(img: "Image.Image") -> str:
    """Summarize an image's pixel dimensions, color mode, and file format."""
    size, mode, fmt = img.size, img.mode, img.format
    return f"Size: {size}, Mode: {mode}, Format: {fmt}"
59
+
60
def classify_image_with_retry(image: Image.Image, model_name: str, max_retries=2):
    """POST *image* to the classification API, retrying transient failures.

    Retries (with a 1s pause) on connection errors, timeouts, and generic
    request errors up to `max_retries` extra attempts; HTTP error statuses are
    NOT retried — the server answered, so a retry is unlikely to help.
    Returns the parsed JSON response dict, or None after showing an error.
    """
    # Encode once up front; the same JPEG bytes are reused on every attempt.
    img_bytes = compress_image(image)
    files = {"file": ("image.jpg", img_bytes, "image/jpeg")}
    params = {"model_name": model_name}

    for attempt in range(max_retries + 1):
        try:
            with st.spinner(f"Classifying with {model_name}..."):
                # Generous timeout: the Space may cold-start on first request.
                res = requests.post(API_URL, files=files, params=params, timeout=120)
                res.raise_for_status()
                return res.json()
        except requests.exceptions.ConnectionError:
            if attempt == max_retries:
                st.error("⚠️ The model server is currently offline. Please try again later.")
                return None
            time.sleep(1)
        except requests.exceptions.Timeout:
            if attempt == max_retries:
                st.error("⏳ The request to the model server timed out. Please try again.")
                return None
            time.sleep(1)
        except requests.exceptions.HTTPError as e:
            # Server responded with an error status — report immediately, no retry.
            st.error(f"🚫 HTTP error: {e.response.status_code} - {e.response.reason}")
            return None
        except requests.exceptions.RequestException:
            # Catch-all for other requests-level failures; must come last since
            # the specific exception classes above are its subclasses.
            if attempt == max_retries:
                st.error("🚨 An unexpected error occurred while contacting the model server.")
                return None
            time.sleep(1)
89
+
90
def display_predictions(predictions, model_version, inference_time):
    """Render predictions as labeled confidence bars plus an inference-time caption.

    `predictions` is a list of {"label": str, "confidence": number-in-percent}
    dicts (as produced by the classification API); shows a warning when empty.
    """
    st.subheader(f"Predictions: {model_version}")
    if not predictions:
        st.warning("No predictions above the confidence threshold.")
        return
    # Fix: removed dead code — a pandas DataFrame was built and indexed here
    # but never rendered or read.

    for pred in predictions:
        st.markdown(f"**{pred['label']}**: {pred['confidence']}%")
        # st.progress expects a 0.0-1.0 fraction; confidence is in percent.
        st.progress(pred['confidence'] / 100.0)

    st.caption(f"Inference time: {inference_time:.2f}s")
103
+
104
+ # =================== MAIN APP ===================
105
def main():
    """Streamlit entry point: page setup, sidebar preferences and feedback
    form, image input (upload / webcam / URL), classification with caching,
    session history, and JSON export."""
    # Fix: st.set_page_config must be the FIRST Streamlit command executed;
    # the original called st.markdown("---") before it, which raises
    # StreamlitAPIException at startup.
    st.set_page_config(page_title="Image Classifier", layout="wide", page_icon="🖼️")
    st.markdown("---")
    st.title("🖼️ AI Image Classifier")
    st.caption("Powered by Convolutional Neural Networks (CNNs)")

    st.markdown("""
    📌 Upload or capture an image and choose a CNN model to classify it.

    🔍 **How it works**:
    The selected AI model analyzes your image and returns its best predictions, sorted by confidence.
    """)

    # Initialize session state (setdefault keeps values across reruns).
    st.session_state.setdefault("history", [])
    st.session_state.setdefault("feedback", {})
    st.session_state.setdefault("model_cache", {})

    # Sidebar controls
    with st.sidebar:
        st.markdown("---")
        st.markdown("### ⚙️ Preferences & Model Selection")
        with st.expander("Advanced Options"):
            num_predictions = st.slider(
                "Number of predictions",
                1, 10, 3,
                help="""Set how many predictions to display (1-10).
                Higher values show more alternatives but may include less relevant results."""
            )
            confidence_threshold = st.slider(
                "Confidence threshold (%)",
                0, 100, 0,
                help="""Minimum confidence percentage (0-100%) required to show a prediction.
                Increase to filter out low-confidence results."""
            )
            compare_models = st.checkbox(
                "🔍 Compare Models",
                help="Run both models on the image and compare their predictions."
            )

        model_name = st.selectbox(
            "Select 🧠 AI Model",
            ["efficientnet", "resnet"],
            disabled=compare_models,
            help="""Choose a deep learning architecture:
            • **EfficientNet:** Lightweight and fast (good for mobile/edge devices)
            • **ResNet:** Powerful general-purpose model (best accuracy/speed balance).
            Disabled when 'Compare Models' is active - all models will run simultaneously."""
        )

        st.markdown("---")
        st.subheader("💬 Feedback")

        # Feedback form over previously classified images; widgets only exist
        # when there is history, so the else-branch nulls the bound names.
        with st.form("feedback_form_sidebar"):
            history = st.session_state["history"]
            if history:
                selected = st.selectbox("Select image to review", [h["name"] for h in history],
                                        help="""Choose a previously classified image to provide feedback on.
                                        The model's predictions for this image will be shown below for reference.
                                        Only images with existing classification results appear here.""")
                rating = st.select_slider("Rating (1-5)", options=[1, 2, 3, 4, 5], value=3,
                                          help="""Rate the model's accuracy for this image:
                                          1 = Completely wrong • 2 = Mostly incorrect • 3 = Partially correct
                                          4 = Mostly accurate • 5 = Perfect prediction """)
                selected_item = next((h for h in history if h["name"] == selected), None)
                if selected_item:
                    st.markdown("**Model Predictions:**")
                    for pred in selected_item["predictions"]:
                        st.markdown(f"- {pred['label']}: {pred['confidence']:.1f}%")
                correction = st.text_input("Suggested correction", placeholder="Correct label",
                                           help="""If the AI's prediction was wrong, please provide:
                                           • The accurate label for this image
                                           • Be specific (e.g., 'Golden Retriever' instead of just 'Dog')
                                           • Use singular nouns where applicable
                                           Your input helps train better models!""")
                comment = st.text_area("Additional comments", placeholder="Anything else?",
                                       help="""Share details to improve the model:
                                       • What features did the AI miss?
                                       • Was the mistake understandable?
                                       • Any edge cases we should know about?

                                       (Examples: 'The turtle was partially obscured' or 'Confused labrador with golden retriever')""")
            else:
                st.info("No images classified yet.")
                selected = rating = correction = comment = None

            # `and selected` guards the empty-history case (all names are None).
            if st.form_submit_button("Submit Feedback", type='primary') and selected:
                st.session_state["feedback"][selected] = {
                    "rating": rating,
                    "predictions": selected_item.get("predictions", []),
                    "correction": correction,
                    "comment": comment,
                    "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
                }
                st.toast("Feedback saved!", icon="✅")

    # Image input methods — all three tabs funnel into the same `images` list
    # of (PIL image, display name) pairs.
    images = []
    tab1, tab2, tab3 = st.tabs(["📤 Upload Image", "📷 Use Webcam", "🌐 From URL"])

    with tab1:
        uploaded_files = st.file_uploader("Upload Image(s)", type=SUPPORTED_FORMATS, accept_multiple_files=True)
        for file in uploaded_files:
            img = validate_image(file)
            if img:
                images.append((img, file.name))

    with tab2:
        try:
            picture = st.camera_input("Capture Image")
            if picture:
                img = validate_image(picture)
                if img:
                    images.append((img, f"webcam_{time.strftime('%Y%m%d_%H%M%S')}.jpg"))
        except Exception:
            st.error("Webcam not supported on this device.")

    with tab3:
        url = st.text_input("Image URL", placeholder="https://example.com/image.jpg")
        col1, col2 = st.columns([3, 1])
        if col1.button("Fetch Image", type='primary') and url:
            img = fetch_image_from_url(url)
            if img:
                images.append((img, f"url_{time.strftime('%Y%m%d_%H%M%S')}.jpg"))
        if col2.button("Clear URL", type='primary'):
            # NOTE(review): reassigning this local does not clear the widget —
            # text_input state survives reruns; a real clear needs the widget's
            # key in st.session_state. Left as-is (harmless no-op).
            url = ""

    # Classify images
    if images:
        st.subheader("🖼️ Image Preview")
        for idx, (img, name) in enumerate(images):
            with st.expander(f"Image: {name}", expanded=True):
                col1, col2 = st.columns([1, 2])
                with col1:
                    st.image(img, caption=name, use_container_width=True)
                with col2:
                    st.markdown(get_image_metadata(img))
                    if st.button("🚀 Classify Image", key=f"classify_{idx}", type='primary'):
                        # Run one model, or both when comparison is enabled.
                        models_to_run = ["efficientnet", "resnet"] if compare_models else [model_name]
                        for model in models_to_run:
                            # Per-image, per-model result cache to avoid
                            # re-hitting the API on reruns.
                            cache_key = f"{name}_{model}"
                            result = st.session_state.model_cache.get(cache_key)
                            if result:
                                st.toast(f"Using cached result for {model}")
                            else:
                                result = classify_image_with_retry(img, model)
                                if result:
                                    st.session_state.model_cache[cache_key] = result

                            if result:
                                # Apply threshold filter, then cap at the
                                # requested number of predictions.
                                preds = [p for p in result['predictions'] if p['confidence'] >= confidence_threshold][:num_predictions]
                                display_predictions(preds, result['model_version'], result['inference_time'])
                                st.session_state.history.append({
                                    "name": name,
                                    "predictions": preds,
                                    "model": result['model_version'],
                                    "time": result.get('timestamp', datetime.now().isoformat()),
                                    "thumbnail": create_thumbnail(img)
                                })

    # Show history (most recent first, capped at the last 5 records).
    st.divider()
    st.subheader("📜 Session History")
    if not st.session_state.history:
        st.info("No classification history.")
    else:
        for record in reversed(st.session_state.history[-5:]):
            with st.container(border=True):
                col1, col2 = st.columns([1, 4])
                with col1:
                    if "thumbnail" in record:
                        st.image(io.BytesIO(base64.b64decode(record["thumbnail"])))
                with col2:
                    st.markdown(f"**{record['name']}**")
                    st.caption(f"Model: `{record['model']}` | {record['time']}")
                    if record['predictions']:
                        top_pred = record['predictions'][0]
                        st.markdown(f"**Top Prediction**: {top_pred['label']} ({top_pred['confidence']:.1f}%)")
                    if record['name'] in st.session_state.feedback:
                        fb = st.session_state.feedback[record['name']]
                        st.markdown(f"Feedback: ⭐{fb['rating']}/5")
                        if fb['correction']:
                            st.markdown(f"*Suggested correction: {fb['correction']}*")

    # Download button — exports the full (uncapped) session history as JSON.
    st.download_button(
        "📥 Download Results as JSON",
        data=json.dumps(st.session_state.history, indent=2),
        file_name="classification_history.json",
        type='primary',
        use_container_width=True
    )

    st.markdown("---")
    st.caption("Built with ❤️ using Streamlit")


if __name__ == "__main__":
    main()