manny1313 commited on
Commit
e06826c
·
1 Parent(s): 29f8052

First Commit

Browse files
Files changed (9) hide show
  1. .gitignore +1 -0
  2. Dockerfile +31 -0
  3. Interface.py +60 -0
  4. image_processing.py +131 -0
  5. label_encoder.pkl +3 -0
  6. main.py +94 -0
  7. model.pkl +3 -0
  8. requirements.txt +9 -0
  9. scaler.pkl +3 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ venv/
Dockerfile ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use official Python image
FROM python:3.9-slim

# Set environment variables:
#  - PYTHONDONTWRITEBYTECODE: don't emit .pyc files (smaller image)
#  - PYTHONUNBUFFERED: flush stdout/stderr immediately (live container logs)
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# Set work directory
WORKDIR /app

# Install system dependencies needed by OpenCV (libGL, glib) and for
# building any native Python extensions (build-essential).
# The apt cache is removed in the same layer to keep the image small.
RUN apt-get update && apt-get install -y \
    build-essential \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libopencv-dev \
    && rm -rf /var/lib/apt/lists/*

# Copy only requirements first so dependency installation is cached
# across code-only rebuilds.
COPY requirements.txt .
# Single RUN (instead of two) to avoid an extra image layer;
# --no-cache-dir keeps pip's download cache out of the image.
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt

# Copy project files
COPY . .

# Expose port for FastAPI
EXPOSE 7860

# Command to run the application
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
Interface.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Streamlit front-end for the soil image classifier.

Uploads an image, shows its RGB histogram and preprocessing stages, and
runs either whole-image or region-based (k-means segmented) classification
via the helpers in image_processing.py.
"""
import streamlit as st
import os
import cv2
import numpy as np
from PIL import Image
import image_processing
import matplotlib.pyplot as plt

st.set_page_config(page_title="Soil Image Processor", layout="wide")

# Convert PIL image to an RGB numpy array of shape (H, W, 3).
def np_image(pil_img):
    return np.array(pil_img.convert("RGB"))

# UI layout
st.title("🧪 Soil Image Processor")

st.subheader("📊 Image Analysis & Prediction")
uploaded_file = st.file_uploader("Upload an image for prediction", type=["png", "jpg", "jpeg"])

if uploaded_file:
    pil_img = Image.open(uploaded_file)
    img_array = np_image(pil_img)  # RGB channel order

    # Step 1: Show original image
    st.image(pil_img, caption="Uploaded Image", use_container_width=True)

    # Step 2: RGB Histogram
    hist_fig = image_processing.plot_rgb_histogram(img_array)
    st.pyplot(hist_fig)

    # Step 3: Preprocessing.
    # preprocessing() expects BGR input (it converts BGR->RGB internally),
    # so swap the RGB array to BGR first. (COLOR_RGB2BGR is the same
    # channel swap as the original COLOR_BGR2RGB constant.)
    rgb_img, clahe_img, sharp_img = image_processing.preprocessing(cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR))
    st.image([rgb_img, clahe_img, sharp_img], caption=["RGB Image", "CLAHE Image", "Sharpened Image"], width=250)

    # Step 4: Choose mode: Whole image vs Region-based
    st.markdown("### 🔍 Classification Mode")
    use_segmentation = st.checkbox("Enable multi-region (segmented) classification")

    if use_segmentation:
        k = st.slider("Select number of regions (clusters)", min_value=2, max_value=5)
        segmented_image, region_predictions = image_processing.segment_and_classify_regions(img_array, k_clusters=k)

        # segment_and_classify_regions draws on a copy of its (RGB) input,
        # so the result is already RGB — display it directly. The original
        # BGR2RGB conversion here swapped red/blue in the displayed image,
        # and use_column_width is deprecated in favor of use_container_width.
        st.image(segmented_image, caption="Segmented & Classified", use_container_width=True)

        st.markdown("### 🧩 Region-wise Predictions")
        for idx, region in enumerate(region_predictions):
            st.write(f"**Region {idx + 1}:** `{region['class']}` with confidence `{region['confidence']:.2f}`")
            x, y, w, h = region['bbox']
            cropped = img_array[y:y+h, x:x+w]
            st.image(cropped, caption=f"Region {idx + 1}", width=200)

    else:
        # Step 5: Feature Extraction & Prediction on whole image
        features_df, predicted_class, confidence = image_processing.predict_image_class_with_features(cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR))

        st.subheader("📈 Extracted Features")
        st.dataframe(features_df)

        st.success(f"🔮 Predicted Class: **{predicted_class}** (Confidence: {confidence:.2f})")
image_processing.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# image_processing.py
# Shared preprocessing, feature-extraction and classification helpers used
# by both the Streamlit UI (Interface.py) and the FastAPI service (main.py).
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage.feature import graycomatrix, graycoprops, local_binary_pattern
import joblib
import warnings

# Silence all library warnings so they don't clutter the app output.
# NOTE(review): this also hides genuinely useful warnings — consider
# narrowing to specific categories.
warnings.filterwarnings("ignore")

# Load model and preprocessing tools once at import time. Paths are
# relative to the process working directory (/app inside the Docker image),
# so the service must be started from the project root.
model = joblib.load("model.pkl")
label_encoder = joblib.load("label_encoder.pkl")
scaler = joblib.load("scaler.pkl")
# Preprocessing: Resize, apply CLAHE, sharpen
def preprocessing(single_image, count=1):
    """Resize a BGR image to 256x256, contrast-enhance it with per-channel
    CLAHE, and sharpen it via an unsharp mask.

    Returns a tuple ``(rgb_image, clahe_image, sharpened_rgb)``; all three
    are in RGB channel order. The ``count`` parameter is unused and is kept
    only for interface compatibility with existing callers.
    """
    resized = cv2.resize(single_image, (256, 256))
    rgb_image = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)

    # Apply CLAHE to each RGB channel independently, then reassemble.
    clahe = cv2.createCLAHE(clipLimit=0.4, tileGridSize=(8, 8))
    enhanced_channels = tuple(clahe.apply(channel) for channel in cv2.split(rgb_image))
    clahe_image = cv2.merge(enhanced_channels)
    clahe_bgr = cv2.cvtColor(clahe_image, cv2.COLOR_RGB2BGR)

    # Unsharp mask: sharp = 1.5 * image - 0.5 * blurred(image).
    blurred = cv2.GaussianBlur(clahe_bgr, (5, 5), 1.5)
    sharp = cv2.addWeighted(clahe_bgr, 1.5, blurred, -0.5, 0)

    return rgb_image, clahe_image, cv2.cvtColor(sharp, cv2.COLOR_BGR2RGB)
# RGB histogram plotting
def plot_rgb_histogram(image):
    """Plot per-channel intensity histograms of an RGB image.

    The only caller (Interface.py) passes an RGB array, so channel 0 is
    red — the line colors now match the channels they represent. (The
    original iterated ('b', 'g', 'r'), plotting the red channel in blue.)

    Returns the matplotlib Figure so the caller can hand it to st.pyplot.
    """
    fig, ax = plt.subplots()
    for i, col in enumerate(('r', 'g', 'b')):
        hist = cv2.calcHist([image], [i], None, [256], [0, 256])
        ax.plot(hist, color=col)
    ax.set_title("RGB Histogram")
    ax.set_xlim([0, 256])
    return fig
# Extract features: GLCM, LBP, color, edge, etc.
def feature_extraction(image, return_df=True):
    """Compute the 10 hand-crafted features the classifier was trained on.

    Parameters
    ----------
    image : 3-channel array; channel 0 is treated as red by the color
        means and by the RGB->gray conversion (assumes RGB order —
        TODO confirm all callers pass RGB).
    return_df : if True, return a 1-row pandas DataFrame whose column
        names presumably match what the scaler was fitted on; otherwise
        a (1, 10) numpy array.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Gray-level co-occurrence matrix texture statistics (distance 1, angle 0).
    glcm = graycomatrix(gray, distances=[1], angles=[0], levels=256, symmetric=True, normed=True)
    contrast = graycoprops(glcm, 'contrast')[0, 0]
    correlation = graycoprops(glcm, 'correlation')[0, 0]
    energy = graycoprops(glcm, 'energy')[0, 0]
    homogeneity = graycoprops(glcm, 'homogeneity')[0, 0]

    # Mean of the uniform local binary pattern response (8 neighbors, radius 1).
    lbp = local_binary_pattern(gray, P=8, R=1, method='uniform')
    lbp_mean = np.mean(lbp)

    mean_r = np.mean(image[:, :, 0])
    mean_g = np.mean(image[:, :, 1])
    mean_b = np.mean(image[:, :, 2])
    # NOTE(review): each term simplifies to 2 - mean/255 (range ~[1, 2]),
    # not a 0..1 "blackness" score — looks like a sign slip, but the model
    # was trained on this exact value, so it must NOT be changed without
    # retraining model.pkl/scaler.pkl.
    diff_black = ((1-(mean_r-255)/255) + (1-(mean_g-255)/255) + (1-(mean_b-255)/255))/3

    # Edge density: number of pixels with any nonzero Sobel gradient magnitude.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)
    edge_count = np.sum(cv2.magnitude(sobelx, sobely) > 0)

    features = [contrast, correlation, energy, homogeneity, lbp_mean,
                mean_r, mean_g, mean_b, edge_count, diff_black]

    if return_df:
        df = pd.DataFrame([features], columns=[
            "Contrast", "Correlation", "Energy", "Homogeneity", "LBP_Mean",
            "Mean_R", "Mean_G", "Mean_B", "Edge_Count", "Black"
        ])
        return df
    else:
        return np.array([features])
# Predict class and confidence from image
def predict_image_class_with_features(image):
    """Classify a whole image: preprocess, extract features, scale, predict.

    Returns ``(features_df, predicted_class, confidence)`` where
    ``features_df`` is the unscaled 1-row feature DataFrame, the class is
    the decoded string label, and confidence is the top class probability.
    """
    sharpened = preprocessing(image, count=0)[2]
    features_df = feature_extraction(sharpened)
    scaled = scaler.transform(features_df)
    predicted_class = label_encoder.inverse_transform(model.predict(scaled))[0]
    confidence = model.predict_proba(scaled).max()
    return features_df, predicted_class, confidence
# Segment image and classify each region
def segment_and_classify_regions(image, k_clusters=2):
    """K-means color segmentation followed by per-region classification.

    Parameters
    ----------
    image : 3-channel input image; annotations are drawn on a copy of it,
        so the returned image keeps the input's channel order and size.
    k_clusters : number of k-means color clusters (one region per cluster).

    Returns
    -------
    (annotated_image, region_predictions) where region_predictions is a
    list of dicts with keys "class", "confidence", "bbox" (x, y, w, h).
    NOTE(review): bboxes come from the 256x256 preprocessed image while
    rectangles are drawn on the full-size input copy — these only line up
    when the input is already 256x256. Confirm intended.
    """
    rgb_img, _, sharp_img = preprocessing(image)
    # Flatten pixels into an (N, 3) float32 matrix, as cv2.kmeans requires.
    reshaped = sharp_img.reshape((-1, 3)).astype(np.float32)

    # Stop after 100 iterations or when centers move less than epsilon 0.2.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
    _, labels, centers = cv2.kmeans(reshaped, k_clusters, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    # Per-pixel cluster id, reshaped back to the 2-D image grid.
    segmented = labels.flatten().reshape(sharp_img.shape[:2])

    output = image.copy()
    region_predictions = []

    for i in range(k_clusters):
        # Binary mask of pixels assigned to cluster i.
        mask = (segmented == i).astype(np.uint8) * 255
        region = cv2.bitwise_and(sharp_img, sharp_img, mask=mask)

        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            continue

        # Bounding box of the cluster's largest connected component.
        x, y, w, h = cv2.boundingRect(max(contours, key=cv2.contourArea))
        region_crop = region[y:y+h, x:x+w]

        if region_crop.size == 0:
            continue

        # Classify the cropped region with the same trained pipeline used
        # for whole-image prediction.
        features_df = feature_extraction(region_crop)
        features_scaled = scaler.transform(features_df)
        prediction = model.predict(features_scaled)
        predicted_class = label_encoder.inverse_transform(prediction)[0]
        confidence = np.max(model.predict_proba(features_scaled))

        region_predictions.append({
            "class": predicted_class,
            "confidence": confidence,
            "bbox": (x, y, w, h)
        })

        # Annotate the output copy with the box and the class/confidence label.
        cv2.rectangle(output, (x, y), (x+w, y+h), (0,255,0), 2)
        cv2.putText(output, f"{predicted_class} ({confidence*100:.1f}%)", (x, y-10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1)

    return output, region_predictions
label_encoder.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d49176b41707d28a8a63a38da72d93e246edb9f20347b228724ed4bbfa785868
3
+ size 651
main.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# FastAPI service exposing the soil classifier over HTTP.
from fastapi import FastAPI, UploadFile, File
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
import numpy as np
import cv2
import io
import base64
from PIL import Image
import image_processing

app = FastAPI(
    title="Soil Image Classification API",
    description="Classifies soil images based on visual features or segmented regions.",
    version="1.0"
)

# Allow CORS (helpful for frontend dev)
# NOTE(review): wildcard origins with allow_credentials=True is permissive;
# restrict allow_origins before exposing this publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # You can restrict this to your frontend domain
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Convert image bytes to OpenCV format
def read_imagefile(image_bytes):
    """Decode raw upload bytes into a BGR numpy array (OpenCV convention)."""
    pil_image = Image.open(io.BytesIO(image_bytes))
    rgb_array = np.array(pil_image.convert("RGB"))
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
# Encode image (OpenCV format) to base64
def encode_image_to_base64(image):
    """JPEG-encode a BGR image and return the bytes as a base64 string."""
    _, jpeg_buffer = cv2.imencode('.jpg', image)
    encoded = base64.b64encode(jpeg_buffer)
    return encoded.decode('utf-8')
@app.get("/")
def root():
    """Health-check endpoint confirming the service is up."""
    status = {"message": "Soil Image Classifier API is running."}
    return status
# Whole image prediction
@app.post("/predictsoil/")
async def predict_image(file: UploadFile = File(...)):
    """Classify an uploaded soil image as a whole.

    Returns the predicted class, its confidence, and the extracted feature
    values; on any failure, a 500 response with the error message as JSON.
    """
    try:
        raw_bytes = await file.read()
        bgr_image = read_imagefile(raw_bytes)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

        features_df, predicted_class, confidence = \
            image_processing.predict_image_class_with_features(rgb_image)

        payload = {
            "predicted_class": predicted_class,
            "confidence": float(confidence),
            "features": features_df.to_dict(orient="records")[0],
        }
        return payload

    except Exception as e:
        # Boundary handler: surface the error as JSON instead of an
        # opaque 500 from the framework.
        return JSONResponse(status_code=500, content={"error": str(e)})
# Region-based prediction
@app.post("/predict-regions/")
async def predict_regions(file: UploadFile = File(...), k_clusters: int = 2):
    """Segment an uploaded image into k color clusters and classify each region.

    Returns the region count, per-region class/confidence/bounding box, and
    the annotated segmented image as a base64 JPEG; on any failure, a 500
    response with the error message as JSON.
    """
    try:
        raw_bytes = await file.read()
        rgb_image = cv2.cvtColor(read_imagefile(raw_bytes), cv2.COLOR_BGR2RGB)

        segmented_image, region_predictions = image_processing.segment_and_classify_regions(
            rgb_image,
            k_clusters=k_clusters
        )

        base64_segmented = encode_image_to_base64(
            cv2.cvtColor(segmented_image, cv2.COLOR_BGR2RGB)
        )

        results = [
            {
                "class": region["class"],
                "confidence": float(region["confidence"]),
                "bbox": {
                    "x": region["bbox"][0],
                    "y": region["bbox"][1],
                    "width": region["bbox"][2],
                    "height": region["bbox"][3],
                },
            }
            for region in region_predictions
        ]

        return {
            "region_count": len(results),
            "regions": results,
            "segmented_image_base64": base64_segmented
        }

    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})
model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76268da8935cd3067c9f4710da47b39e1abaa83a6fe64bd7729c18567c504b7c
3
+ size 670680
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
fastapi
uvicorn
numpy
opencv-python
scikit-image
scikit-learn
joblib
pillow
matplotlib
# Added: imported/required by the project but previously missing.
pandas            # imported by image_processing.py
python-multipart  # required by FastAPI for UploadFile/File form parsing
streamlit         # Interface.py front-end
scaler.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88236a0f28d80e350f3bb246c230229134e136f8afc5120e25de030153c0b9e7
3
+ size 1303