import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from scipy.cluster.hierarchy import dendrogram, linkage
import base64
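
# Streamlit app for unsupervised learning on the Mall Customers dataset:
# KMeans, DBSCAN, and hierarchical clustering plus PCA, with diagnostics
# (elbow curve, silhouette score, dendrogram) and CSV export.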

# Function to load the data (an uploaded file, else the bundled CSV)
def load_data(file=None):
    try:
        if file is not None:
            data = pd.read_csv(file)
        else:
            data = pd.read_csv('./Mall_Customers.csv')
        # Drop rows with missing values before clustering
        return data.dropna()
    except Exception as e:
        st.error(f"Error loading data: {e}")
        return None

# Function to preprocess the data
def preprocess_data(data):
    # Drop CustomerID as it is not needed for clustering
    data = data.drop(columns=['CustomerID'])
    # Convert Gender to numerical values
    data['Gender'] = data['Gender'].map({'Male': 0, 'Female': 1})
    # Scale the data, keeping the fitted scaler to transform new points later
    scaler = StandardScaler()
    scaled_data = scaler.fit_transform(data)
    return scaled_data, data, scaler

# Function to perform KMeans clustering
def kmeans_clustering(scaled_data, n_clusters):
    kmeans = KMeans(n_clusters=n_clusters, random_state=42)
    kmeans.fit(scaled_data)
    return kmeans.labels_, kmeans.inertia_

# Function to perform DBSCAN clustering
def dbscan_clustering(scaled_data, eps, min_samples):
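    # eps is the neighborhood radius and min_samples the density threshold;
    # points that fall in no dense region are labeled -1 (noise)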
    dbscan = DBSCAN(eps=eps, min_samples=min_samples)
    dbscan.fit(scaled_data)
    return dbscan.labels_

# Function to perform Hierarchical Clustering
def hierarchical_clustering(scaled_data, n_clusters):
    hierarchical = AgglomerativeClustering(n_clusters=n_clusters)
    hierarchical.fit(scaled_data)
    return hierarchical.labels_

# Function to perform PCA
def perform_pca(scaled_data, n_components):
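    # Returns the projected coordinates and the fitted PCA object, whose
    # components_ and explained_variance_ratio_ are reported in the UI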
    pca = PCA(n_components=n_components)
    pca_data = pca.fit_transform(scaled_data)
    return pca_data, pca

# Function to plot the elbow curve (pick k where adding clusters stops
# reducing inertia sharply)
def plot_elbow_curve(scaled_data, max_clusters):
    inertias = []
    for k in range(1, max_clusters + 1):
        kmeans = KMeans(n_clusters=k, random_state=42)
        kmeans.fit(scaled_data)
        inertias.append(kmeans.inertia_)
    fig = plt.figure(figsize=(10, 6))
    plt.plot(range(1, max_clusters + 1), inertias, marker='o')
    plt.title('Elbow Curve')
    plt.xlabel('Number of Clusters')
    plt.ylabel('Inertia')
    st.pyplot(fig)

# Function to plot the dendrogram (Ward linkage: each merge minimizes the
# increase in total within-cluster variance)
def plot_dendrogram(scaled_data):
    linked = linkage(scaled_data, 'ward')
    fig = plt.figure(figsize=(10, 6))
    dendrogram(linked, orientation='top', distance_sort='descending', show_leaf_counts=True)
    plt.title('Dendrogram')
    plt.xlabel('Sample Index')
    plt.ylabel('Distance')
    st.pyplot(fig)

# Function to plot a 2D scatter of the clustered points
def plot_scatter(data, labels, title):
    fig = plt.figure(figsize=(10, 6))
    sns.scatterplot(x=data[:, 0], y=data[:, 1], hue=labels, palette='viridis', s=100)
    plt.title(title)
    plt.xlabel('PCA Component 1')
    plt.ylabel('PCA Component 2')
    st.pyplot(fig)

# Function to calculate the silhouette score (undefined for a single cluster)
def calculate_silhouette_score(scaled_data, labels):
    if len(set(labels)) > 1:
        return silhouette_score(scaled_data, labels)
    return None

# Function to display cluster assignments (adds a 'Cluster' column in place,
# which download_results then includes in the exported CSV)
def display_cluster_assignments(data, labels):
    data['Cluster'] = labels
    st.write(data)

# Function to allow users to input new data points for prediction
def input_new_data():
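    # Collect one customer record via widgets, mirroring the training columns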
    gender = st.selectbox('Gender', ['Male', 'Female'])
    age = st.number_input('Age', min_value=0, max_value=100, value=30)
    annual_income = st.number_input('Annual Income (k$)', min_value=0, value=60)
    spending_score = st.number_input('Spending Score (1-100)', min_value=1, max_value=100, value=50)
    new_data = pd.DataFrame({
        'Gender': [gender],
        'Age': [age],
        'Annual Income (k$)': [annual_income],
        'Spending Score (1-100)': [spending_score]
    })
    new_data['Gender'] = new_data['Gender'].map({'Male': 0, 'Female': 1})
    return new_data

# Function to predict the cluster for a new data point
def predict_cluster(model, scaler, new_data):
    scaled_new_data = scaler.transform(new_data)
    if isinstance(model, DBSCAN):
        # DBSCAN has no predict(); assign the point to the cluster of the
        # nearest core sample if it lies within eps, otherwise mark it noise (-1)
        if len(model.components_) == 0:
            return [-1]
        distances = np.linalg.norm(model.components_ - scaled_new_data, axis=1)
        nearest = np.argmin(distances)
        if distances[nearest] <= model.eps:
            return [model.labels_[model.core_sample_indices_[nearest]]]
        return [-1]
    else:
        return model.predict(scaled_new_data)

# Function to download results
def download_results(data):
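    # Embed the CSV as a base64 data URI inside an HTML download link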
    csv = data.to_csv(index=False)
    b64 = base64.b64encode(csv.encode()).decode()
    href = f'<a href="data:file/csv;base64,{b64}" download="cluster_results.csv">Download CSV File</a>'
    return href

# Main function
def main():
    st.title('Unsupervised Learning Web Application')
    st.sidebar.title('Upload Data')
    file = st.sidebar.file_uploader('Upload a CSV file', type=['csv'])
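    # Fall back to the bundled Mall_Customers.csv when nothing is uploaded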
    
    data = load_data(file)
    if data is not None:
        # preprocess_data also returns the fitted scaler, which is reused
        # below to transform user-entered points onto the same scale
        scaled_data, original_data, scaler = preprocess_data(data)
        st.write('Preprocessed Data:')
        st.write(original_data)

        st.sidebar.title('Unsupervised Learning Algorithms')
        algorithm = st.sidebar.selectbox('Select Algorithm', ['KMeans Clustering', 'DBSCAN Clustering', 'Hierarchical Clustering', 'PCA'])

        if algorithm == 'KMeans Clustering':
            st.title('KMeans Clustering')
            n_clusters = st.slider('Number of Clusters', min_value=2, max_value=10, value=5)
            if st.button('Run KMeans'):
                # Reuse the helper so the fitting logic lives in one place
                labels, inertia = kmeans_clustering(scaled_data, n_clusters)
                st.write('Cluster Labels:', labels)
                st.write('Inertia:', inertia)
                st.write('Silhouette Score:', calculate_silhouette_score(scaled_data, labels))
                display_cluster_assignments(original_data, labels)
                pca_data, _ = perform_pca(scaled_data, 2)
                plot_scatter(pca_data, labels, 'KMeans Clustering')
                plot_elbow_curve(scaled_data, 10)
                st.markdown(download_results(original_data), unsafe_allow_html=True)

        elif algorithm == 'DBSCAN Clustering':
            st.title('DBSCAN Clustering')
            eps = st.slider('Epsilon', min_value=0.1, max_value=1.0, value=0.5, step=0.1)
            min_samples = st.slider('Minimum Samples', min_value=2, max_value=10, value=5)
            if st.button('Run DBSCAN'):
                labels = dbscan_clustering(scaled_data, eps, min_samples)
                st.write('Cluster Labels:', labels)
                st.write('Silhouette Score:', calculate_silhouette_score(scaled_data, labels))
                display_cluster_assignments(original_data, labels)
                pca_data, _ = perform_pca(scaled_data, 2)
                plot_scatter(pca_data, labels, 'DBSCAN Clustering')
                st.markdown(download_results(original_data), unsafe_allow_html=True)

        elif algorithm == 'Hierarchical Clustering':
            st.title('Hierarchical Clustering')
            n_clusters = st.slider('Number of Clusters', min_value=2, max_value=10, value=5)
            if st.button('Run Hierarchical Clustering'):
                labels = hierarchical_clustering(scaled_data, n_clusters)
                st.write('Cluster Labels:', labels)
                st.write('Silhouette Score:', calculate_silhouette_score(scaled_data, labels))
                display_cluster_assignments(original_data, labels)
                pca_data, _ = perform_pca(scaled_data, 2)
                plot_scatter(pca_data, labels, 'Hierarchical Clustering')
                plot_dendrogram(scaled_data)
                st.markdown(download_results(original_data), unsafe_allow_html=True)

        elif algorithm == 'PCA':
            st.title('Principal Component Analysis')
            n_components = st.slider('Number of Components', min_value=2, max_value=4, value=2)
            if st.button('Run PCA'):
                pca_data, pca = perform_pca(scaled_data, n_components)
                st.write('PCA Components:', pca.components_)
                st.write('Explained Variance Ratio:', pca.explained_variance_ratio_)
                plot_scatter(pca_data, np.zeros(pca_data.shape[0]), 'PCA')
                st.markdown(download_results(pd.DataFrame(pca_data, columns=[f'PC{i+1}' for i in range(n_components)])), unsafe_allow_html=True)

        st.sidebar.title('Input New Data')
        pressed = st.sidebar.button('Predict Cluster')
        # Persist the button press across Streamlit reruns so the input
        # widgets below stay visible while the user edits them
        st.session_state.button_pressed = st.session_state.get('button_pressed', False) or pressed
        if st.session_state.button_pressed:
            new_data = input_new_data()
            if algorithm == 'KMeans Clustering':
                kmeans = KMeans(n_clusters=n_clusters, random_state=42)
                kmeans.fit(scaled_data)
                cluster = predict_cluster(kmeans, scaler, new_data)
                st.write('Predicted Cluster:', cluster[0])
            elif algorithm == 'DBSCAN Clustering':
                dbscan = DBSCAN(eps=eps, min_samples=min_samples)
                dbscan.fit(scaled_data)
                cluster = predict_cluster(dbscan, scaler, new_data)
                st.write('Predicted Cluster:', cluster[0])
            elif algorithm == 'Hierarchical Clustering':
                # AgglomerativeClustering has no predict(); refit on the data
                # plus the new point and read off the last label
                scaled_new_data = scaler.transform(new_data)
                combined_data = np.vstack([scaled_data, scaled_new_data])
                hierarchical = AgglomerativeClustering(n_clusters=n_clusters)
                labels = hierarchical.fit_predict(combined_data)
                cluster = [labels[-1]]
                st.write('Predicted Cluster:', cluster[0])
            elif algorithm == 'PCA':
                # For PCA, transform the new data point
                scaled_new_data = scaler.transform(new_data)
                pca = PCA(n_components=n_components)
                pca.fit(scaled_data)
                pca_new_data = pca.transform(scaled_new_data)
                st.write('PCA transformed data:', pca_new_data[0])
                # Plot the PCA transformation of the new data point alongside existing data
                pca_data = pca.transform(scaled_data)
                fig = plt.figure(figsize=(10, 6))
                plt.scatter(pca_data[:, 0], pca_data[:, 1], c='blue', alpha=0.5, label='Existing Data')
                plt.scatter(pca_new_data[0, 0], pca_new_data[0, 1], c='red', marker='*', s=200, label='New Data')
                plt.title('PCA Visualization with New Data Point')
                plt.xlabel('PC1')
                plt.ylabel('PC2')
                plt.legend()
                st.pyplot(fig)

        st.sidebar.title('Feature Correlation Analysis')
        if st.sidebar.button('Analyze Correlation'):
            corr_matrix = original_data.corr()
            st.write('Correlation Matrix:')
            st.write(corr_matrix)
            # Draw the heatmap on a fresh figure so it does not land on top
            # of axes left over from an earlier plot
            fig = plt.figure(figsize=(10, 6))
            sns.heatmap(corr_matrix, annot=True, cmap='coolwarm')
            st.pyplot(fig)

if __name__ == '__main__':
    main()