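"""
Streamlit app that visualizes decision boundaries and learning curves for several
scikit-learn classifiers on small synthetic 2-D datasets.

Launch it with (assuming this file is saved as app.py):
    streamlit run app.py
"""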
import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification, make_circles, make_blobs, make_moons
from sklearn.model_selection import train_test_split, learning_curve
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, f1_score
from mlxtend.plotting import plot_decision_regions
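# Note: plot_decision_regions is provided by the third-party mlxtend package
# (install it with `pip install mlxtend` if it is missing).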

# Display the logo (the image file is assumed to sit alongside this script)
st.image("innomatics-footer-logo.webp", use_container_width=True)

# Streamlit app title with color
st.markdown("<h1 style='text-align: center; color: #FF5733;'>Boundary Surfaces Visualization</h1>", unsafe_allow_html=True)

# Select dataset
data = st.sidebar.selectbox('Select Dataset', ('Classification', 'Circles', 'Blobs', 'Moons'))

if data == 'Classification':
    X, y = make_classification(n_samples=100, n_features=2, n_redundant=0, random_state=27)
elif data == 'Circles':
    X, y = make_circles(n_samples=100, factor=0.5, noise=0.05, random_state=27)
elif data == 'Blobs':
    X, y = make_blobs(n_samples=250, centers=2, n_features=2, cluster_std=1.0, random_state=27)
elif data == 'Moons':
    X, y = make_moons(n_samples=250, noise=0.1, random_state=27)
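
# All four generators return two-dimensional, two-class data, which is what
# plot_decision_regions expects when drawing a 2-D decision surface.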

# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=27)

def plot_decision_surface(X, y, model, title):
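    """Draw the trained classifier's decision regions over the given data using mlxtend."""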
    plt.figure(figsize=(6,4))
    plot_decision_regions(X, y, clf=model, colors="#7f7f7f,#bcbd22,#17becf")
    plt.title(title)
    st.pyplot(plt.gcf(), clear_figure=True)

# Select classifier
classifier_name = st.sidebar.selectbox('Select Classifier', 
    ('KNN', 'Naive Bayes', 'Logistic Regression', 'Decision Tree', 'Random Forest', 'SVM'))

# Initialize model based on user selection
if classifier_name == 'KNN':
    n_neighbors = st.sidebar.slider('Number of Neighbors (k)', 1, 15, 3)
    weights = st.sidebar.radio('Weight Function', ('uniform', 'distance'))
    algorithm = st.sidebar.selectbox('Algorithm', ('auto', 'ball_tree', 'kd_tree', 'brute'))
    p = st.sidebar.slider("Distance Parameter (p)", 1, 5, 2)
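    # p sets the Minkowski metric order: p=1 is Manhattan distance, p=2 is Euclidean.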
    model = KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm, p=p)

elif classifier_name == 'Naive Bayes':
    model = GaussianNB()

elif classifier_name == 'Logistic Regression':
    model = LogisticRegression()

elif classifier_name == 'Decision Tree':
    model = DecisionTreeClassifier()

elif classifier_name == 'Random Forest':
    n_estimators = st.sidebar.slider('Number of Trees', 10, 200, 100)
    model = RandomForestClassifier(n_estimators=n_estimators)

elif classifier_name == 'SVM':
    kernel = st.sidebar.selectbox('Kernel Type', ('linear', 'poly', 'rbf', 'sigmoid'))
    C = st.sidebar.slider('Regularization (C)', 0.01, 10.0, 1.0)
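    # Smaller C means stronger regularization (smoother boundary); larger C fits the training points more tightly.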
    model = SVC(kernel=kernel, C=C)

# Train model
model.fit(X_train, y_train)

# Make predictions
y_pred = model.predict(X_test)

# Compute accuracy & F1-score
accuracy = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
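# f1_score uses binary averaging by default, which is appropriate here since every dataset option has exactly two classes.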

# Display metrics in Streamlit
st.markdown("<h3 style='color: #4CAF50;'>πŸ“Š Model Performance</h3>", unsafe_allow_html=True)
st.write(f"βœ… **Accuracy:** {accuracy:.2f}")
st.write(f"πŸ“ **F1-score:** {f1:.2f}")

# Plot decision boundary
st.subheader("πŸ“ Decision Boundary")
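# The regions are drawn over the full dataset, although the model was fit on the training split only.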
plot_decision_surface(X, y, model, f'{classifier_name} Decision Surface')

# Plot Learning Curve
def plot_learning_curve(model, X, y):
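    """Plot mean training and cross-validation accuracy (5-fold CV) against training-set size."""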
    train_sizes, train_scores, test_scores = learning_curve(model, X, y, cv=5, scoring='accuracy', train_sizes=np.linspace(0.1, 1.0, 10))
    
    train_mean = np.mean(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    
    plt.figure(figsize=(6,4))
    plt.plot(train_sizes, train_mean, 'o-', label="Training Accuracy", color="blue")
    plt.plot(train_sizes, test_mean, 'o-', label="Validation Accuracy", color="red")
    
    plt.xlabel("Training Samples")
    plt.ylabel("Accuracy")
    plt.title(f"Learning Curve: {classifier_name}")
    plt.legend()
    st.pyplot(plt.gcf(), clear_figure=True)

# Display Learning Curve
st.subheader("πŸ“ˆ Learning Curve")
plot_learning_curve(model, X, y)