Update app.py
Browse files
app.py
CHANGED
|
@@ -1,80 +1,74 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
|
| 3 |
-
|
| 4 |
-
ml_content = '''
|
| 5 |
-
## What is Machine Learning (ML) :
|
| 6 |
-
๐ค **Machine Learning (ML)** is a branch of **Artificial Intelligence (AI)** that focuses on developing systems that can learn from and make decisions or predictions based on data. ๐ Instead of being explicitly programmed to perform specific tasks, machine learning algorithms use patterns and insights derived from data to improve their performance over time. ๐
|
| 7 |
-
|
| 8 |
-
At its core, **machine learning** enables computers to act autonomously in situations where explicit instructions are impractical or impossible, making it an essential technology in todayโs data-driven world. ๐
|
| 9 |
-
|
| 10 |
-
'''
|
| 11 |
-
return ml_content
|
| 12 |
-
|
| 13 |
def introduction_to_ml():
|
| 14 |
introduction_blog = '''
|
| 15 |
-
## Introduction to Machine Learning (ML)
|
| 16 |
-
|
| 17 |
|
| 18 |
-
### Types of Machine Learning
|
| 19 |
There are three main types of machine learning:
|
| 20 |
|
| 21 |
-
1.
|
| 22 |
-
Supervised learning algorithms learn from labeled data. The model is trained using a dataset where the input data and the correct output are both provided. The goal is to learn a mapping from inputs to outputs. Examples include linear regression
|
| 23 |
|
| 24 |
-
2.
|
| 25 |
-
In unsupervised learning, the algorithm is given data without any labeled outputs. The goal is to find hidden patterns or groupings in the data. Examples include clustering (e.g., K-means) and dimensionality reduction techniques (e.g., PCA).
|
| 26 |
|
| 27 |
-
3.
|
| 28 |
-
Reinforcement learning involves an agent that learns to make decisions by interacting with an environment to maximize a cumulative reward. It is widely used in robotics
|
| 29 |
|
| 30 |
-
### Popular Machine Learning Algorithms
|
| 31 |
Some of the most commonly used ML algorithms include:
|
| 32 |
|
| 33 |
-
-
|
| 34 |
-
-
|
| 35 |
-
-
|
| 36 |
-
-
|
| 37 |
-
-
|
| 38 |
-
-
|
| 39 |
|
| 40 |
-
#### Applications of Machine Learning
|
| 41 |
Machine learning is used in a wide variety of fields, including:
|
| 42 |
|
| 43 |
-
-
|
| 44 |
-
-
|
| 45 |
-
-
|
| 46 |
-
-
|
| 47 |
|
| 48 |
-
### Conclusion
|
| 49 |
-
Machine learning continues to evolve, with new algorithms, techniques, and applications emerging regularly. As the amount of data grows and computational power increases
|
| 50 |
'''
|
| 51 |
|
| 52 |
return introduction_blog
|
|
|
|
|
|
|
| 53 |
def supervised_learning():
|
| 54 |
supervised = '''
|
| 55 |
-
### Supervised Learning
|
| 56 |
Supervised learning algorithms learn from labeled data. The model is trained using a dataset where the input data and the correct output are both provided. The goal is to learn a mapping from inputs to outputs.
|
| 57 |
|
| 58 |
-
|
| 59 |
-
-
|
| 60 |
```python
|
| 61 |
from sklearn.linear_model import LinearRegression
|
| 62 |
X = [[1], [2], [3], [4], [5]] # Features
|
| 63 |
y = [1, 2, 2.5, 4, 5] # Target
|
| 64 |
model = LinearRegression()
|
| 65 |
model.fit(X, y)
|
| 66 |
-
predictions = model.predict([[6]]) # Predict for 6 hours of study
|
| 67 |
```
|
| 68 |
'''
|
| 69 |
return supervised
|
| 70 |
|
|
|
|
| 71 |
def unsupervised_learning():
|
| 72 |
unsupervised = '''
|
| 73 |
-
### Unsupervised Learning
|
| 74 |
-
In unsupervised learning, the algorithm is given data without any labeled outputs. The goal is to find hidden patterns or groupings in the data. Examples include clustering (e.g., K-means) and dimensionality reduction techniques (e.g., PCA).
|
| 75 |
|
| 76 |
-
|
| 77 |
-
-
|
| 78 |
```python
|
| 79 |
from sklearn.cluster import KMeans
|
| 80 |
X = [[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]]
|
|
@@ -85,13 +79,14 @@ def unsupervised_learning():
|
|
| 85 |
'''
|
| 86 |
return unsupervised
|
| 87 |
|
|
|
|
| 88 |
def reinforcement_learning():
|
| 89 |
reinforcement = '''
|
| 90 |
-
### Reinforcement Learning
|
| 91 |
-
Reinforcement learning involves an agent that learns to make decisions by interacting with an environment to maximize a cumulative reward. It is widely used in robotics
|
| 92 |
|
| 93 |
-
|
| 94 |
-
-
|
| 95 |
```python
|
| 96 |
import numpy as np
|
| 97 |
Q = np.zeros((5, 5)) # Example Q-table for 5 states and 5 actions
|
|
@@ -105,13 +100,15 @@ def reinforcement_learning():
|
|
| 105 |
```
|
| 106 |
'''
|
| 107 |
return reinforcement
|
|
|
|
|
|
|
| 108 |
def linear_regression():
|
| 109 |
linear = '''
|
| 110 |
-
### Linear Regression
|
| 111 |
Linear regression is used to predict a continuous value based on one or more input features. It finds the best-fit line to minimize the error between the predicted and actual values.
|
| 112 |
|
| 113 |
-
|
| 114 |
-
-
|
| 115 |
```python
|
| 116 |
from sklearn.linear_model import LinearRegression
|
| 117 |
X = [[1], [2], [3], [4], [5]] # Features (e.g., years of experience)
|
|
@@ -123,13 +120,14 @@ def linear_regression():
|
|
| 123 |
'''
|
| 124 |
return linear
|
| 125 |
|
|
|
|
| 126 |
def logistic_regression():
|
| 127 |
logistic = '''
|
| 128 |
-
### Logistic Regression
|
| 129 |
Logistic regression is used for binary classification tasks, where the goal is to predict one of two outcomes, such as pass/fail or spam/not spam.
|
| 130 |
|
| 131 |
-
|
| 132 |
-
-
|
| 133 |
```python
|
| 134 |
from sklearn.linear_model import LogisticRegression
|
| 135 |
from sklearn.datasets import load_iris
|
|
@@ -143,13 +141,14 @@ def logistic_regression():
|
|
| 143 |
'''
|
| 144 |
return logistic
|
| 145 |
|
|
|
|
| 146 |
def decision_trees():
|
| 147 |
decision = '''
|
| 148 |
-
### Decision Trees
|
| 149 |
Decision trees split the data into subsets based on feature values, creating a tree-like model. It is used for both classification and regression tasks.
|
| 150 |
|
| 151 |
-
|
| 152 |
-
-
|
| 153 |
```python
|
| 154 |
from sklearn.tree import DecisionTreeClassifier
|
| 155 |
from sklearn.datasets import load_iris
|
|
@@ -163,13 +162,14 @@ def decision_trees():
|
|
| 163 |
'''
|
| 164 |
return decision
|
| 165 |
|
|
|
|
| 166 |
def knn():
|
| 167 |
knn = '''
|
| 168 |
-
### K-Nearest Neighbors (KNN)
|
| 169 |
KNN is a simple, non-parametric algorithm that classifies data based on the majority vote of its nearest neighbors.
|
| 170 |
|
| 171 |
-
|
| 172 |
-
-
|
| 173 |
```python
|
| 174 |
from sklearn.neighbors import KNeighborsClassifier
|
| 175 |
from sklearn.datasets import load_iris
|
|
@@ -183,65 +183,31 @@ def knn():
|
|
| 183 |
'''
|
| 184 |
return knn
|
| 185 |
|
|
|
|
| 186 |
def svm():
|
| 187 |
svm = '''
|
| 188 |
-
### Support Vector Machines (SVM)
|
| 189 |
-
SVM is a powerful classifier that works well for high-dimensional data. It tries to find the hyperplane that best separates the data points of different
|
| 190 |
-
|
| 191 |
-
**Example**:
|
| 192 |
-
- **Classifying Iris Flowers**: An SVM can be used to classify Iris flowers into different species.
|
| 193 |
-
```python
|
| 194 |
-
from sklearn.svm import SVC
|
| 195 |
-
from sklearn.datasets import load_iris
|
| 196 |
-
data = load_iris()
|
| 197 |
-
X = data.data
|
| 198 |
-
y = data.target
|
| 199 |
-
model = SVC(kernel='linear')
|
| 200 |
-
model.fit(X, y)
|
| 201 |
-
predictions = model.predict(X)
|
| 202 |
-
```
|
| 203 |
'''
|
| 204 |
return svm
|
| 205 |
-
|
| 206 |
-
def neural_networks():
|
| 207 |
-
neural = '''
|
| 208 |
-
### Neural Networks
|
| 209 |
-
Neural networks are modeled after the human brain, with layers of interconnected nodes (neurons) used for tasks like image and speech recognition.
|
| 210 |
-
|
| 211 |
-
**Example**:
|
| 212 |
-
- **Classifying Handwritten Digits**: A simple neural network can be used to classify digits from the MNIST dataset.
|
| 213 |
-
```python
|
| 214 |
-
from sklearn.neural_network import MLPClassifier
|
| 215 |
-
from sklearn.datasets import load_iris
|
| 216 |
-
data = load_iris()
|
| 217 |
-
X = data.data
|
| 218 |
-
y = data.target
|
| 219 |
-
model = MLPClassifier(hidden_layer_sizes=(10,), max_iter=1000)
|
| 220 |
-
model.fit(X, y)
|
| 221 |
-
predictions = model.predict(X)
|
| 222 |
-
```
|
| 223 |
-
'''
|
| 224 |
-
return neural
|
| 225 |
-
|
| 226 |
-
# Sidebar for content navigation
|
| 227 |
-
# Sidebar for content navigation
|
| 228 |
st.sidebar.header("๐ Contents")
|
| 229 |
|
| 230 |
# Show Introduction first in the sidebar
|
| 231 |
-
page = st.sidebar.radio("
|
| 232 |
["Introduction", "Types of Machine Learning", "Popular Algorithms"])
|
| 233 |
|
| 234 |
# Conditional options based on sidebar selection
|
| 235 |
if page == "Types of Machine Learning":
|
| 236 |
types_of_ml = st.sidebar.radio("๐ Types of Machine Learning",
|
| 237 |
-
["Supervised Learning", "Unsupervised Learning", "Reinforcement Learning"])
|
| 238 |
else:
|
| 239 |
types_of_ml = None
|
| 240 |
|
| 241 |
if page == "Popular Algorithms":
|
| 242 |
popular_algorithms = st.sidebar.radio("๐ Popular Algorithms",
|
| 243 |
-
["Linear Regression", "Logistic Regression", "Decision Trees",
|
| 244 |
-
"K-Nearest Neighbors (KNN)", "Support Vector Machines (SVM)", "Neural Networks"])
|
| 245 |
else:
|
| 246 |
popular_algorithms = None
|
| 247 |
|
|
@@ -252,22 +218,22 @@ st.markdown("<h1 style='text-align: center; color: orange;'>Machine Learning (ML
|
|
| 252 |
if page == "Introduction":
|
| 253 |
st.markdown(introduction_to_ml())
|
| 254 |
|
| 255 |
-
elif types_of_ml == "Supervised Learning":
|
| 256 |
st.markdown(supervised_learning())
|
| 257 |
-
elif types_of_ml == "Unsupervised Learning":
|
| 258 |
st.markdown(unsupervised_learning())
|
| 259 |
-
elif types_of_ml == "Reinforcement Learning":
|
| 260 |
st.markdown(reinforcement_learning())
|
| 261 |
|
| 262 |
-
elif popular_algorithms == "Linear Regression":
|
| 263 |
st.markdown(linear_regression())
|
| 264 |
-
elif popular_algorithms == "Logistic Regression":
|
| 265 |
st.markdown(logistic_regression())
|
| 266 |
-
elif popular_algorithms == "Decision Trees":
|
| 267 |
st.markdown(decision_trees())
|
| 268 |
-
elif popular_algorithms == "K-Nearest Neighbors (KNN)":
|
| 269 |
st.markdown(knn())
|
| 270 |
-
elif popular_algorithms == "Support Vector Machines (SVM)":
|
| 271 |
st.markdown(svm())
|
| 272 |
-
elif popular_algorithms == "Neural Networks":
|
| 273 |
-
st.markdown(neural_networks())
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
|
| 3 |
+
# Introduction to Machine Learning
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
def introduction_to_ml():
    """Return the markdown body for the "Introduction" page as a single string."""
    # The page content is a markdown document rendered later via st.markdown().
    return '''
## ๐ค Introduction to Machine Learning (ML)
Machine Learning (ML) is a subset of Artificial Intelligence (AI) that enables computers to learn from data and make predictions or decisions without being explicitly programmed. It has revolutionized many industries and plays a crucial role in technologies such as self-driving cars ๐, recommendation systems ๐ฑ, and facial recognition ๐๏ธ.

### ๐ Types of Machine Learning
There are three main types of machine learning:

1. **๐ Supervised Learning**:
Supervised learning algorithms learn from labeled data. The model is trained using a dataset where the input data and the correct output are both provided. The goal is to learn a mapping from inputs to outputs. Examples include linear regression ๐, logistic regression ๐งโ๐ป, and decision trees ๐ณ.

2. **๐ Unsupervised Learning**:
In unsupervised learning, the algorithm is given data without any labeled outputs. The goal is to find hidden patterns or groupings in the data. Examples include clustering ๐ง (e.g., K-means) and dimensionality reduction techniques ๐๏ธ (e.g., PCA).

3. **๐ Reinforcement Learning**:
Reinforcement learning involves an agent that learns to make decisions by interacting with an environment to maximize a cumulative reward. It is widely used in robotics ๐ค, game AI ๐ฎ, and real-time decision-making systems.

### ๐ Popular Machine Learning Algorithms
Some of the most commonly used ML algorithms include:

- **๐ Linear Regression**: A simple algorithm used for predicting continuous values.
- **๐ Logistic Regression**: Used for binary classification problems.
- **๐ณ Decision Trees**: A tree-like model used for both classification and regression tasks.
- **๐ K-Nearest Neighbors (KNN)**: A non-parametric method used for classification and regression.
- **โก Support Vector Machines (SVM)**: A powerful classifier that works well for high-dimensional spaces.
- **๐ง Neural Networks**: A set of algorithms, modeled after the human brain, that are used for complex tasks like image and speech recognition.

#### ๐ Applications of Machine Learning
Machine learning is used in a wide variety of fields, including:

- **๐ฅ Healthcare**: ML is used for predicting diseases, recommending treatments, and analyzing medical data.
- **๐ฐ Finance**: Used for fraud detection, algorithmic trading, and risk analysis.
- **๐๏ธ E-commerce**: ML powers recommendation systems, personalized marketing, and customer support chatbots.
- **๐ Self-driving Cars**: ML algorithms help autonomous vehicles navigate and make real-time decisions.

### ๐ Conclusion
Machine learning continues to evolve, with new algorithms, techniques, and applications emerging regularly. As the amount of data grows ๐ and computational power increases โก, the potential of ML to impact industries and improve our daily lives is limitless.
'''
|
| 44 |
+
|
| 45 |
+
# Supervised Learning
|
| 46 |
def supervised_learning():
    """Return the markdown body for the "Supervised Learning" page as a single string."""
    # Contains an illustrative (non-executed) scikit-learn snippet inside a
    # fenced code block; the whole thing is rendered later via st.markdown().
    return '''
### ๐ Supervised Learning
Supervised learning algorithms learn from labeled data. The model is trained using a dataset where the input data and the correct output are both provided. The goal is to learn a mapping from inputs to outputs.

**๐ Example**:
- **๐ Linear Regression**: Used to predict a continuous value, such as predicting house prices ๐ .
```python
from sklearn.linear_model import LinearRegression
X = [[1], [2], [3], [4], [5]] # Features
y = [1, 2, 2.5, 4, 5] # Target
model = LinearRegression()
model.fit(X, y)
predictions = model.predict([[6]]) # Predict for 6 hours of study ๐
```
'''
|
| 63 |
|
| 64 |
+
# Unsupervised Learning
|
| 65 |
def unsupervised_learning():
|
| 66 |
unsupervised = '''
|
| 67 |
+
### ๐ Unsupervised Learning
|
| 68 |
+
In unsupervised learning, the algorithm is given data without any labeled outputs. The goal is to find hidden patterns or groupings in the data. Examples include clustering ๐ง (e.g., K-means) and dimensionality reduction techniques ๐๏ธ (e.g., PCA).
|
| 69 |
|
| 70 |
+
**๐ Example**:
|
| 71 |
+
- **๐ K-Means Clustering**: Grouping data points into clusters based on similarity.
|
| 72 |
```python
|
| 73 |
from sklearn.cluster import KMeans
|
| 74 |
X = [[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]]
|
|
|
|
| 79 |
'''
|
| 80 |
return unsupervised
|
| 81 |
|
| 82 |
+
# Reinforcement Learning
|
| 83 |
def reinforcement_learning():
|
| 84 |
reinforcement = '''
|
| 85 |
+
### ๐
Reinforcement Learning
|
| 86 |
+
Reinforcement learning involves an agent that learns to make decisions by interacting with an environment to maximize a cumulative reward. It is widely used in robotics ๐ค, game AI ๐ฎ, and real-time decision-making systems.
|
| 87 |
|
| 88 |
+
**๐ Example**:
|
| 89 |
+
- **๐ Q-Learning**: A reinforcement learning algorithm where an agent learns to maximize rewards by updating Q-values.
|
| 90 |
```python
|
| 91 |
import numpy as np
|
| 92 |
Q = np.zeros((5, 5)) # Example Q-table for 5 states and 5 actions
|
|
|
|
| 100 |
```
|
| 101 |
'''
|
| 102 |
return reinforcement
|
| 103 |
+
|
| 104 |
+
# Linear Regression
|
| 105 |
def linear_regression():
|
| 106 |
linear = '''
|
| 107 |
+
### ๐ Linear Regression
|
| 108 |
Linear regression is used to predict a continuous value based on one or more input features. It finds the best-fit line to minimize the error between the predicted and actual values.
|
| 109 |
|
| 110 |
+
**๐ Example**:
|
| 111 |
+
- **๐ Predicting House Prices**: Predict the price of a house based on its features such as size and location.
|
| 112 |
```python
|
| 113 |
from sklearn.linear_model import LinearRegression
|
| 114 |
X = [[1], [2], [3], [4], [5]] # Features (e.g., years of experience)
|
|
|
|
| 120 |
'''
|
| 121 |
return linear
|
| 122 |
|
| 123 |
+
# Logistic Regression
|
| 124 |
def logistic_regression():
|
| 125 |
logistic = '''
|
| 126 |
+
### ๐ Logistic Regression
|
| 127 |
Logistic regression is used for binary classification tasks, where the goal is to predict one of two outcomes, such as pass/fail or spam/not spam.
|
| 128 |
|
| 129 |
+
**๐ Example**:
|
| 130 |
+
- **๐ง Predicting Spam Emails**: Classifying emails as spam or not spam.
|
| 131 |
```python
|
| 132 |
from sklearn.linear_model import LogisticRegression
|
| 133 |
from sklearn.datasets import load_iris
|
|
|
|
| 141 |
'''
|
| 142 |
return logistic
|
| 143 |
|
| 144 |
+
# Decision Trees
|
| 145 |
def decision_trees():
|
| 146 |
decision = '''
|
| 147 |
+
### ๐ณ Decision Trees
|
| 148 |
Decision trees split the data into subsets based on feature values, creating a tree-like model. It is used for both classification and regression tasks.
|
| 149 |
|
| 150 |
+
**๐ Example**:
|
| 151 |
+
- **๐ธ Classifying Iris Species**: A decision tree can be used to classify different species of Iris flowers.
|
| 152 |
```python
|
| 153 |
from sklearn.tree import DecisionTreeClassifier
|
| 154 |
from sklearn.datasets import load_iris
|
|
|
|
| 162 |
'''
|
| 163 |
return decision
|
| 164 |
|
| 165 |
+
# K-Nearest Neighbors (KNN)
|
| 166 |
def knn():
|
| 167 |
knn = '''
|
| 168 |
+
### ๐ K-Nearest Neighbors (KNN)
|
| 169 |
KNN is a simple, non-parametric algorithm that classifies data based on the majority vote of its nearest neighbors.
|
| 170 |
|
| 171 |
+
**๐ Example**:
|
| 172 |
+
- **๐ Classifying a Data Point**: Predict the class of a data point based on its nearest neighbors.
|
| 173 |
```python
|
| 174 |
from sklearn.neighbors import KNeighborsClassifier
|
| 175 |
from sklearn.datasets import load_iris
|
|
|
|
| 183 |
'''
|
| 184 |
return knn
|
| 185 |
|
| 186 |
+
# Support Vector Machines (SVM)
|
| 187 |
def svm():
    """Return the markdown body for the "Support Vector Machines" page.

    Fixes relative to the previous version:
    - The description ended mid-sentence ("...data points of different") —
      completed to "...of different classes."
    - Every sibling algorithm page (linear/logistic regression, trees, KNN)
      carries a worked example; the example/code snippet here was dropped, so
      it is restored for consistency.
    - Local variable renamed from ``svm`` to ``content`` — it shadowed this
      function's own name.
    """
    content = '''
### โก Support Vector Machines (SVM)
SVM is a powerful classifier that works well for high-dimensional data. It tries to find the hyperplane that best separates the data points of different classes.

**๐ Example**:
- **Classifying Iris Flowers**: An SVM can be used to classify Iris flowers into different species.
```python
from sklearn.svm import SVC
from sklearn.datasets import load_iris
data = load_iris()
X = data.data
y = data.target
model = SVC(kernel='linear')
model.fit(X, y)
predictions = model.predict(X)
```
'''
    return content
|
| 193 |
+
# Sidebar navigation: a top-level topic picker plus, depending on the chosen
# topic, a secondary radio for the concrete sub-page. The sub-page variables
# stay None when their topic is not selected so the dispatch below falls
# through cleanly.
st.sidebar.header("๐ Contents")

# Introduction is listed first so it is the default selection.
page = st.sidebar.radio("๐ Select a Topic",
                        ["Introduction", "Types of Machine Learning", "Popular Algorithms"])

# Secondary pickers are only rendered when their parent topic is active.
types_of_ml = (
    st.sidebar.radio("๐ Types of Machine Learning",
                     ["๐ธ Supervised Learning", "๐ธ Unsupervised Learning", "๐ธ Reinforcement Learning"])
    if page == "Types of Machine Learning"
    else None
)

popular_algorithms = (
    st.sidebar.radio("๐ Popular Algorithms",
                     ["๐ Linear Regression", "๐ Logistic Regression", "๐ณ Decision Trees",
                      "๐ K-Nearest Neighbors (KNN)", "โก Support Vector Machines (SVM)", "๐ง Neural Networks"])
    if page == "Popular Algorithms"
    else None
)
|
| 213 |
|
|
|
|
| 218 |
# Restored definition: the dispatch below still routes the "๐ง Neural
# Networks" sidebar choice to neural_networks(), but the function's definition
# was dropped from the file — without it, selecting that entry raises
# NameError at runtime.
def neural_networks():
    """Return the markdown body for the "Neural Networks" page as a single string."""
    content = '''
### ๐ง Neural Networks
Neural networks are modeled after the human brain, with layers of interconnected nodes (neurons) used for tasks like image and speech recognition.

**Example**:
- **Classifying Handwritten Digits**: A simple neural network can be used to classify digits from the MNIST dataset.
```python
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import load_iris
data = load_iris()
X = data.data
y = data.target
model = MLPClassifier(hidden_layer_sizes=(10,), max_iter=1000)
model.fit(X, y)
predictions = model.predict(X)
```
'''
    return content

# Render the page body matching the sidebar selection. Exactly one branch can
# match: types_of_ml / popular_algorithms are None unless their topic is active.
if page == "Introduction":
    st.markdown(introduction_to_ml())

elif types_of_ml == "๐ธ Supervised Learning":
    st.markdown(supervised_learning())
elif types_of_ml == "๐ธ Unsupervised Learning":
    st.markdown(unsupervised_learning())
elif types_of_ml == "๐ธ Reinforcement Learning":
    st.markdown(reinforcement_learning())

elif popular_algorithms == "๐ Linear Regression":
    st.markdown(linear_regression())
elif popular_algorithms == "๐ Logistic Regression":
    st.markdown(logistic_regression())
elif popular_algorithms == "๐ณ Decision Trees":
    st.markdown(decision_trees())
elif popular_algorithms == "๐ K-Nearest Neighbors (KNN)":
    st.markdown(knn())
elif popular_algorithms == "โก Support Vector Machines (SVM)":
    st.markdown(svm())
elif popular_algorithms == "๐ง Neural Networks":
    st.markdown(neural_networks())
|