deedrop1140 commited on
Commit
d0a6b4f
·
verified ·
1 Parent(s): 2f51d68

Upload 182 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. Dockerfile +57 -0
  3. Models/label_encoder.joblib +3 -0
  4. Models/label_encoder.pkl +3 -0
  5. Models/liar_vectorizer.joblib +3 -0
  6. Models/linear_model.pkl +3 -0
  7. Models/logistic_model.pkl +3 -0
  8. Models/logvectorizer.pkl +3 -0
  9. Models/nb_url_model.pkl +3 -0
  10. Models/nb_url_vectorizer.pkl +3 -0
  11. Models/poly_model.pkl +3 -0
  12. Models/poly_transform.pkl +3 -0
  13. Models/rf_model.pkl +3 -0
  14. Models/ridge_model.pkl +3 -0
  15. Models/ridge_scaler.pkl +3 -0
  16. Models/supervised_model.pkl +3 -0
  17. Models/svr_model.pkl +3 -0
  18. Models/svr_scaler_X.pkl +3 -0
  19. Models/svr_scaler_y.pkl +3 -0
  20. Models/tfidf_vectorizer.pkl +3 -0
  21. Models/url_vectorizer.pkl +3 -0
  22. Models/vectorizer.joblib +3 -0
  23. Models/voting_url_model.pkl +3 -0
  24. README.md +2 -11
  25. Static/decision_tree.png +3 -0
  26. Static/js/lasso_charts.js +225 -0
  27. Static/js/linear.js +263 -0
  28. Static/js/poly.js +85 -0
  29. Static/knn.js +71 -0
  30. Static/svr_linear.png +0 -0
  31. Static/svr_poly.png +0 -0
  32. Static/svr_rbf.png +0 -0
  33. Static/uploads/Figure_1.png +0 -0
  34. Static/uploads/compressed_clean.jpg +0 -0
  35. Static/uploads/digit_0.png +0 -0
  36. Static/uploads/digit_4.png +0 -0
  37. Static/uploads/download.jpg +0 -0
  38. Static/uploads/download.png +0 -0
  39. Static/uploads/download_1.jpg +0 -0
  40. Static/uploads/download_2.jpg +0 -0
  41. Static/uploads/input.jpg +0 -0
  42. Static/uploads/kmeans.png +0 -0
  43. Static/uploads/test_digit.png +0 -0
  44. Static/uploads/test_digit_8.png +0 -0
  45. Static/uploads/test_digit_8_1.png +0 -0
  46. app.py +2373 -0
  47. auth/__init__.py +0 -0
  48. auth/email.py +12 -0
  49. auth/extensions.py +3 -0
  50. auth/jwt_utils.py +4 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Static/decision_tree.png filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # # Use official Python base image
2
+ # FROM python:3.10-slim
3
+
4
+ # # Avoid Python buffering
5
+ # ENV PYTHONUNBUFFERED=1
6
+
7
+ # # Set work directory
8
+ # WORKDIR /app
9
+
10
+ # # Install system dependencies
11
+ # RUN apt-get update && apt-get install -y \
12
+ # build-essential \
13
+ # git \
14
+ # curl \
15
+ # && rm -rf /var/lib/apt/lists/*
16
+
17
+ # # Copy requirements.txt and install
18
+ # COPY requirements.txt .
19
+ # RUN pip install --upgrade pip && pip install -r requirements.txt
20
+
21
+ # # Copy project files
22
+ # COPY . .
23
+
24
+ # # Expose port (Hugging Face expects 7860 by default, but Flask usually runs 5000)
25
+ # EXPOSE 5000
26
+
27
+ # # Set environment variable for Flask
28
+ # ENV PORT=5000
29
+ # ENV FLASK_APP=app.py
30
+
31
+ # # Run Flask
32
+ # CMD ["flask", "run", "--host", "0.0.0.0", "--port", "5000"]
33
+
34
+ # Use lightweight Python image
35
+ FROM python:3.10-slim
36
+
37
+ # Environment variables
38
+ ENV PYTHONUNBUFFERED=1
39
+ ENV TF_CPP_MIN_LOG_LEVEL=2
40
+
41
+ # Set working directory
42
+ WORKDIR /app
43
+
44
+ # Copy and install dependencies first (cache-friendly)
45
+ COPY requirements.txt .
46
+ RUN pip install --no-cache-dir --upgrade pip \
47
+ && pip install --no-cache-dir -r requirements.txt
48
+
49
+ # Copy app code
50
+ COPY . .
51
+
52
+ # Render provides PORT automatically — DO NOT hardcode
53
+ CMD ["python", "app.py"]
54
+
55
+
56
+
57
+
Models/label_encoder.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:865b0dbea4a93bf730753224d4e047c046ae29bf9b2aea0c7be7d49117a886bc
3
+ size 585
Models/label_encoder.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:42d9d0139ea16bc79a275b08e1e97c8c3075f91279b211fa3a635786f26c015e
3
+ size 592
Models/liar_vectorizer.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c26d15ecdbe5770f3b01b015d4ebb565d20e9e3a9a477b397a875857812a7cf4
3
+ size 184539
Models/linear_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e686db9126ad24dbdd3eaee6b9915cce209e0c703e3279c23787cdb3f1fa6e7a
3
+ size 577
Models/logistic_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57c8921a04cc148eb213bc4e1d21bf7d4e027401ea0dbe272567d6d6dd12d920
3
+ size 40863
Models/logvectorizer.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e51b1d8b6c8975d5469c9c7540af43fab5ac2bdce0008d7109cfdab4fd481917
3
+ size 160142
Models/nb_url_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c88eb0fb2fb0b99144d1f59e4a9868a5a09c2143649a2e5611931f9271fadf11
3
+ size 22222423
Models/nb_url_vectorizer.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1930da4d5837bdbf45094b03047a1c9a4febd8d37871b08dc4259fe7d723e852
3
+ size 14448425
Models/poly_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56c2a0cbd48a3349e662adb7120e361a3d31c11e457690a3315f778c5eac10f2
3
+ size 609
Models/poly_transform.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9dc1dfc5979d069bdb7c33289547d02668adfe29739f31519cef264c1bb1b57
3
+ size 255
Models/rf_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf25c22e41534fe505d74c6c5cd7e6e6cf5a0d76fa75f1bb58df2c949ee58a5a
3
+ size 102017
Models/ridge_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec7a5044c24138f0ce707e2a9b0e936c8a44a87009dbe2039fffa52dfd6ddab2
3
+ size 593
Models/ridge_scaler.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:700c6f8cd087cd8183e3e923406b37414e106068de5d335378bce049081b1862
3
+ size 1039
Models/supervised_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:563a76099009bb0d525d7178e6901903bb38037b80e354ceb0fed0697e755f92
3
+ size 576
Models/svr_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a53c8cb144b241a532575dd98f7e0a3a00cb96e0e1b86ca3865aa420a08fd47c
3
+ size 42141
Models/svr_scaler_X.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd01177f9512e5b7165080eac192c4434001a0650911d1af49603245dd395372
3
+ size 722
Models/svr_scaler_y.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e9f5568bef89410981f949a8af69c55bd631fc0a5166d48ff52014629bc6956
3
+ size 474
Models/tfidf_vectorizer.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:909c6d43daee911d09fc013149f2a7cbf2da5afbdb8ae01f8057641bde4f8ce7
3
+ size 226415
Models/url_vectorizer.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1137c32cf449a9820f0128cb5b170e480b38169639c94e75f15fff578abb9df8
3
+ size 140312
Models/vectorizer.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b69e7d0c042a50411c148bec8240a3756aa7d2057931c55a85659d673c1bc8e6
3
+ size 183179
Models/voting_url_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7eeb355c7e7339439d73909b118de4befd5257d2a04208d1d0a36bd71f52f57c
3
+ size 8767014
README.md CHANGED
@@ -1,11 +1,2 @@
1
- ---
2
- title: Machinelearningalgorithms
3
- emoji: 🏢
4
- colorFrom: yellow
5
- colorTo: green
6
- sdk: docker
7
- pinned: false
8
- short_description: machinelearningalgor
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ # machine-learning
2
+ it is machine learning wesite
 
 
 
 
 
 
 
 
 
Static/decision_tree.png ADDED

Git LFS Details

  • SHA256: 6b8cdfb3ac950b37d03f7ddabd674789f9509bb23b2d3ff61bcd228a72bfafd8
  • Pointer size: 131 Bytes
  • Size of remote file: 329 kB
Static/js/lasso_charts.js ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ document.addEventListener('DOMContentLoaded', function() {
2
+ console.log("lasso_charts.js loaded and DOM fully parsed.");
3
+
4
+ const form = document.getElementById('predictionForm');
5
+ const loadingSpinner = document.getElementById('loadingSpinner');
6
+
7
+ form.addEventListener('submit', function() {
8
+ loadingSpinner.classList.remove('hidden'); // Show loading spinner
9
+ });
10
+
11
+ // --- Example for a Coefficient Path Chart (Conceptual with Chart.js) ---
12
+ const alphaValues = [0.01, 0.1, 0.5, 1, 2, 5, 10];
13
+ const dummyCoefficients = {
14
+ 'OverallQual': [0.8, 0.7, 0.5, 0.3, 0.1, 0, 0],
15
+ 'GrLivArea': [1.2, 1.1, 0.9, 0.7, 0.5, 0.2, 0.1],
16
+ 'GarageCars': [0.5, 0.4, 0.3, 0.1, 0, 0, 0],
17
+ 'TotalBsmtSF': [0.6, 0.5, 0.4, 0.2, 0.1, 0.05, 0],
18
+ 'YearBuilt': [0.3, 0.2, 0.1, 0.05, 0, 0, 0]
19
+ };
20
+
21
+ const ctxCoeff = document.getElementById('coefficientPathChart');
22
+ if (ctxCoeff) {
23
+ new Chart(ctxCoeff, {
24
+ type: 'line',
25
+ data: {
26
+ labels: alphaValues.map(a => `λ=${a}`),
27
+ datasets: Object.keys(dummyCoefficients).map(feature => ({
28
+ label: feature,
29
+ data: dummyCoefficients[feature],
30
+ borderColor: getRandomColor(),
31
+ fill: false,
32
+ tension: 0.1
33
+ }))
34
+ },
35
+ options: {
36
+ responsive: true,
37
+ maintainAspectRatio: false,
38
+ plugins: {
39
+ title: {
40
+ display: true,
41
+ text: 'Coefficient Path for Different Lambda (α) Values'
42
+ },
43
+ tooltip: {
44
+ mode: 'index',
45
+ intersect: false,
46
+ },
47
+ },
48
+ scales: {
49
+ x: {
50
+ title: {
51
+ display: true,
52
+ text: 'Regularization Strength (λ)'
53
+ }
54
+ },
55
+ y: {
56
+ title: {
57
+ display: true,
58
+ text: 'Coefficient Value'
59
+ }
60
+ }
61
+ }
62
+ }
63
+ });
64
+ }
65
+
66
+ // --- Example for Feature Importance Bar Chart (Conceptual with Chart.js) ---
67
+ const finalCoefficients = {
68
+ 'OverallQual': 0.65,
69
+ 'GrLivArea': 0.82,
70
+ 'GarageCars': 0.15,
71
+ 'TotalBsmtSF': 0.38,
72
+ 'YearBuilt': 0.07
73
+ };
74
+ const featureLabels = Object.keys(finalCoefficients);
75
+ const featureValues = Object.values(finalCoefficients).map(Math.abs);
76
+
77
+ const ctxFeature = document.getElementById('featureImportanceChart');
78
+ if (ctxFeature) {
79
+ new Chart(ctxFeature, {
80
+ type: 'bar',
81
+ data: {
82
+ labels: featureLabels,
83
+ datasets: [{
84
+ label: 'Absolute Coefficient Value',
85
+ data: featureValues,
86
+ backgroundColor: 'rgba(54, 162, 235, 0.7)',
87
+ borderColor: 'rgba(54, 162, 235, 1)',
88
+ borderWidth: 1
89
+ }]
90
+ },
91
+ options: {
92
+ responsive: true,
93
+ maintainAspectRatio: false,
94
+ plugins: {
95
+ title: {
96
+ display: true,
97
+ text: 'Feature Importance (Absolute Coefficients)'
98
+ },
99
+ legend: {
100
+ display: false
101
+ }
102
+ },
103
+ scales: {
104
+ y: {
105
+ beginAtZero: true,
106
+ title: {
107
+ display: true,
108
+ text: 'Absolute Coefficient Value'
109
+ }
110
+ },
111
+ x: {
112
+ title: {
113
+ display: true,
114
+ text: 'Features'
115
+ }
116
+ }
117
+ }
118
+ }
119
+ });
120
+ }
121
+
122
+ // --- Example for Predicted vs Actual Chart (Conceptual with Chart.js) ---
123
+ const actualPrices = [200000, 250000, 180000, 300000, 220000, 270000, 190000, 310000];
124
+ const predictedPrices = [210000, 245000, 175000, 310000, 215000, 280000, 195000, 300000];
125
+ const dataPoints = actualPrices.map((actual, index) => ({
126
+ x: actual,
127
+ y: predictedPrices[index]
128
+ }));
129
+
130
+ const ctxPredActual = document.getElementById('predictionActualChart');
131
+ if (ctxPredActual) {
132
+ new Chart(ctxPredActual, {
133
+ type: 'scatter',
134
+ data: {
135
+ datasets: [{
136
+ label: 'Predicted vs. Actual',
137
+ data: dataPoints,
138
+ backgroundColor: 'rgba(75, 192, 192, 0.8)',
139
+ pointRadius: 5
140
+ }, {
141
+ label: 'Ideal Prediction',
142
+ data: [{x: Math.min(...actualPrices, ...predictedPrices), y: Math.min(...actualPrices, ...predictedPrices)},
143
+ {x: Math.max(...actualPrices, ...predictedPrices), y: Math.max(...actualPrices, ...predictedPrices)}],
144
+ borderColor: 'rgba(255, 99, 132, 0.8)',
145
+ borderWidth: 2,
146
+ pointRadius: 0,
147
+ type: 'line',
148
+ fill: false,
149
+ tension: 0
150
+ }]
151
+ },
152
+ options: {
153
+ responsive: true,
154
+ maintainAspectRatio: false,
155
+ plugins: {
156
+ title: {
157
+ display: true,
158
+ text: 'Predicted vs. Actual Prices'
159
+ },
160
+ tooltip: {
161
+ callbacks: {
162
+ label: function(context) {
163
+ return `Actual: $${context.parsed.x}, Predicted: $${context.parsed.y}`;
164
+ }
165
+ }
166
+ }
167
+ },
168
+ scales: {
169
+ x: {
170
+ type: 'linear',
171
+ position: 'bottom',
172
+ title: {
173
+ display: true,
174
+ text: 'Actual Price ($)'
175
+ }
176
+ },
177
+ y: {
178
+ type: 'linear',
179
+ position: 'left',
180
+ title: {
181
+ display: true,
182
+ text: 'Predicted Price ($)'
183
+ }
184
+ }
185
+ }
186
+ }
187
+ });
188
+ }
189
+
190
+ // Helper function to get a random color for line charts
191
+ function getRandomColor() {
192
+ const letters = '0123456789ABCDEF';
193
+ let color = '#';
194
+ for (let i = 0; i < 6; i++) {
195
+ color += letters[Math.floor(Math.random() * 16)];
196
+ }
197
+ return color;
198
+ }
199
+
200
+ // --- IMPORTANT: How to get real data from your Flask/Python backend ---
201
+ // You would typically fetch data using JavaScript's Fetch API after the page loads,
202
+ // or by embedding data directly into the HTML from your Jinja2 template.
203
+
204
+ // Example of fetching data (if your Flask app has an /api/charts endpoint)
205
+ /*
206
+ fetch('/api/charts/coefficient_path_data')
207
+ .then(response => response.json())
208
+ .then(data => {
209
+ // Use 'data' to render your coefficient path chart
210
+ // e.g., update the Chart.js data object and call chart.update()
211
+ console.log("Received coefficient path data:", data);
212
+ })
213
+ .catch(error => console.error('Error fetching chart data:', error));
214
+ */
215
+
216
+ // Example of embedding data (if passed directly from Flask view)
217
+ // In your Flask view:
218
+ // return render_template('lasso_regression.html', prediction=..., chart_data_json=json.dumps(your_data))
219
+ // In lasso_regression.html:
220
+ // <script> const chartData = {{ chart_data_json | safe }}; </script>
221
+ // In lasso_charts.js:
222
+ // console.log(chartData); // Use this data directly for charts
223
+
224
+
225
+ });
Static/js/linear.js ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Get canvas and context
2
+ const canvas = document.getElementById('regressionCanvas');
3
+ const ctx = canvas.getContext('2d');
4
+
5
+ // Data from your Python script (X, y)
6
+ // These are hardcoded here for visualization purposes.
7
+ // In a real advanced app, these might be dynamically loaded.
8
+ const X_data = [1, 2, 3, 4, 5];
9
+ const y_data = [35, 45, 55, 65, 75];
10
+
11
+ // --- Understanding Slope (m) and Intercept (b) ---
12
+ // For a perfect linear relationship as in your data,
13
+ // we can manually calculate slope (m) and intercept (b).
14
+ // In a real-world scenario with scattered data, the scikit-learn
15
+ // LinearRegression model uses more advanced statistical methods
16
+ // (like Ordinary Least Squares) to find the 'best fit' line
17
+ // that minimizes the squared differences between actual and predicted y values.
18
+
19
+ // Calculate Slope (m):
20
+ // m = (y2 - y1) / (x2 - x1)
21
+ // Using points (1, 35) and (2, 45):
22
+ // m = (45 - 35) / (2 - 1) = 10 / 1 = 10
23
+ const slope = 10;
24
+
25
+ // Calculate Intercept (b):
26
+ // b = y - m * x
27
+ // Using point (1, 35) and calculated slope m=10:
28
+ // b = 35 - (10 * 1) = 35 - 10 = 25
29
+ const intercept = 25;
30
+
31
+ // Display slope and intercept values in the HTML
32
+ document.getElementById('slopeValue').textContent = slope.toFixed(2);
33
+ document.getElementById('interceptValue').textContent = intercept.toFixed(2);
34
+
35
+ // Canvas dimensions and padding
36
+ let canvasWidth, canvasHeight;
37
+ const padding = 50;
38
+
39
+ // Scale factors for drawing data onto the canvas
40
+ let xScale, yScale;
41
+ let xMin, xMax, yMin, yMax;
42
+
43
+ // Prediction variables (these will be updated when the user inputs hours)
44
+ let predictedHours = null;
45
+ let predictedScore = null;
46
+
47
+ // Function to set up scaling based on data range and canvas size
48
+ function setupScaling() {
49
+ canvasWidth = canvas.width;
50
+ canvasHeight = canvas.height;
51
+
52
+ // Determine data ranges for X and Y axes
53
+ xMin = Math.min(...X_data, 0); // Always start X-axis at 0
54
+ // Set xMax to at least 10 (as per the last request) and ensure it covers any new predicted hours
55
+ xMax = Math.max(...X_data, predictedHours !== null ? predictedHours : 0, 10) + 1; // Extend x-axis slightly beyond 10
56
+
57
+ yMin = Math.min(...y_data, 0); // Always start Y-axis at 0
58
+ // Calculate the predicted score for the determined xMax to ensure the y-axis covers the line
59
+ const maxPredictedY = slope * xMax + intercept;
60
+ yMax = Math.max(...y_data, predictedScore !== null ? predictedScore : 0, maxPredictedY) + 20; // Extend y-axis slightly beyond max needed
61
+
62
+ // Calculate scaling factors to fit data within the canvas padding
63
+ xScale = (canvasWidth - 2 * padding) / (xMax - xMin);
64
+ yScale = (canvasHeight - 2 * padding) / (yMax - yMin);
65
+ }
66
+
67
+ // Convert data coordinates (e.g., hours, score) to canvas pixel coordinates
68
+ function toCanvasX(x) {
69
+ return padding + (x - xMin) * xScale;
70
+ }
71
+
72
+ function toCanvasY(y) {
73
+ return canvasHeight - padding - (y - yMin) * yScale;
74
+ }
75
+
76
+ // Function to draw the entire graph, including data points, regression line, and predictions
77
+ function drawGraph() {
78
+ ctx.clearRect(0, 0, canvasWidth, canvasHeight); // Clear the entire canvas
79
+
80
+ // Draw axes
81
+ ctx.beginPath();
82
+ ctx.strokeStyle = '#64748b'; // Slate gray for axes
83
+ ctx.lineWidth = 2;
84
+
85
+ // X-axis (horizontal line)
86
+ ctx.moveTo(padding, toCanvasY(yMin));
87
+ ctx.lineTo(canvasWidth - padding, toCanvasY(yMin));
88
+ // Y-axis (vertical line)
89
+ ctx.moveTo(toCanvasX(xMin), padding);
90
+ ctx.lineTo(toCanvasX(xMin), canvasHeight - padding);
91
+ ctx.stroke();
92
+
93
+ // Draw axis labels and ticks
94
+ ctx.fillStyle = '#475569'; // Darker gray for labels
95
+ ctx.font = '14px Inter';
96
+ ctx.textAlign = 'center';
97
+ ctx.textBaseline = 'top';
98
+
99
+ // X-axis labels (Hours Studied)
100
+ // Dynamic tick step for clarity on different scales
101
+ const xTickStep = 1; // Every 1 hour for a graph up to 10
102
+ for (let i = Math.ceil(xMin / xTickStep) * xTickStep; i <= Math.floor(xMax); i += xTickStep) {
103
+ if (i >= 0) {
104
+ ctx.fillText(i + 'h', toCanvasX(i), canvasHeight - padding + 10);
105
+ ctx.beginPath();
106
+ ctx.moveTo(toCanvasX(i), canvasHeight - padding);
107
+ ctx.lineTo(toCanvasX(i), canvasHeight - padding - 5);
108
+ ctx.stroke();
109
+ }
110
+ }
111
+ // X-axis title
112
+ ctx.fillText('Hours Studied', canvasWidth / 2, canvasHeight - 20);
113
+
114
+ ctx.textAlign = 'right';
115
+ ctx.textBaseline = 'middle';
116
+ // Y-axis labels (Score)
117
+ // Dynamic tick step for clarity on different scales
118
+ const yTickStep = (yMax - yMin) / 10 > 20 ? 50 : 20; // Example: every 20 or 50 points
119
+ for (let i = Math.ceil(yMin / yTickStep) * yTickStep; i <= Math.floor(yMax); i += yTickStep) {
120
+ if (i >= 0) {
121
+ ctx.fillText(i.toFixed(0), padding - 10, toCanvasY(i));
122
+ ctx.beginPath();
123
+ ctx.moveTo(padding, toCanvasY(i));
124
+ ctx.lineTo(padding + 5, toCanvasY(i));
125
+ ctx.stroke();
126
+ }
127
+ }
128
+ // Y-axis title (rotated)
129
+ ctx.save();
130
+ ctx.translate(20, canvasHeight / 2);
131
+ ctx.rotate(-Math.PI / 2);
132
+ ctx.textAlign = 'center';
133
+ ctx.fillText('Score', 0, 0);
134
+ ctx.restore();
135
+
136
+
137
+ // Draw data points (blue circles)
138
+ ctx.fillStyle = '#3b82f6'; // Blue for data points
139
+ X_data.forEach((x, i) => {
140
+ ctx.beginPath();
141
+ ctx.arc(toCanvasX(x), toCanvasY(y_data[i]), 5, 0, Math.PI * 2); // Radius 5
142
+ ctx.fill();
143
+ });
144
+
145
+ // Draw regression line (red line)
146
+ ctx.beginPath();
147
+ ctx.strokeStyle = '#ef4444'; // Red for regression line
148
+ ctx.lineWidth = 3;
149
+ // Draw line across the entire X-axis range based on the model equation
150
+ ctx.moveTo(toCanvasX(xMin), toCanvasY(slope * xMin + intercept));
151
+ ctx.lineTo(toCanvasX(xMax), toCanvasY(slope * xMax + intercept));
152
+ ctx.stroke();
153
+
154
+ // Draw predicted point and lines if available (green point and dashed lines)
155
+ if (predictedHours !== null && predictedScore !== null) {
156
+ const predX = toCanvasX(predictedHours);
157
+ const predY = toCanvasY(predictedScore);
158
+
159
+ // Predicted point
160
+ ctx.fillStyle = '#22c55e'; // Green for predicted point
161
+ ctx.beginPath();
162
+ ctx.arc(predX, predY, 6, 0, Math.PI * 2); // Slightly larger radius
163
+ ctx.fill();
164
+
165
+ // Dotted lines to axes
166
+ ctx.strokeStyle = '#22c55e'; // Green for dotted lines
167
+ ctx.lineWidth = 1.5;
168
+ ctx.setLineDash([5, 5]); // Dotted line style
169
+
170
+ // Line from predicted point to X-axis
171
+ ctx.beginPath();
172
+ ctx.moveTo(predX, predY);
173
+ ctx.lineTo(predX, toCanvasY(yMin));
174
+ ctx.stroke();
175
+
176
+ // Line from predicted point to Y-axis
177
+ ctx.beginPath();
178
+ ctx.moveTo(predX, predY);
179
+ ctx.lineTo(toCanvasX(xMin), predY);
180
+ ctx.stroke();
181
+
182
+ ctx.setLineDash([]); // Reset line dash to solid for subsequent drawings
183
+ }
184
+ }
185
+
186
+ // Event listener for the "Predict Score" button click
187
+ document.getElementById('predictBtn').addEventListener('click', () => {
188
+ // Get the value from the input field and parse it as a floating-point number
189
+ const hoursInput = parseFloat(document.getElementById('hoursInput').value);
190
+
191
+ // Check if the input is a valid number
192
+ if (!isNaN(hoursInput)) {
193
+ // Update global prediction variables
194
+ predictedHours = hoursInput;
195
+ predictedScore = slope * predictedHours + intercept;
196
+
197
+ // Display the predicted score in the HTML
198
+ document.getElementById('predictedScore').textContent = predictedScore.toFixed(2);
199
+ // Make the prediction output box visible
200
+ document.getElementById('predictionOutput').classList.remove('hidden');
201
+
202
+ // Recalculate scaling and redraw the graph to accommodate new prediction if it extends axes
203
+ setupScaling();
204
+ drawGraph();
205
+ } else {
206
+ // If input is invalid, display an error message
207
+ const outputDiv = document.getElementById('predictionOutput');
208
+ outputDiv.innerHTML = '<p class="text-red-600">Please enter a valid number for hours studied.</p>';
209
+ outputDiv.classList.remove('hidden');
210
+ }
211
+ });
212
+
213
+ // Function to handle canvas resizing and redraw the graph
214
+ function resizeCanvas() {
215
+ // Get the device pixel ratio for sharper rendering on high-DPI screens
216
+ const dpi = window.devicePixelRatio;
217
+ // Get the actual rendered size of the canvas element from its CSS styles
218
+ const rect = canvas.getBoundingClientRect();
219
+
220
+ // Set the internal drawing buffer size of the canvas
221
+ canvas.width = rect.width * dpi;
222
+ canvas.height = rect.height * dpi;
223
+
224
+ // Scale the drawing context to match the DPI, ensuring crisp lines and text
225
+ ctx.scale(dpi, dpi);
226
+
227
+ // Re-setup scaling for data to canvas coordinates and redraw
228
+ setupScaling();
229
+ drawGraph();
230
+ }
231
+
232
+ // Initial setup and draw when the window loads
233
+ window.addEventListener('load', () => {
234
+ resizeCanvas(); // Set initial canvas size and draw
235
+ // Also trigger an initial prediction for the default value in the input field
236
+ const initialHours = parseFloat(document.getElementById('hoursInput').value);
237
+ if (!isNaN(initialHours)) {
238
+ predictedHours = initialHours;
239
+ predictedScore = slope * initialHours + intercept;
240
+ document.getElementById('predictedScore').textContent = predictedScore.toFixed(2);
241
+ document.getElementById('predictionOutput').classList.remove('hidden');
242
+ setupScaling();
243
+ drawGraph();
244
+ }
245
+ });
246
+
247
+ // Redraw the graph whenever the window is resized
248
+ window.addEventListener('resize', resizeCanvas);
249
+
250
+ // Optional: Allow clicking on canvas to set hours input (for quick testing)
251
+ canvas.addEventListener('click', (event) => {
252
+ // Get mouse click coordinates relative to the canvas
253
+ const rect = canvas.getBoundingClientRect();
254
+ const mouseX = (event.clientX - rect.left) / (canvas.width / canvas.getBoundingClientRect().width);
255
+ const mouseY = (event.clientY - rect.top) / (canvas.height / canvas.getBoundingClientRect().height); // Corrected this line
256
+
257
+ // Convert canvas X coordinate back to data X (hours studied)
258
+ const clickedHours = xMin + (mouseX - padding) / xScale;
259
+ // Update the input field with the clicked hours
260
+ document.getElementById('hoursInput').value = clickedHours.toFixed(1);
261
+ // Trigger the prediction immediately
262
+ document.getElementById('predictBtn').click();
263
+ });
Static/js/poly.js ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ const canvas = document.getElementById("polyCanvas");
2
+ const ctx = canvas.getContext("2d");
3
+
4
+ const X_data = [1, 2, 3, 4, 5];
5
+ const y_data = [3, 8, 15, 24, 35];
6
+
7
+ function toCanvasX(x, xScale, padding) {
8
+ return padding + x * xScale;
9
+ }
10
+
11
+ function toCanvasY(y, yScale, padding, canvasHeight) {
12
+ return canvasHeight - padding - y * yScale;
13
+ }
14
+
15
+ function setupAndDraw(predX = null, predY = null) {
16
+ const padding = 50;
17
+ const canvasWidth = canvas.width = canvas.clientWidth;
18
+ const canvasHeight = canvas.height = canvas.clientHeight;
19
+
20
+ const xMax = 6;
21
+ const yMax = 40;
22
+
23
+ const xScale = (canvasWidth - 2 * padding) / xMax;
24
+ const yScale = (canvasHeight - 2 * padding) / yMax;
25
+
26
+ // Clear
27
+ ctx.clearRect(0, 0, canvasWidth, canvasHeight);
28
+
29
+ // Axes
30
+ ctx.beginPath();
31
+ ctx.moveTo(padding, toCanvasY(0, yScale, padding, canvasHeight));
32
+ ctx.lineTo(canvasWidth - padding, toCanvasY(0, yScale, padding, canvasHeight));
33
+ ctx.moveTo(toCanvasX(0, xScale, padding), padding);
34
+ ctx.lineTo(toCanvasX(0, xScale, padding), canvasHeight - padding);
35
+ ctx.strokeStyle = "#475569";
36
+ ctx.stroke();
37
+
38
+ // Points
39
+ ctx.fillStyle = "#3b82f6";
40
+ X_data.forEach((x, i) => {
41
+ ctx.beginPath();
42
+ ctx.arc(toCanvasX(x, xScale, padding), toCanvasY(y_data[i], yScale, padding, canvasHeight), 5, 0, 2 * Math.PI);
43
+ ctx.fill();
44
+ });
45
+
46
+ // Curve
47
+ ctx.beginPath();
48
+ ctx.moveTo(toCanvasX(0, xScale, padding), toCanvasY(0, yScale, padding, canvasHeight));
49
+ for (let x = 0; x <= xMax; x += 0.1) {
50
+ const y = x * x + 2 * x; // match your data (x^2 + 2x)
51
+ ctx.lineTo(toCanvasX(x, xScale, padding), toCanvasY(y, yScale, padding, canvasHeight));
52
+ }
53
+ ctx.strokeStyle = "#ef4444";
54
+ ctx.lineWidth = 2;
55
+ ctx.stroke();
56
+
57
+ // Predicted point
58
+ if (predX !== null && predY !== null) {
59
+ ctx.fillStyle = "#22c55e";
60
+ ctx.beginPath();
61
+ ctx.arc(toCanvasX(predX, xScale, padding), toCanvasY(predY, yScale, padding, canvasHeight), 6, 0, 2 * Math.PI);
62
+ ctx.fill();
63
+ }
64
+ }
65
+
66
+ // Prediction handler
67
+ function predict() {
68
+ const hours = parseFloat(document.getElementById("hoursInput").value);
69
+ fetch("/predict_poly", {
70
+ method: "POST",
71
+ body: JSON.stringify({ hours }),
72
+ headers: {
73
+ "Content-Type": "application/json"
74
+ }
75
+ })
76
+ .then(res => res.json())
77
+ .then(data => {
78
+ const score = data.prediction;
79
+ document.getElementById("predictedScore").textContent = score;
80
+ document.getElementById("predictionOutput").classList.remove("hidden");
81
+ setupAndDraw(hours, score);
82
+ });
83
+ }
84
+
85
+ window.onload = () => setupAndDraw();
Static/knn.js ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ let points = [
2
+ [2, 3, 0], [3, 4, 0], [1, 1, 0],
3
+ [7, 8, 1], [6, 9, 1], [8, 7, 1]
4
+ ]; // (x, y, label)
5
+ let testPoint = [4.5, 5.5];
6
+
7
+ const ctx = document.getElementById('knnChart').getContext('2d');
8
+ const colors = ['#1f77b4', '#ff7f0e', '#2ca02c'];
9
+
10
+ let chart = new Chart(ctx, {
11
+ type: 'scatter',
12
+ data: {
13
+ datasets: [
14
+ {
15
+ label: 'Class 0',
16
+ data: points.filter(p => p[2] === 0).map(p => ({ x: p[0], y: p[1] })),
17
+ backgroundColor: colors[0]
18
+ },
19
+ {
20
+ label: 'Class 1',
21
+ data: points.filter(p => p[2] === 1).map(p => ({ x: p[0], y: p[1] })),
22
+ backgroundColor: colors[1]
23
+ },
24
+ {
25
+ label: 'Test Point',
26
+ data: [{ x: testPoint[0], y: testPoint[1] }],
27
+ backgroundColor: 'black',
28
+ pointStyle: 'triangle',
29
+ radius: 7
30
+ }
31
+ ]
32
+ },
33
+ options: {
34
+ responsive: true,
35
+ plugins: {
36
+ legend: { position: 'top' },
37
+ title: { display: true, text: 'KNN Classification Plot' }
38
+ },
39
+ scales: {
40
+ x: { type: 'linear', position: 'bottom' },
41
+ y: { type: 'linear' }
42
+ }
43
+ }
44
+ });
45
+
46
+ async function sendToServer() {
47
+ const k = document.getElementById('k-value').value;
48
+
49
+ const response = await fetch('/knn_visual_predict', {
50
+ method: 'POST',
51
+ headers: { 'Content-Type': 'application/json' },
52
+ body: JSON.stringify({ points, test_point: testPoint, k })
53
+ });
54
+
55
+ const result = await response.json();
56
+
57
+ document.getElementById('output').innerHTML =
58
+ `Prediction: <strong>Class ${result.prediction}</strong>`;
59
+
60
+ // Highlight neighbors
61
+ const neighborLayer = {
62
+ label: 'Nearest Neighbors',
63
+ data: result.neighbors.map(p => ({ x: p[0], y: p[1] })),
64
+ backgroundColor: '#d62728',
65
+ pointStyle: 'rect',
66
+ radius: 6
67
+ };
68
+
69
+ chart.data.datasets = chart.data.datasets.slice(0, 3).concat([neighborLayer]);
70
+ chart.update();
71
+ }
Static/svr_linear.png ADDED
Static/svr_poly.png ADDED
Static/svr_rbf.png ADDED
Static/uploads/Figure_1.png ADDED
Static/uploads/compressed_clean.jpg ADDED
Static/uploads/digit_0.png ADDED
Static/uploads/digit_4.png ADDED
Static/uploads/download.jpg ADDED
Static/uploads/download.png ADDED
Static/uploads/download_1.jpg ADDED
Static/uploads/download_2.jpg ADDED
Static/uploads/input.jpg ADDED
Static/uploads/kmeans.png ADDED
Static/uploads/test_digit.png ADDED
Static/uploads/test_digit_8.png ADDED
Static/uploads/test_digit_8_1.png ADDED
app.py ADDED
@@ -0,0 +1,2373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, render_template, request, jsonify
2
+ import numpy as np
3
+ import pandas as pd
4
+ import joblib
5
+ import os
6
+ from sklearn.svm import SVR
7
+ from sklearn.model_selection import train_test_split
8
+ from sklearn.metrics import mean_squared_error, r2_score
9
+ from sklearn.neighbors import KNeighborsClassifier
10
+ from sklearn.preprocessing import StandardScaler
11
+ from sklearn.ensemble import RandomForestClassifier
12
+ from sklearn.tree import DecisionTreeClassifier
13
+ from sklearn import svm
14
+ from sklearn.naive_bayes import GaussianNB # <--- Add this import
15
+ from sklearn.feature_extraction.text import CountVectorizer
16
+ from textblob import TextBlob
17
+ import traceback
18
+ from flask_cors import CORS
19
+ from werkzeug.utils import secure_filename # For secure file names
20
+ import io # To read CSV from memory
21
+ import re
22
+ from sklearn.cluster import KMeans, DBSCAN
23
+ from PIL import Image
24
+ import matplotlib.pyplot as plt
25
+ from joblib import load # ✅ This is the missing line
26
+ import traceback
27
+ import pickle
28
+ from sklearn.svm import SVC
29
+ from sklearn.datasets import make_classification
30
+ import plotly.graph_objs as go
31
+ import json
32
+ import requests
33
+ from PIL import Image
34
+
35
+
36
+ # from transformers import pipeline
37
+ from dotenv import load_dotenv
38
+ import os
39
+ from urllib.parse import urlparse
40
+ import tldextract
41
+ import string
42
+
43
+
44
+ #chatbotcode
45
+ import zipfile
46
+ import gdown
47
+ import torch
48
+ from transformers import AutoTokenizer, AutoModelForCausalLM
49
+ from peft import PeftModel
50
+
51
+ # #login
52
+ # from flask import Flask
53
+ # from flask_jwt_extended import JWTManager
54
+ # from flask_login import LoginManager
55
+ # from flask_mail import Mail
56
+ # from flask_login import LoginManager
57
+ # from flask_sqlalchemy import SQLAlchemy
58
+ # from flask_mail import Mail
59
+ # from auth.models import db, User
60
+ # from auth.routes import auth
61
+ # from flask_login import login_required
62
+
63
+
64
+
65
+
66
+ #chatbotcode
67
+
68
+ # from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
69
+
70
+ # model_name = "microsoft/deberta-v3-small"
71
+
72
+ # tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
73
+ # model = AutoModelForSequenceClassification.from_pretrained(model_name)
74
+
75
+ # bert_checker = pipeline("text-classification", model=model, tokenizer=tokenizer)
76
+
77
+ # Load environment variables from .env
78
+ load_dotenv()
79
+ #spam url import relateted
80
+ import nltk, os
81
+
82
+ # Tell NLTK to also check the local nltk_data folder
83
+ nltk.data.path.append(os.path.join(os.path.dirname(__file__), "nltk_data"))
84
+
85
+ from nltk.corpus import words
86
+
87
+ # Load the words corpus
88
+ valid_words = set(words.words())
89
+ print("engineering" in valid_words) # ✅ Should be True
90
+ print("engineerigfnnxng" in valid_words) # ❌ Should be False
91
+ import wordninja # Function to split words into valid parts
92
+ import re
93
+ from urllib.parse import urlparse
94
+ from spellchecker import SpellChecker
95
+
96
+ import wordninja
97
+ # end urlspam
98
+ import google.generativeai as genai
99
+
100
+ # app.py
101
+ # import streamlit as st
102
+ # from load_file import load_file
103
+
104
+ # st.title("Download HuggingFace Repo Files in Streamlit")
105
+
106
+ # filename = st.text_input("Enter filename from repo:", "model.safetensors")
107
+
108
+ # if st.button("Download"):
109
+ # try:
110
+ # local_path = load_file(filename)
111
+ # st.success(f"✅ File downloaded to: {local_path}")
112
+ # st.write("You can now use this file in your app.")
113
+ # except Exception as e:
114
+ # st.error(f"❌ Error: {str(e)}")
115
+
116
+
117
+ # Set API key (no need to assign OpenAI() to client like that)
118
+ # openai.api_key = os.getenv("OPENAI_API_KEY")
119
+
120
+ # def ask_openai_scientific_validation(statement):
121
+ # prompt = f"""Assess the scientific accuracy of: "{statement}"\nRespond with ✅ (possible) or ❌ (impossible), and explain simply."""
122
+
123
+ # try:
124
+ # client = OpenAI() # This is correct placement
125
+ # response = client.chat.completions.create(
126
+ # model="gpt-3.5-turbo",
127
+ # messages=[
128
+ # {"role": "system", "content": "You are a scientific fact-checker."},
129
+ # {"role": "user", "content": prompt}
130
+ # ],
131
+ # temperature=0.7,
132
+ # max_tokens=150
133
+ # )
134
+
135
+
136
+ # return response.choices[0].message.content.strip()
137
+
138
+ # except Exception as e:
139
+ # return f"⚠️ Could not verify:\n\n{str(e)}"
140
+
141
+
142
+ #huggung face code start
143
+
144
+
145
+ # # =====================
146
+ # # Replace your old model loads with this:
147
+ # # =====================
148
+
149
+ # # Models
150
+ # knn_model = load_file("Models/knn_model.pkl")
151
+ # lasso_model = load_file("Models/lasso_model.pkl")
152
+ # liar_model = load_file("Models/liar_model.joblib")
153
+ # linear_model = load_file("Models/linear_model.pkl")
154
+ # logistic_model = load_file("Models/logistic_model.pkl")
155
+ # nb_url_model = load_file("Models/nb_url_model.pkl")
156
+ # poly_model = load_file("Models/poly_model.pkl")
157
+ # rf_model = load_file("Models/rf_model.pkl")
158
+ # ridge_model = load_file("Models/ridge_model.pkl")
159
+ # supervised_model = load_file("Models/supervised_model.pkl")
160
+ # svr_model = load_file("Models/svr_model.pkl")
161
+ # voting_url_model = load_file("Models/voting_url_model.pkl")
162
+
163
+ # # Vectorizers / Encoders / Scalers
164
+ # label_classes = load_file("Models/label_classes.npy")
165
+ # label_encoder = load_file("Models/label_encoder.pkl")
166
+ # lasso_scaler = load_file("Models/lasso_scaler.pkl")
167
+ # liar_vectorizer = load_file("Models/liar_vectorizer.joblib")
168
+ # nb_url_vectorizer = load_file("Models/nb_url_vectorizer.pkl")
169
+ # poly_transform = load_file("Models/poly_transform.pkl")
170
+ # ridge_scaler = load_file("Models/ridge_scaler.pkl")
171
+ # svr_scaler_X = load_file("Models/svr_scaler_X.pkl")
172
+ # svr_scaler_y = load_file("Models/svr_scaler_y.pkl")
173
+ # tfidf_vectorizer = load_file("Models/tfidf_vectorizer.pkl")
174
+ # url_vectorizer = load_file("Models/url_vectorizer.pkl")
175
+ # vectorizer_joblib = load_file("Models/vectorizer.joblib")
176
+ # vectorizer_pkl = load_file("Models/vectorizer.pkl")
177
+ # # huggung face code end
178
+
179
+ MODEL_DIR = "Models"
180
+ DATA_DIR = "housedata" # Assuming your house data is here
181
+ UPLOAD_FOLDER = 'static/uploads' # NEW: Folder for temporary user uploads
182
+
183
+ app = Flask(__name__)
184
+ app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
185
+ CORS(app)
186
+
187
+
188
+ REPO_ID = "deedrop1140/nero-ml"
189
+ MODEL_DIR = "Models"
190
+
191
def load_file(filename):
    """Resolve *filename* to a loaded object (or a filesystem path).

    Checks the local ``MODEL_DIR`` folder first; when the file is absent it is
    downloaded from the Hugging Face Hub repo ``REPO_ID``.  ``.pkl``/``.joblib``
    files are deserialised with joblib, ``.npy`` with numpy, ``.pt``/``.pth``
    with torch; any other extension is returned as a path string.

    NOTE(review): ``MODEL_DIR`` is reassigned later in this module (to the
    chatbot adapter directory), so the local-path check may look in the wrong
    folder depending on when this is called — confirm which value is intended.
    """
    candidate = os.path.join(MODEL_DIR, filename)

    # Prefer a local copy; otherwise fetch from the Hub (deployed case).
    if os.path.exists(candidate):
        file_path = candidate
    else:
        file_path = hf_hub_download(repo_id=REPO_ID, filename=filename)

    # Dispatch on extension to the matching deserialiser.
    if filename.endswith((".pkl", ".joblib")):
        return joblib.load(file_path)
    if filename.endswith(".npy"):
        return np.load(file_path, allow_pickle=True)
    if filename.endswith((".pt", ".pth")):
        return torch.load(file_path, map_location="cpu")
    return file_path
211
+
212
+
213
+ #flasklogin
214
+
215
+
216
+ # app.config["JWT_SECRET_KEY"] = "jwt-secret-key"
217
+ # jwt = JWTManager(app)
218
+
219
+
220
+
221
+ #authstart
222
+ # app.config["SECRET_KEY"] = "super-secret"
223
+ # app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///users.db"
224
+
225
+ # Mail
226
+ # app.config["MAIL_SERVER"] = "smtp.gmail.com"
227
+ # app.config["MAIL_PORT"] = 587
228
+ # app.config["MAIL_USE_TLS"] = True
229
+ # app.config["MAIL_USERNAME"] = "your_email@gmail.com"
230
+ # app.config["MAIL_PASSWORD"] = "app_password"
231
+
232
+ # mail = Mail(app)
233
+
234
+ # login_manager = LoginManager(app)
235
+ # login_manager.login_view = "auth.login"
236
+ # db.init_app(app)
237
+ # app.register_blueprint(auth)
238
+ # jwt = JWTManager(app)
239
+ # mail = Mail(app)
240
+
241
+ # @login_manager.user_loader
242
+ # def load_user(user_id):
243
+ # return User.query.get(int(user_id))
244
+
245
+ # with app.app_context():
246
+ # db.create_all()
247
+ #authend
248
+
249
+
250
+ #chatbotcode
251
+ # deedrop1140/qwen-ml-tutor-assets
252
+ from transformers import (
253
+ AutoTokenizer,
254
+ AutoModelForCausalLM,
255
+ StoppingCriteria,
256
+ StoppingCriteriaList
257
+ )
258
+ from peft import PeftModel
259
+ from huggingface_hub import hf_hub_download
260
+ import zipfile
261
+ from transformers import TextIteratorStreamer
262
+ import threading
263
+ from flask import Response
264
+
265
+
266
+ # ======================
267
+ # CONFIG
268
+ # ======================
269
+ BASE_MODEL = "Qwen/Qwen2.5-1.5B"
270
+ DATASET_REPO = "deedrop1140/qwen-ml-tutor-assets"
271
+ ZIP_NAME = "qwen-ml-tutor-best-20251213T015537Z-1-001.zip"
272
+ MODEL_DIR = "qwen-ml-tutor-best"
273
+
274
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
275
+
276
# ======================
# FLASK APP
# ======================
# NOTE(review): ``app`` was already created near the top of this module with
# UPLOAD_FOLDER configured and CORS enabled.  Re-instantiating Flask here
# replaced that app, silently dropping the CORS setup and upload-folder config
# and detaching any routes registered on the first instance.  Reuse the
# existing ``app`` instead of creating a second one.
280
+
281
+ # ======================
282
+ # DOWNLOAD MODEL ASSETS
283
+ # ======================
284
+ if not os.path.exists(MODEL_DIR):
285
+ print("⬇️ Downloading LoRA adapter...")
286
+ zip_path = hf_hub_download(
287
+ repo_id=DATASET_REPO,
288
+ filename=ZIP_NAME,
289
+ repo_type="dataset"
290
+ )
291
+ print("📦 Extracting adapter...")
292
+ with zipfile.ZipFile(zip_path, "r") as z:
293
+ z.extractall(".")
294
+ print("✅ Adapter ready")
295
+
296
+ # ======================
297
+ # TOKENIZER (BASE MODEL)
298
+ # ======================
299
+ # ======================
300
+ # LOAD TOKENIZER (FROM LORA MODEL)
301
+ # ======================
302
+ tokenizer = AutoTokenizer.from_pretrained(
303
+ MODEL_DIR,
304
+ trust_remote_code=True
305
+ )
306
+
307
+ if tokenizer.pad_token_id is None:
308
+ tokenizer.pad_token = tokenizer.eos_token
309
+
310
+ # ======================
311
+ # LOAD BASE MODEL
312
+ # ======================
313
+ base_model = AutoModelForCausalLM.from_pretrained(
314
+ BASE_MODEL,
315
+ torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
316
+ trust_remote_code=True
317
+ )
318
+
319
+ # 🔥 THIS LINE IS THE FIX (DO NOT SKIP)
320
+ base_model.resize_token_embeddings(len(tokenizer))
321
+
322
+ # MOVE MODEL TO DEVICE
323
+ device = "cuda" if torch.cuda.is_available() else "cpu"
324
+ base_model = base_model.to(device)
325
+
326
+ # ======================
327
+ # LOAD LORA ADAPTER
328
+ # ======================
329
+ llm_model = PeftModel.from_pretrained(
330
+ base_model,
331
+ MODEL_DIR,
332
+ is_trainable=False
333
+ )
334
+
335
+ llm_model.eval()
336
+
337
+ print("✅ Model loaded successfully")
338
+
339
+ # ======================
340
+ # STOPPING CRITERIA
341
+ # ======================
342
class StopOnStrings(StoppingCriteria):
    """Stop generation when the generated tail matches any stop string.

    Each stop string is tokenised once at construction; at every generation
    step the most recent token ids are compared against each stop sequence.
    """

    def __init__(self, tokenizer, stop_strings):
        self.tokenizer = tokenizer
        # Pre-encode every stop string into its token-id sequence.
        self.stop_ids = [
            tokenizer.encode(text, add_special_tokens=False)
            for text in stop_strings
        ]

    def __call__(self, input_ids, scores, **kwargs):
        generated = input_ids[0]
        for stop in self.stop_ids:
            tail = len(stop)
            # Only compare once enough tokens exist to hold the stop sequence.
            if len(generated) >= tail and generated[-tail:].tolist() == stop:
                return True
        return False
356
+
357
+ stop_criteria = StoppingCriteriaList([
358
+ StopOnStrings(
359
+ tokenizer,
360
+ stop_strings=["User:", "Instruction:", "Question:"]
361
+ )
362
+ ])
363
+
364
+ # =============================
365
+ # ROUTES
366
+ # =============================
367
@app.route("/chatbot")
def chatbot():
    """Render the chatbot UI page (highlights the chatbot nav entry)."""
    return render_template("chatbot.html", active_page="chatbot")
370
+
371
@app.route("/chat", methods=["POST"])
def chat():
    """Stream a LoRA-tuned Qwen answer for a user question via SSE.

    Expects JSON ``{"message": "..."}``.  The reply is generated in a
    background thread and streamed token-by-token as ``data:`` events,
    terminated by a ``[DONE]`` sentinel.
    """
    data = request.json
    user_msg = data.get("message", "").strip()

    if not user_msg:
        return jsonify({"reply": "Please ask a machine learning question."})

    # Instruction-style prompt; the stop_criteria below cuts generation if the
    # model tries to continue the dialogue ("User:", "Question:", ...).
    prompt = f"""Instruction: Answer the following question clearly.
Do NOT ask follow-up questions.
Do NOT continue the conversation.
Question: {user_msg}
Answer:"""

    inputs = tokenizer(prompt, return_tensors="pt").to(DEVICE)

    # skip_prompt=True so only newly generated tokens are streamed to the client.
    streamer = TextIteratorStreamer(
        tokenizer,
        skip_prompt=True,
        skip_special_tokens=True
    )

    generation_kwargs = dict(
        **inputs,
        max_new_tokens=200,
        temperature=0.3,
        top_p=0.9,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.eos_token_id,
        stopping_criteria=stop_criteria,
        streamer=streamer
    )

    # Run generation in background thread: generate() blocks, so it must run
    # concurrently while this request handler consumes the streamer.
    thread = threading.Thread(
        target=llm_model.generate,
        kwargs=generation_kwargs
    )
    thread.start()

    def event_stream():
        # Server-Sent Events framing: one "data:" line per token chunk.
        for token in streamer:
            yield f"data: {token}\n\n"

        yield "data: [DONE]\n\n"

    return Response(
        event_stream(),
        mimetype="text/event-stream"
    )
422
+
423
+
424
+
425
+ #chatbotcode
426
+
427
+ genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
428
+
429
def ask_gemini(statement):
    """Ask Gemini to fact-check *statement* and return its plain-text reply."""
    gemini = genai.GenerativeModel("gemini-2.0-flash-001")
    prompt = f"Verify this statement for truth: {statement}"
    return gemini.generate_content(prompt).text
433
+
434
+ #rfc
435
+ # model = load("Models/liar_model.joblib")
436
+ # vectorizer = load("Models/liar_vectorizer.joblib")
437
+
438
+ # Load BERT fact-checker pipeline (local model)
439
+ # bert_checker = pipeline("text-classification", model="microsoft/deberta-v3-small")
440
+
441
+ #endrfc
442
+
443
+ #svm
444
+
445
# ==== SVM Setup ====
# Synthetic 2-D, 2-class dataset used by the SVM visualisation demo; fixed
# random_state keeps the plot reproducible across restarts.
X, y = make_classification(n_samples=100, n_features=2, n_redundant=0,
                           n_clusters_per_class=1, n_classes=2, random_state=42)
scaler = StandardScaler()
X = scaler.fit_transform(X)  # zero-mean / unit-variance features
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Train SVM (linear kernel) on the training split.
svm_model = SVC(kernel="linear")
svm_model.fit(X_train, y_train)
455
+
456
+ #endsvm
457
+ #deision tree
458
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
459
+ GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
460
+ #end deision tree
461
+
462
+ # Ensure directories exist
463
+ os.makedirs(MODEL_DIR, exist_ok=True)
464
+ os.makedirs(DATA_DIR, exist_ok=True)
465
+ os.makedirs(UPLOAD_FOLDER, exist_ok=True) # NEW: Create upload folder
466
+
467
def clean_text(text):
    """Normalise free text for vectorisation.

    Lowercases, strips URLs, punctuation and digits, and collapses runs of
    whitespace.  Missing values (None / NaN) come back as "".
    """
    if pd.isnull(text):
        return ""
    lowered = text.lower()
    no_urls = re.sub(r"http\S+|www\S+|https\S+", '', lowered)
    no_punct = no_urls.translate(str.maketrans('', '', string.punctuation))
    no_digits = re.sub(r'\d+', '', no_punct)
    return re.sub(r'\s+', ' ', no_digits).strip()
476
+
477
+ # --- Helper functions for data generation (conceptual for demo) ---
478
def generate_linear_data(n_samples=100, noise=0.5, random_state=None):
    """Generate a sorted 1-D linear regression dataset: y = 2x + 5 + noise.

    Parameters
    ----------
    n_samples : int
        Number of points to generate.
    noise : float
        Scale of the Gaussian noise added to y.
    random_state : int | None
        Optional seed for reproducible data.  ``None`` (the default) keeps
        the original behaviour of drawing from the global numpy RNG.

    Returns
    -------
    tuple
        ``(X, y)`` with X of shape (n_samples, 1) sorted ascending and y of
        shape (n_samples,).
    """
    rng = np.random if random_state is None else np.random.RandomState(random_state)
    X = np.sort(rng.rand(n_samples) * 10).reshape(-1, 1)
    y = 2 * X.squeeze() + 5 + noise * rng.randn(n_samples)
    return X, y
482
+
483
def generate_non_linear_data(n_samples=100, noise=0.5):
    """Generate a sorted 1-D sinusoidal dataset: y = 10*sin(x) + noise."""
    raw = np.random.rand(n_samples) * 10
    X = np.sort(raw).reshape(-1, 1)
    signal = np.sin(X.squeeze()) * 10
    y = signal + noise * np.random.randn(n_samples)
    return X, y
487
+
488
def generate_noisy_data(n_samples=100, noise_factor=3.0):
    """Same linear relation as generate_linear_data (y = 2x + 5) but with
    heavier Gaussian noise scaled by ``noise_factor``."""
    xs = np.random.rand(n_samples) * 10
    X = np.sort(xs).reshape(-1, 1)
    clean = 2 * X.squeeze() + 5
    y = clean + noise_factor * np.random.randn(n_samples)
    return X, y
492
+
493
+ # Function to generate house price data (using your existing data structure for consistency)
494
def get_house_data():
    """Load the house-price training data and return (features, SalePrice).

    Uses a small fixed feature subset for the demo.  Returns ``(None, None)``
    when the CSV is missing, unreadable, or lacks any required column.
    """
    features = ['GrLivArea', 'OverallQual', 'GarageCars', 'TotalBsmtSF', 'YearBuilt']
    try:
        df = pd.read_csv(os.path.join(DATA_DIR, 'train.csv'))
        required = features + ['SalePrice']
        # Bail out (rather than KeyError) when the CSV schema is wrong.
        if not all(col in df.columns for col in required):
            print("Warning: Missing one or more required columns in train.csv for house data.")
            return None, None
        return df[features], df['SalePrice']
    except FileNotFoundError:
        print(f"Error: train.csv not found in {DATA_DIR}. Please ensure your data is there.")
        return None, None
    except Exception as e:
        print(f"Error loading house data: {e}")
        return None, None
512
+
513
+ # Dictionary to hold all loaded models
514
+ loaded_models = {}
515
+
516
+ # Load logistic model and vectorizer for SMS
517
+ # vectorizer = joblib.load("Models/logvectorizer.pkl")
518
+ # model = joblib.load("Models/logistic_model.pkl")
519
+ # vectorizer = load_file("Models/logvectorizer.pkl")
520
+ # model = load_file("Models/logistic_model.pkl")
521
+
522
+
523
+ # # Load models once NB+DT+SVM is trained
524
+ # try:
525
+ # model = load_file("Models/logistic_model.pkl")
526
+ # # vectorizer = joblib.load("Models/logvectorizer.pkl")
527
+ # # model = joblib.load("Models/logistic_model.pkl")
528
+ # vectorizer = load_file("Models/vectorizer.pkl")
529
+ # print("✅ Model and vectorizer loaded into memory successfully!")
530
+ # except Exception as e:
531
+ # vectorizer = None
532
+ # model = None
533
+ # print(f"❌ Error: Could not load model or vectorizer. Please check your file paths. Error: {e}")
534
+ # #END NB+DT+SVM
535
+
536
+ # === Naive Bayes URL Spam Classifier (NB_spam.html) ===
537
+ # === Load Model & Vectorizer ===
538
+
539
+
540
+
541
+ # VT_API_KEY = os.getenv("VT_API_KEY")
542
+ # nb_model = load_file("Models/nb_url_model.pkl")
543
+ # vectorizer = load_file("Models/nb_url_vectorizer.pkl")
544
+
545
+ # if nb_model is not None and vectorizer is not None:
546
+ # print("✅ Loaded model and vectorizer.")
547
+ # else:
548
+ # print("❌ Model or vectorizer not found.")
549
+
550
+
551
+
552
+
553
+
554
+
555
def load_all_models():
    """
    Loads all necessary models into the loaded_models dictionary when the app starts.

    On any failure the 'supervised' entry is set to None so route handlers can
    detect a missing model instead of crashing.
    """
    global loaded_models

    # Keep the filename in a variable so the error messages below can always
    # reference it — the original code referenced a variable that was unbound
    # when load_file() itself raised, turning the handler into a NameError.
    model_file = "linear_model.pkl"
    try:
        loaded = load_file(model_file)

        # Debug: check what load_file actually returned.
        print("DEBUG -> supervised_model_path type:", type(loaded))

        # load_file may return either a path (string) or the model object.
        if isinstance(loaded, str):
            loaded_models['supervised'] = joblib.load(loaded)
        else:
            loaded_models['supervised'] = loaded

        print("Supervised model loaded successfully")

    except FileNotFoundError:
        print(f"Error: Supervised model file not found at {model_file}. "
              "Please run train_model.py first.")
        loaded_models['supervised'] = None  # Mark as not loaded
    except Exception as e:
        print(f"Error loading supervised model: {e}")
        loaded_models['supervised'] = None
585
+
586
+
587
+ # Load models when Flask app context is ready
588
+ with app.app_context():
589
+ load_all_models()
590
+
591
# --- Static page routes: each renders a template; ``active_page`` tells the
# shared nav bar which entry to highlight. ---
@app.route('/')
def frontpage():
    """Landing page."""
    return render_template('frontpage.html')
@app.route('/home')
def home():
    """Main home page."""
    return render_template('home.html')

@app.route('/Optimization')
def Optimization():
    """Optimization topic page."""
    return render_template('Optimization.html', active_page='Optimization')

@app.route('/supervise')
def supervise():
    """Supervised-learning overview page."""
    return render_template('supervise.html', active_page='supervise')


@app.route('/unsupervised')
def unsupervised():
    """Unsupervised-learning overview page."""
    return render_template('unsupervised.html', active_page='unsupervised')

# Semi-Supervised Learning page
@app.route('/semi-supervised')
def semi_supervised():
    """Semi-supervised-learning overview page."""
    return render_template('semi_supervised.html', active_page='semi_supervised')

# Reinforcement Learning page
@app.route('/reinforcement')
def reinforcement():
    """Reinforcement-learning overview page."""
    return render_template('reinforcement.html', active_page='reinforcement')

# Ensemble Learning page
@app.route('/ensemble')
def ensemble():
    """Ensemble-learning overview page."""
    return render_template('ensemble.html', active_page='ensemble')
625
+
626
+
627
@app.route('/supervised', methods=['GET', 'POST'])
def supervised():
    """Predict an exam score from hours studied with the preloaded linear model.

    GET renders the form; POST reads ``hours`` from the form and runs the
    'supervised' model loaded at startup.
    """
    prediction = None
    hours_studied_input = None

    # .get() avoids a KeyError if load_all_models never populated the key
    # (the original indexed the dict directly).
    if loaded_models.get('supervised') is None:
        return "Error: Supervised model could not be loaded. Please check server logs.", 500

    if request.method == 'POST':
        try:
            hours_studied_input = float(request.form['hours'])
            input_data = np.array([[hours_studied_input]])

            predicted_score = loaded_models['supervised'].predict(input_data)[0]
            prediction = round(predicted_score, 2)

        except ValueError:
            # Non-numeric form input.
            print("Invalid input for hours studied.")
            prediction = "Error: Please enter a valid number."
        except Exception as e:
            print(f"An error occurred during prediction: {e}")
            prediction = "Error during prediction."

    return render_template('supervised.html', prediction=prediction, hours_studied_input=hours_studied_input)
651
+
652
+
653
@app.route('/polynomial', methods=['GET', 'POST'])
def polynomial():
    """Polynomial-regression demo: predict a score from hours studied."""
    if request.method != 'POST':
        return render_template("poly.html")

    try:
        hours = float(request.form['hours'])

        # Load the fitted regressor and its polynomial feature transformer.
        model = load_file("poly_model.pkl")
        poly = load_file("poly_transform.pkl")

        features = poly.transform([[hours]])
        predicted = model.predict(features)[0]

        return render_template("poly.html", prediction=round(predicted, 2), hours=hours)

    except Exception as e:
        print(f"Error: {e}")
        return render_template("poly.html", error="Something went wrong.")
676
+
677
+
678
@app.route('/random_forest', methods=['GET', 'POST'])
def random_forest():
    """Random-forest demo: predict a score from hours studied."""
    if request.method != 'POST':
        return render_template("rf.html")

    try:
        hours = float(request.form['hours'])
        forest = load_file("rf_model.pkl")
        predicted = forest.predict([[hours]])[0]
        return render_template("rf.html", prediction=round(predicted, 2), hours=hours)
    except Exception as e:
        print(f"[ERROR] {e}")
        return render_template("rf.html", error="Prediction failed. Check your input.")
692
+
693
@app.route('/prediction_flow')
def prediction_flow():
    """Render the page describing the prediction pipeline."""
    return render_template('prediction_flow.html')
696
+
697
@app.route("/lasso", methods=["GET", "POST"])
def lasso():
    """Lasso house-price demo: five numeric features -> predicted SalePrice."""
    feature_names = ['OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'YearBuilt']

    if request.method != "POST":
        return render_template("lasso.html")

    try:
        raw = [float(request.form.get(name)) for name in feature_names]

        model = load_file("lasso_model.pkl")
        scaler = load_file("lasso_scaler.pkl")

        # The model was trained on standardised features; scale before predicting.
        scaled = scaler.transform([raw])
        price = model.predict(scaled)[0]
        return render_template("lasso.html", prediction=round(price, 2))

    except Exception as e:
        return render_template("lasso.html", error=str(e))
719
+
720
+
721
@app.route('/ridge', methods=['GET', 'POST'])
def ridge():
    """Ridge house-price demo: five numeric features -> predicted SalePrice."""
    prediction = None
    error = None

    # Load artefacts on every request (GET and POST) so a broken model
    # surfaces immediately as a 500.
    try:
        model = load_file("ridge_model.pkl")
        scaler = load_file("ridge_scaler.pkl")
    except Exception as e:
        return f"❌ Error loading Ridge model: {e}", 500

    if request.method == 'POST':
        try:
            feature_names = ['OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'YearBuilt']
            values = [float(request.form[name]) for name in feature_names]
            prediction = model.predict(scaler.transform([values]))[0]
        except Exception as e:
            error = str(e)

    return render_template('ridge.html', prediction=prediction, error=error)
749
+
750
@app.route('/dtr', methods=['GET', 'POST'])
def dtr():
    """Decision-tree page: GET renders the UI, POST echoes posted data points."""
    if request.method == 'GET':
        return render_template('dtr.html')

    # POST: accept a JSON payload of drawn data points and echo it back.
    payload = request.get_json()
    points = payload.get('dataPoints') if payload else None
    print("Received data:", points)
    return jsonify({'message': 'Data received successfully!', 'receivedData': points})
760
+
761
+
762
@app.route('/dtrg')
def drg():
    """Decision-tree game page."""
    return render_template('desiciongame.html')

# --- SVR Routes ---
@app.route('/svr') # This route is for the initial GET request to load the page
def svr_page():
    """SVR playground page; the interactive demo posts to /run_svr_demo."""
    return render_template('svr.html')
770
+
771
+ # @app.route('/decision-tree')
772
+ # def decision_tree():
773
+ # return render_template('decision-Tree.html')
774
+
775
+ # @app.route('/decision-tree-game')
776
+ # def decision_tree_game():
777
+ # return render_template('Decision-Tree-Game.html')
778
+
779
+
780
@app.route('/run_svr_demo', methods=['POST'])
def run_svr_demo():
    """Train an SVR model on a selected/generated/uploaded dataset and return
    metrics plus Plotly-ready traces as JSON.

    Accepts either a JSON body (predefined datasets) or multipart FormData
    (custom CSV upload). Returns {'mse', 'r2_score', 'support_vectors_count',
    'plot_data'} on success, or {'error': ...} with 400/500 on failure.
    """
    try:
        # Check if the request contains JSON (for predefined datasets) or FormData (for file uploads)
        if request.is_json:
            data = request.json
        else:
            # For FormData, data is accessed via request.form for fields, request.files for files
            data = request.form

        # Hyperparameters arrive as strings from forms; coerce to float.
        dataset_type = data.get('dataset_type', 'linear')
        kernel_type = data.get('kernel', 'rbf')
        C_param = float(data.get('C', 1.0))
        gamma_param = float(data.get('gamma', 0.1))
        epsilon_param = float(data.get('epsilon', 0.1))

        X, y = None, None

        if dataset_type == 'linear':
            X, y = generate_linear_data()
        elif dataset_type == 'non_linear':
            X, y = generate_non_linear_data()
        elif dataset_type == 'noisy':
            X, y = generate_noisy_data()
        elif dataset_type == 'house_data':
            X_house, y_house = get_house_data()
            if X_house is not None and not X_house.empty:
                X = X_house[['GrLivArea']].values # Only GrLivArea for simple 1D plotting
                y = y_house.values
            else:
                X, y = generate_linear_data() # Fallback if house data is missing/invalid
        elif dataset_type == 'custom_csv': # NEW: Handle custom CSV upload
            uploaded_file = request.files.get('file')
            x_column_name = data.get('x_column_name')
            y_column_name = data.get('y_column_name')

            if not uploaded_file or uploaded_file.filename == '':
                return jsonify({'error': 'No file uploaded for custom CSV.'}), 400
            if not x_column_name or not y_column_name:
                return jsonify({'error': 'X and Y column names are required for custom CSV.'}), 400

            try:
                # Read CSV into a pandas DataFrame from in-memory BytesIO object
                df = pd.read_csv(io.BytesIO(uploaded_file.read()))

                if x_column_name not in df.columns or y_column_name not in df.columns:
                    missing_cols = []
                    if x_column_name not in df.columns: missing_cols.append(x_column_name)
                    if y_column_name not in df.columns: missing_cols.append(y_column_name)
                    return jsonify({'error': f"Missing columns in uploaded CSV: {', '.join(missing_cols)}"}), 400

                X = df[[x_column_name]].values # Ensure X is 2D for scikit-learn
                y = df[y_column_name].values
            except Exception as e:
                return jsonify({'error': f"Error reading or processing custom CSV: {str(e)}"}), 400
        else: # Fallback for unknown dataset types
            X, y = generate_linear_data()


        if X is None or y is None or len(X) == 0:
            return jsonify({'error': 'Failed to generate or load dataset.'}), 500

        # Scale data — SVR is sensitive to feature/target scale, so both X and
        # y are standardized before training and inverse-transformed after.
        scaler_X = StandardScaler()
        scaler_y = StandardScaler()

        X_scaled = scaler_X.fit_transform(X)
        y_scaled = scaler_y.fit_transform(y.reshape(-1, 1)).flatten()

        X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_scaled, test_size=0.2, random_state=42)

        # Train SVR model
        svr_model = SVR(kernel=kernel_type, C=C_param, gamma=gamma_param, epsilon=epsilon_param)
        svr_model.fit(X_train, y_train)

        # Make predictions
        y_pred_scaled = svr_model.predict(X_test)

        # Inverse transform predictions to original scale for metrics
        y_pred = scaler_y.inverse_transform(y_pred_scaled.reshape(-1, 1)).flatten()
        y_test_original = scaler_y.inverse_transform(y_test.reshape(-1, 1)).flatten()

        # Calculate metrics
        mse = mean_squared_error(y_test_original, y_pred)
        r2 = r2_score(y_test_original, y_pred)
        support_vectors_count = len(svr_model.support_vectors_)

        # Prepare data for plotting
        plot_X_original = scaler_X.inverse_transform(X_scaled)
        plot_y_original = scaler_y.inverse_transform(y_scaled.reshape(-1, 1)).flatten()

        # Dense 1-D sweep across the feature range gives a smooth fit line.
        x_plot = np.linspace(plot_X_original.min(), plot_X_original.max(), 500).reshape(-1, 1)
        x_plot_scaled = scaler_X.transform(x_plot)
        y_plot_scaled = svr_model.predict(x_plot_scaled)
        y_plot_original = scaler_y.inverse_transform(y_plot_scaled.reshape(-1, 1)).flatten()

        # Epsilon tube is offset in *scaled* target units, then mapped back
        # to the original scale for display.
        y_upper_scaled = y_plot_scaled + epsilon_param
        y_lower_scaled = y_plot_scaled - epsilon_param
        y_upper_original = scaler_y.inverse_transform(y_upper_scaled.reshape(-1, 1)).flatten()
        y_lower_original = scaler_y.inverse_transform(y_lower_scaled.reshape(-1, 1)).flatten()

        plot_data = {
            'data': [
                {
                    'x': plot_X_original.flatten().tolist(),
                    'y': plot_y_original.tolist(),
                    'mode': 'markers',
                    'type': 'scatter',
                    'name': 'Original Data'
                },
                {
                    'x': x_plot.flatten().tolist(),
                    'y': y_plot_original.tolist(),
                    'mode': 'lines',
                    'type': 'scatter',
                    'name': 'SVR Prediction',
                    'line': {'color': 'red'}
                },
                {
                    'x': x_plot.flatten().tolist(),
                    'y': y_upper_original.tolist(),
                    'mode': 'lines',
                    'type': 'scatter',
                    'name': 'Epsilon Tube (Upper)',
                    'line': {'dash': 'dash', 'color': 'green'},
                    'fill': 'tonexty',
                    'fillcolor': 'rgba(0,128,0,0.1)'
                },
                {
                    'x': x_plot.flatten().tolist(),
                    'y': y_lower_original.tolist(),
                    'mode': 'lines',
                    'type': 'scatter',
                    'name': 'Epsilon Tube (Lower)',
                    'line': {'dash': 'dash', 'color': 'green'}
                }
            ],
            'layout': {
                'title': f'SVR Regression (Kernel: {kernel_type.upper()})',
                'xaxis': {'title': 'Feature Value'},
                'yaxis': {'title': 'Target Value'},
                'hovermode': 'closest'
            }
        }

        return jsonify({
            'mse': mse,
            'r2_score': r2,
            'support_vectors_count': support_vectors_count,
            'plot_data': plot_data
        })

    except Exception as e:
        print(f"Error in SVR demo: {e}")
        return jsonify({'error': str(e)}), 500
935
+
936
+
937
def clean_text(text):
    """Lowercase *text* and strip surrounding whitespace.

    NOTE(review): a richer clean_text is redefined later in this module and
    shadows this one at call time.
    """
    lowered = text.lower()
    return lowered.strip()
939
+
940
# --- Static page routes: gradient methods, boosting, Naive Bayes, SVM 3D ---
# Each route simply serves its visualization template.

# Gradient-descent route
@app.route('/gradient-descent')
def gradient_descent():
    return render_template('Gradient-Descen.html')
# new: three.js variant

@app.route('/gradient-descent-three')
def gradient_descent_three():
    return render_template('gradient-descent-three.html')


# Gradient-boosting route
@app.route('/gradient-boosting')
def gradient_boosting():
    return render_template('Gradient-Boosting.html')
# new: three.js variant
@app.route('/gradient-boosting-three')
def gradient_boosting_three():
    return render_template('gradient-boosting-three.html')



# XGBoost routes
@app.route('/xgboost-regression')
def xgboost_regression():
    return render_template('XGBoost-Regression.html')

@app.route('/xgboost-tree-three')
def xgboost_regression_three():
    return render_template('xboost-tree-three.html')

@app.route('/xgboost-graph-three2')
def xgboost_regression_three2():
    return render_template('xbost-graph-three.html')



# LightGBM route
@app.route('/lightgbm')
def lightgbm():
    return render_template('LightGBM-Regression.html')


@app.route('/Naive-Bayes-Simulator')
def Naive_Bayes_Simulator():
    return render_template('Naive-Bayes-Simulator.html')

@app.route('/svm-model-three')
def svm_model_three():
    return render_template('SVM_Simulator_3D.html')
990
+
991
+
992
+
993
# --- Static page routes: supervised/unsupervised/RL/ensemble topic pages ---
# Each route simply serves its visualization template.

# Neural network route for classification
@app.route('/neural-network-classification')
def neural_network_classification():
    return render_template('Neural-Networks-for-Classification.html')

@app.route('/Neural-Networks-for-Classification-three')
def Neural_Networks_for_Classification_three():
    return render_template('Neural-Networks-for-Classification-three.html')



# Hierarchical clustering routes

@app.route('/hierarchical-clustering')
def hierarchical_clustering():
    return render_template('Hierarchical-Clustering.html')

@app.route('/hierarchical-three')
def hierarchical_three():
    return render_template('Hierarchical-three.html')


# Gaussian mixture models routes
@app.route('/gaussian-mixture-models')
def gaussian_mixture_models():
    return render_template('Gaussian-Mixture-Models.html')

@app.route('/gaussian-mixture-three')
def gaussian_mixture_three():
    return render_template('gmm-threejs.html')




# Principal Component Analysis routes
@app.route('/pca')
def pca():
    return render_template('Principal-Component-Analysis.html')

@app.route('/pca-three')
def pca_three():
    return render_template('pca-threejs.html')



# t-SNE routes
@app.route('/t-sne')
def tsne():
    return render_template('t-SNE.html')

@app.route('/t-sne-three')
def tsne_three():
    return render_template('t-sne-three.html')


# Linear Discriminant Analysis routes
@app.route('/lda')
def lda():
    return render_template('Linear-Discriminant-Analysis.html')


@app.route('/lda-three')
def lda_three():
    return render_template('lda-three.html')


# Independent Component Analysis routes
@app.route('/ica')
def ica():
    return render_template('Independent-Component-Analysis.html')



@app.route('/ica-three')
def ica_three():
    return render_template('ica-threejs.html')


# Apriori routes
@app.route('/apriori')
def apriori():
    return render_template('Apriori-Algorithm.html')

@app.route('/apriori-three')
def apriori_three():
    return render_template('Apriori-Simulator-three.html')


# Eclat Algorithm routes
@app.route('/eclat')
def eclat():
    return render_template('Eclat-Algorithm.html')

@app.route('/eclat-three')
def eclat_three():
    return render_template('Eclat-Algorithm-three.html')

# Generative models route
@app.route('/generative-models')
def generative_models():
    return render_template('Generative-Models.html')

# Self-training route
@app.route('/self-training')
def self_training():
    return render_template('Self-Training.html')


# Transductive SVM route
@app.route('/transductive-svm')
def transductive_svm():
    return render_template('Transductive-SVM.html')


# Graph-based methods route
@app.route('/graph-based-methods')
def graph_based_methods():
    return render_template('Graph-Based-Method.html')

# Agent / Environment / State route
@app.route('/agent-environment-state')
def agent_environment_state():
    return render_template('Agent-Environment-State.html')

# Action and Policy route
@app.route('/action-and-policy')
def action_and_policy():
    return render_template('Action-and-Policy.html')

# Reward / Value Function route
@app.route('/reward-valuefunction')
def reward_valuefunction():
    return render_template('Reward-ValueFunction.html')

# Q-Learning route
@app.route('/q-learning')
def q_learning():
    return render_template('Q-Learning.html')

# Deep Reinforcement Learning route
@app.route('/deep-reinforcement-learning')
def deep_reinforcement_learning():
    return render_template('Deep-Reinforcement-Learning.html')


# Bagging route
@app.route('/bagging')
def bagging():
    return render_template('Bagging.html')

# Boosting route
@app.route('/boosting')
def boosting():
    return render_template('Boosting.html')

# Stacking route
@app.route('/stacking')
def stacking():
    return render_template('Stacking.html')

# Voting route
@app.route('/voting')
def voting():
    return render_template('Voting.html')
1157
+
1158
+ import re
1159
+
1160
+ # Load saved model and vectorizer
1161
+ # model = joblib.load("Models/logistic_model.pkl")
1162
+ # vectorizer = joblib.load("Models/logvectorizer.pkl")
1163
+
1164
+
1165
# Text cleaning
def clean_text(text):
    """Normalize free text for the spam vectorizer.

    Lowercases, replaces non-word characters with spaces, drops isolated
    single letters, collapses runs of whitespace, and trims the result.
    """
    normalized = text.lower()
    # Apply each (pattern, replacement) step in order; order matters because
    # later patterns clean up the spaces introduced by earlier ones.
    for pattern, replacement in (
        (r'\W', ' '),               # non-word chars -> space
        (r'\s+[a-zA-Z]\s+', ' '),   # isolated single letters -> space
        (r'\s+', ' '),              # collapse whitespace runs
    ):
        normalized = re.sub(pattern, replacement, normalized)
    return normalized.strip()
1172
+
1173
@app.route('/logistic', methods=['GET', 'POST'])
def logistic():
    """Spam classifier page: GET renders the form, POST classifies a message.

    Renders logistic.html with prediction ('Spam'/'Not Spam'/'Error'),
    confidence percentage, cleaned text, tokens, and raw probability.
    """
    prediction, confidence_percentage, cleaned, tokens, probability = None, None, None, None, None

    # Assets are fetched per request; load_file presumably caches — TODO confirm.
    model = load_file("logistic_model.pkl")
    vectorizer = load_file("logvectorizer.pkl")

    if request.method == "POST":
        msg = request.form.get('message', '')
        cleaned = clean_text(msg)
        tokens = cleaned.split()

        try:
            # Guard explicitly so a missing asset yields the 'Error' result
            # instead of an opaque AttributeError.
            if model is None or vectorizer is None:
                raise RuntimeError("logistic model assets not loaded")
            vector = vectorizer.transform([cleaned])
            probability = model.predict_proba(vector)[0][1]
            prediction = "Spam" if probability >= 0.5 else "Not Spam"
            confidence_percentage = round(probability * 100, 2)
        except Exception as e:
            print("Error predicting:", e)
            prediction = "Error"
            confidence_percentage = 0

    return render_template(
        "logistic.html",
        prediction=prediction,
        confidence_percentage=confidence_percentage,
        cleaned=cleaned,
        tokens=tokens,
        # BUG FIX: `if probability` treated a legitimate 0.0 probability as
        # missing; compare against None instead.
        probability=round(probability, 4) if probability is not None else None,
        source="sms"
    )
1208
+
1209
@app.route('/logistic-sms', methods=['POST'])
def logistic_sms():
    """JSON API flavour of /logistic: classify {'message': ...} as spam.

    Returns prediction, confidence, raw probability, cleaned text and tokens;
    500 with an error payload on failure.
    """
    try:
        data = request.get_json()
        msg = data.get('message', '')
        cleaned = clean_text(msg)
        tokens = cleaned.split()

        # BUG FIX: this handler previously read the module-level `model` /
        # `vectorizer` globals. `model` is never defined (its load is
        # commented out) and `vectorizer` is later rebound to the Naive
        # Bayes URL vectorizer, so requests failed with NameError or used
        # the wrong features. Load the logistic assets explicitly, exactly
        # as the /logistic route does.
        model = load_file("logistic_model.pkl")
        vectorizer = load_file("logvectorizer.pkl")
        if model is None or vectorizer is None:
            return jsonify({"error": "Internal server error",
                            "details": "model assets not available"}), 500

        vector = vectorizer.transform([cleaned])
        probability = model.predict_proba(vector)[0][1]
        prediction = "Spam" if probability >= 0.5 else "Not Spam"
        confidence_percentage = round(probability * 100, 2)

        return jsonify({
            "prediction": prediction,
            "confidence": confidence_percentage,
            "probability": round(probability, 4),
            "cleaned": cleaned,
            "tokens": tokens,
            "source": "json"
        })

    except Exception as e:
        print("Error in /logistic-sms:", e)
        return jsonify({"error": "Internal server error", "details": str(e)}), 500
1234
+
1235
+
1236
+
1237
+ # @app.route("/logistic", methods=["GET", "POST"])
1238
+ # def logistic():
1239
+ # prediction = None
1240
+ # error = None
1241
+ # if request.method == "POST":
1242
+ # try:
1243
+ # input_text = request.form.get("message")
1244
+
1245
+ # # Load the vectorizer and logistic model from Models folder
1246
+ # vectorizer = joblib.load("Models/vectorizer.pkl")
1247
+ # model = joblib.load("Models/logistic_model.pkl")
1248
+
1249
+ # # Transform input and make prediction
1250
+ # input_vector = vectorizer.transform([input_text])
1251
+ # result = model.predict(input_vector)[0]
1252
+
1253
+ # prediction = "✅ Not Spam" if result == 0 else "🚨 Spam"
1254
+ # except Exception as e:
1255
+ # error = str(e)
1256
+
1257
+ # return render_template("logistic.html", prediction=prediction, error=error)
1258
+
1259
+
1260
+
1261
+
1262
+
1263
+
1264
+ #---------- LOAD MODEL & LABELS ONCE (startup) ----------
1265
+ # MODEL_PATH = os.path.join("Models", "knnmodel.joblib") # adjust if your filename is different
1266
+ # LABELS_PATH = os.path.join("Models", "label_classes.npy")
1267
+
1268
+ # try:
1269
+ # model = joblib.load(MODEL_PATH)
1270
+ # except Exception as e:
1271
+ # # Keep model as None so routes can return clear error if it's missing
1272
+ # current_app.logger if hasattr(current_app, "logger") else print
1273
+ # print(f"Failed to load model from {MODEL_PATH}: {e}")
1274
+ # model = None
1275
+
1276
+ # try:
1277
+ # label_classes = np.load(LABELS_PATH, allow_pickle=True)
1278
+ # except Exception as e:
1279
+ # print(f"Failed to load label_classes from {LABELS_PATH}: {e}")
1280
+ # label_classes = None
1281
+
1282
HF_DATASET_REPO = "deedrop1140/qwen-ml-tutor-assets"


def load_knn_assets():
    """Download and deserialize the KNN digit model and its label array.

    Both files come from the Hugging Face dataset repo above; hf_hub_download
    caches them locally after the first fetch. Returns (model, label_classes),
    or (None, None) when anything goes wrong.
    """
    try:
        def _fetch(filename):
            # One shared download path for both artifacts.
            return hf_hub_download(
                repo_id=HF_DATASET_REPO,
                filename=filename,
                repo_type="dataset"
            )

        model = joblib.load(_fetch("knnmodel.joblib"))
        label_classes = np.load(_fetch("label_classes.npy"), allow_pickle=True)
        return model, label_classes

    except Exception as e:
        print("❌ Failed to load KNN assets from Hugging Face:", e)
        return None, None
1307
+
1308
+
1309
# ---------- KNN VISUAL ROUTES (unchanged) ----------
@app.route("/knn")
def knn_visual():
    # Serves the interactive 2-D KNN visualization page.
    return render_template("knn.html")
1313
+
1314
@app.route('/knn_visual_predict', methods=['POST'])
def knn_visual_predict():
    """Fit a KNN classifier on user-drawn points and classify a query point.

    Request JSON: {'points': [[x, y, label], ...], 'test_point': [x, y], 'k': int}.
    Returns the predicted label and the k nearest training points (for
    highlighting in the frontend).
    """
    payload = request.get_json()
    labeled = np.array(payload['points'])       # shape (N, 3): x, y, class
    query = np.array(payload['test_point'])     # shape (2,)
    k = int(payload['k'])

    features = labeled[:, :2]
    classes = labeled[:, 2].astype(int)

    classifier = KNeighborsClassifier(n_neighbors=k)
    classifier.fit(features, classes)
    predicted = classifier.predict([query])[0]

    # Recompute Euclidean distances by hand so the frontend can show which
    # neighbours drove the vote.
    distances = np.linalg.norm(features - query, axis=1)
    nearest = features[np.argsort(distances)[:k]]

    return jsonify({
        'prediction': int(predicted),
        'neighbors': nearest.tolist()
    })
1336
+
1337
# ---------- IMAGE PREDICTION ROUTE (fixed) ----------
@app.route("/knn_image")
def knn_image_page():
    # Serves the digit-image upload page backed by /predict_image.
    return render_template("knn_image.html")
1341
+
1342
@app.route("/predict_image", methods=["POST"])
def predict_image():
    """Classify an uploaded digit image with the KNN model.

    Expects a multipart upload under the 'image' key; the image is converted
    to 28x28 grayscale and flattened before prediction. Returns the label,
    confidence, and per-class probabilities.
    """
    # BUG FIX: the module-level `model`/`label_classes` globals were never
    # initialized (the loading code at module scope is commented out), so
    # this handler raised NameError on every request. Load the assets via
    # load_knn_assets() instead; hf_hub_download caches the files locally,
    # so repeat requests do not re-download them.
    model, label_classes = load_knn_assets()
    if model is None or label_classes is None:
        return jsonify({"error": "Model not loaded"}), 500

    if "image" not in request.files:
        return jsonify({"error": "No image uploaded"}), 400

    file = request.files["image"]

    try:
        # Grayscale + 28x28 matches the layout the model expects
        # (presumably MNIST-style training data — TODO confirm).
        image = Image.open(file.stream).convert("L")
        image = image.resize((28, 28))
        img_array = np.array(image).reshape(1, -1).astype("float32")
    except Exception as e:
        return jsonify({"error": f"Invalid image. {str(e)}"}), 400

    probs = model.predict_proba(img_array)[0]
    pred_index = np.argmax(probs)
    pred_label = label_classes[pred_index]
    confidence = round(float(probs[pred_index]) * 100, 2)

    return jsonify({
        "prediction": str(pred_label),
        "confidence": f"{confidence}%",
        "all_probabilities": {
            str(label_classes[i]): round(float(probs[i]) * 100, 2)
            for i in range(len(probs))
        }
    })
1372
+
1373
+
1374
@app.route("/rfc")
def random_forest_page():
    # Serves the Random Forest classifier playground page.
    return render_template("Random_Forest_Classifier.html") # Your beautiful HTML goes in rfc.html
1377
+
1378
@app.route('/rf_visual_predict', methods=['POST'])
def rf_visual_predict():
    """Train a small random forest on user-drawn points and classify a query.

    Request JSON: {'points': [[x, y, class], ...], 'test_point': [x, y]}.
    Returns the prediction plus a 100x100 decision-boundary grid for plotting.
    """
    try:
        payload = request.get_json()
        print("📦 Incoming JSON data:", payload)

        labeled_points = payload.get('points')
        test_point = payload.get('test_point')
        if not labeled_points or not test_point:
            return jsonify({"error": "Missing points or test_point"}), 400

        frame = pd.DataFrame(labeled_points, columns=['X1', 'X2', 'Class'])
        features = frame[['X1', 'X2']]
        targets = frame['Class']

        forest = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=42)
        forest.fit(features, targets)

        query = np.array(test_point).reshape(1, -1)
        predicted_class = int(forest.predict(query)[0])

        # Pad the plotting window by one unit on every side.
        x_lo, x_hi = features['X1'].min() - 1, features['X1'].max() + 1
        y_lo, y_hi = features['X2'].min() - 1, features['X2'].max() + 1
        grid_x, grid_y = np.meshgrid(np.linspace(x_lo, x_hi, 100),
                                     np.linspace(y_lo, y_hi, 100))

        # Classify every grid cell to trace the decision boundary.
        boundary = forest.predict(np.c_[grid_x.ravel(), grid_y.ravel()])
        boundary = boundary.reshape(grid_x.shape)

        return jsonify({
            'prediction': predicted_class,
            'decision_boundary_z': boundary.tolist(),
            'decision_boundary_x_coords': grid_x[0, :].tolist(),
            'decision_boundary_y_coords': grid_y[:, 0].tolist()
        })

    except Exception as e:
        import traceback
        print("❌ Exception in /rf_visual_predict:")
        traceback.print_exc()  # full stack trace to the server console
        return jsonify({"error": str(e)}), 500
1420
+
1421
@app.route("/liar")
def liar_input_page():
    # Serves the LIAR fake-statement input form.
    return render_template("rfc_liar_predict.html")
1424
+
1425
+
1426
+
1427
+
1428
+
1429
+
1430
+
1431
@app.route("/ref/liar/predictor", methods=["POST"])
def liar_predictor():
    """Score a statement's truthfulness (LIAR 6-class model) plus a BERT-based
    'scientifically possible' check.

    NOTE(review): this reads the module-level `vectorizer` and `model`
    globals, which further down this file are rebound to the Naive Bayes
    *URL* model/vectorizer — by request time they are almost certainly not
    the LIAR assets. Verify which artifacts should be loaded here (e.g.
    Models/liar_vectorizer.joblib). `ask_gemini` and `bert_checker` are
    defined elsewhere in this module.
    """
    try:
        data = request.get_json()
        statement = data.get("statement", "")

        if not statement:
            return jsonify({"success": False, "error": "Missing statement"}), 400

        try:
            # 🔍 LIAR Model Prediction
            features = vectorizer.transform([statement])
            prediction = model.predict(features)[0]

            # Maps the model's integer class to a human-readable label.
            liar_label_map = {
                0: "It can be false 🔥",
                1: "False ❌",
                2: "Mostly false but can be true 🤏",
                3: "Half True 🌓",
                4: "Mostly True 👍",
                5: "True ✅"
            }

            prediction_label = liar_label_map.get(int(prediction), "Unknown")

        except ValueError as ve:
            # A feature-count mismatch means the wrong vectorizer/model pair
            # was used; fall back to the Gemini API for a best-effort answer.
            if "features" in str(ve):
                # Fallback to Gemini API
                prediction_label = ask_gemini(statement)
            else:
                raise ve

        # 🧠 BERT-Based Scientific Check
        bert_result = bert_checker(statement)[0]
        bert_label = bert_result["label"]
        bert_score = round(bert_result["score"] * 100, 2)

        science_label_map = {
            "LABEL_0": "✅ Scientifically Possible",
            "LABEL_1": "❌ Scientifically Impossible"
        }

        scientific_check = f"{science_label_map.get(bert_label, bert_label)} ({bert_score:.2f}%)"

        return jsonify({
            "success": True,
            "prediction": prediction_label,
            "reason": "Predicted from linguistic and content-based patterns, or Gemini fallback.",
            "scientific_check": scientific_check
        })

    except Exception as e:
        traceback.print_exc()
        return jsonify({"success": False, "error": str(e)}), 500
1485
+
1486
+
1487
+
1488
# SVM visualization page
@app.route("/svm")
def svm_page():
    # Serves the interactive SVM decision-boundary page.
    return render_template("svm.html")
1492
+
1493
@app.route('/svm_visual_predict', methods=['POST'])
def svm_visual_predict():
    """Train an SVC on user-drawn points and classify a query point.

    Request JSON: {'points': [[x, y, class], ...], 'test_point': [x, y],
    'svm_type': 'linear'|'rbf', 'c_param': float, 'gamma_param': float}.
    Returns the prediction, a 100x100 decision-boundary grid, and the
    support vectors.
    """
    data = request.json
    labeled_points = data['points']
    test_point = data['test_point']
    svm_type = data['svm_type']
    c_param = float(data['c_param'])
    gamma_param = float(data['gamma_param']) # Will be ignored for linear kernel

    df = pd.DataFrame(labeled_points, columns=['X1', 'X2', 'Class'])
    X = df[['X1', 'X2']]
    y = df['Class']

    # 1. Train the SVM Classifier
    if svm_type == 'linear':
        svm_model = svm.SVC(kernel='linear', C=c_param, random_state=42)
    elif svm_type == 'rbf':
        svm_model = svm.SVC(kernel='rbf', C=c_param, gamma=gamma_param, random_state=42)
    else:
        return jsonify({'error': 'Invalid SVM type'}), 400

    svm_model.fit(X, y)

    # 2. Predict for the test point
    test_point_np = np.array(test_point).reshape(1, -1)
    prediction = int(svm_model.predict(test_point_np)[0])

    # 3. Get Support Vectors
    # support_ gives the indices of support vectors;
    # support_vectors_ gives the actual support vector coordinates
    support_vectors = svm_model.support_vectors_.tolist()

    # 4. Generate data for the decision boundary
    # Create a meshgrid of points to predict across the entire plot area
    x_min, x_max = X['X1'].min() - 1, X['X1'].max() + 1
    y_min, y_max = X['X2'].min() - 1, X['X2'].max() + 1

    # Extend range slightly to ensure test point is within boundary if it's an outlier
    x_min = min(x_min, test_point_np[0,0] - 1)
    x_max = max(x_max, test_point_np[0,0] + 1)
    y_min = min(y_min, test_point_np[0,1] - 1)
    y_max = max(y_max, test_point_np[0,1] + 1)

    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                         np.linspace(y_min, y_max, 100))

    # Predict class for each point in the meshgrid
    Z = svm_model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # Convert numpy arrays to lists for JSON serialization
    decision_boundary_z = Z.tolist()
    decision_boundary_x_coords = xx[0, :].tolist()
    decision_boundary_y_coords = yy[:, 0].tolist()

    return jsonify({
        'prediction': prediction,
        'decision_boundary_z': decision_boundary_z,
        'decision_boundary_x_coords': decision_boundary_x_coords,
        'decision_boundary_y_coords': decision_boundary_y_coords,
        'support_vectors': support_vectors
    })
1555
+
1556
+
1557
+
1558
+
1559
+
1560
+
1561
+
1562
@app.route('/api/explain', methods=['POST'])
def explain():
    """Proxy the POSTed JSON payload to the Gemini generateContent API.

    Returns Gemini's JSON response verbatim, or {'error': ...} with HTTP 500
    when the key is missing or the upstream call fails.
    """
    # In a real deployed environment, you'd secure your API key.
    # For Canvas, it's automatically injected if GEMINI_API_KEY is empty string.
    # If running locally and not in Canvas, set GEMINI_API_KEY in your environment variables.
    # (Rewrote `not os.getenv(...) == "development"` as `!=` — same semantics,
    # no precedence trap.)
    if not GEMINI_API_KEY and os.getenv("FLASK_ENV") != "development": # Allow empty key in dev for local testing
        return jsonify({'error': 'Missing API key'}), 500

    payload = request.get_json()

    try:
        response = requests.post(
            f"{GEMINI_URL}?key={GEMINI_API_KEY}",
            headers={"Content-Type": "application/json"},
            json=payload,
            # BUG FIX: without a timeout a stalled upstream call blocks the
            # worker indefinitely; fail after 30s and surface the error.
            timeout=30
        )
        response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
        return jsonify(response.json())
    except requests.exceptions.RequestException as e:
        app.logger.error(f"Error calling Gemini API: {e}") # Log the error on the server side
        return jsonify({'error': str(e)}), 500
1583
+
1584
@app.route('/decision_tree')
def decision_tree_page():
    # This route serves your Decision Tree visualization page
    # Ensure the HTML file name matches (e.g., 'decision_tree_viz.html' or 'decision_tree.html')
    return render_template('decision_tree.html') # Check your actual HTML file name here
1589
+
1590
+
1591
@app.route('/game')
def decision_tree_game():
    """Renders the interactive game page for decision trees."""
    return render_template('decision_tree_game.html')
1595
+
1596
@app.route('/dt_visual_predict', methods=['POST'])
def dt_visual_predict():
    """Train a decision tree on user-drawn points and classify a query point.

    Request JSON: {'points': [[x, y, class], ...], 'test_point': [x, y],
    'max_depth': int}. Returns the prediction and a 100x100
    decision-boundary grid; 400 on insufficient data, 500 on other errors.
    """
    try:
        data = request.json
        labeled_points = data['points']
        test_point = data['test_point']
        max_depth = int(data['max_depth'])

        # Convert labeled_points to a pandas DataFrame
        df = pd.DataFrame(labeled_points, columns=['X1', 'X2', 'Class'])
        X = df[['X1', 'X2']]
        y = df['Class']

        # Check if there's enough data to train
        if X.empty or len(X) < 2:
            return jsonify({'error': 'Not enough data points to train the model.'}), 400

        # 1. Train the Decision Tree Classifier (This is the "model" part)
        dt_model = DecisionTreeClassifier(max_depth=max_depth, random_state=42)
        dt_model.fit(X, y)

        # 2. Predict for the test point
        test_point_np = np.array(test_point).reshape(1, -1)
        prediction = int(dt_model.predict(test_point_np)[0])

        # 3. Generate data for the decision boundary
        x_min, x_max = X['X1'].min(), X['X1'].max()
        y_min, y_max = X['X2'].min(), X['X2'].max()

        # Add a buffer to the plot range to make sure points are not on the edge
        # And handle cases where min == max (e.g., all points have same X1 value)
        x_buffer = 1.0 if (x_max - x_min) == 0 else (x_max - x_min) * 0.1
        y_buffer = 1.0 if (y_max - y_min) == 0 else (y_max - y_min) * 0.1

        x_min -= x_buffer
        x_max += x_buffer
        y_min -= y_buffer
        y_max += y_buffer

        # Ensure test point is also comfortably within the range
        x_min = min(x_min, test_point_np[0,0] - 0.5)
        x_max = max(x_max, test_point_np[0,0] + 0.5)
        y_min = min(y_min, test_point_np[0,1] - 0.5)
        y_max = max(y_max, test_point_np[0,1] + 0.5)

        # Create a meshgrid for plotting the decision boundary
        xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                             np.linspace(y_min, y_max, 100))

        # Predict class for each point in the meshgrid using the trained model
        Z = dt_model.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)

        # Convert numpy arrays to lists for JSON serialization
        decision_boundary_z = Z.tolist()
        decision_boundary_x_coords = xx[0, :].tolist()
        decision_boundary_y_coords = yy[:, 0].tolist()

        return jsonify({
            'prediction': prediction,
            'decision_boundary_z': decision_boundary_z,
            'decision_boundary_x_coords': decision_boundary_x_coords,
            'decision_boundary_y_coords': decision_boundary_y_coords
        })
    except Exception as e:
        # This will print the actual error to your terminal
        print(f"An error occurred in /dt_visual_predict: {e}")
        # Return a more informative error message to the frontend
        return jsonify({'error': f'Backend Error: {str(e)}. Check server console for details.'}), 500
1665
+
1666
+ # --- Naive Bayes Routes ---
1667
+
1668
+ from urllib.parse import urlparse
1669
+ from sklearn.naive_bayes import GaussianNB
1670
+ from nltk.corpus import words
1671
+
1672
# NOTE(review): this rebinds the module-level name `vectorizer` — which
# earlier handlers (/logistic-sms, /ref/liar/predictor) read at request
# time — to the Naive Bayes *URL* vectorizer. Those routes will therefore
# use this vectorizer, almost certainly unintentionally. Consider a
# distinct name such as `nb_vectorizer` — TODO confirm intent.
nb_model = load_file("nb_url_model.pkl")
vectorizer = load_file("nb_url_vectorizer.pkl")

# if nb_model is not None and vectorizer is not None:
#     print("✅ Loaded Naive Bayes URL model")
# else:
#     nb_model, vectorizer = None, None
#     print("❌ vectorizer not found")
1680
+
1681
+
1682
+
1683
@app.route('/nb_spam')
def nb_spam_page():
    # Serves the Naive Bayes URL/spam checker page.
    return render_template('NB_spam.html')
1686
+
1687
+
1688
+ import re
1689
+ from urllib.parse import urlparse
1690
+ from spellchecker import SpellChecker
1691
+ import wordninja
1692
+
1693
+
1694
+
1695
# --- Constants for the rule-based URL phishing heuristics ---
# (whitelist of known-good domain tokens, trusted/bad TLD lists, suspicious
# file extensions, phishing keywords, and numbered regex rules).

# ---- Whitelist (your full one, unchanged) ----
# Domain name tokens treated as trustworthy; duplicates are harmless since
# set() deduplicates.
whitelist = set([
    # Search Engines
    'google', 'bing', 'yahoo', 'duckduckgo', 'baidu', 'ask',

    # Social Media
    'facebook', 'instagram', 'twitter', 'linkedin', 'snapchat', 'tiktok',
    'threads', 'pinterest', 'reddit', 'quora',

    # Communication Tools
    'whatsapp', 'telegram', 'skype', 'zoom', 'meet', 'discord',
    'teams', 'signal', 'messenger',

    # Global E-commerce
    'amazon', 'ebay', 'shopify', 'alibaba', 'walmart', 'target',
    'etsy', 'shein', 'bestbuy', 'costco', 'newegg',

    # Indian E-commerce / Services
    'flipkart', 'myntra', 'ajio', 'nykaa', 'meesho', 'snapdeal',
    'paytm', 'phonepe', 'mobikwik', 'zomato', 'swiggy', 'ola', 'uber', 'bookmyshow',
    'ixigo', 'makemytrip', 'yatra', 'redbus', 'bigbasket', 'grofers', 'blinkit',
    'universalcollegeofengineering',

    # Education / Productivity
    'youtube', 'docs', 'drive', 'calendar', 'photos', 'gmail', 'notion',
    'edx', 'coursera', 'udemy', 'khanacademy', 'byjus', 'unacademy',

    # News / Media / Tech
    'bbc', 'cnn', 'nyt', 'forbes', 'bloomberg', 'reuters',
    'ndtv', 'indiatimes', 'thehindu', 'hindustantimes', 'indiatoday',
    'techcrunch', 'verge', 'wired',

    # Streaming / Entertainment
    'netflix', 'hotstar', 'primevideo', 'spotify', 'gaana', 'wynk', 'saavn', 'voot',

    # Dev & Tools
    'github', 'stackoverflow', 'medium', 'gitlab', 'bitbucket',
    'adobe', 'figma', 'canva',

    # Financial / Banking
    'hdfcbank', 'icicibank', 'sbi', 'axisbank', 'kotak', 'boi', 'upi',
    'visa', 'mastercard', 'paypal', 'stripe', 'razorpay', 'phonepe', 'paytm',

    # Government / Utilities
    'gov', 'nic', 'irctc', 'uidai', 'mygov', 'incometax', 'aadhar', 'rbi',

    # Others Common
    'airtel', 'jio', 'bsnl', 'vi', 'speedtest', 'cricbuzz', 'espn', 'espncricinfo',
    'wikipedia', 'mozilla', 'opera', 'chrome', 'android', 'apple', 'windows', 'microsoft'
])

# ... your full whitelist from before ...


# ---- Trusted & Bad TLDs ----
# TLD suffixes that raise trust in a URL.
trusted_tlds = [
    '.gov', '.nic.in', '.edu', '.ac.in', '.mil', '.org', '.int',
    '.co.in', '.gov.in', '.res.in', '.net.in', '.nic.gov.in'
]

# Expanded Bad TLDs (Rule 4) — suffixes commonly abused by phishing sites.
bad_tlds = [
    '.xyz', '.tk', '.ml', '.ga', '.cf', '.top', '.gq', '.cn',
    '.ru', '.pw', '.bid', '.link', '.loan', '.party', '.science',
    '.stream', '.webcam', '.online', '.site', '.website', '.space',
    '.club', '.buzz', '.info'
]

# Suspicious extensions (Rule 13) — executable/script file endings in URLs.
suspicious_extensions = ['.exe', '.zip', '.rar', '.js', '.php', '.asp', '.aspx', '.jsp', '.sh']

# Phishing keywords (Rule 11, your full list) — words that commonly appear
# in credential-harvesting or bait URLs.
phishing_keywords = [
    'login', 'verify', 'secure', 'account', 'update', 'confirm', 'authenticate',
    'free', 'bonus', 'offer', 'prize', 'winner', 'gift', 'coupon', 'discount',
    'bank', 'paypal', 'creditcard', 'mastercard', 'visa', 'amex', 'westernunion',
    'signin', 'click', 'password', 'unlock', 'recover', 'validate', 'urgency',
    'limitedtime', 'expires', 'suspicious', 'alert', 'important', 'actionrequired'
]

# ---- Rules 5–14 ----
# Numbered regex heuristics, keyed by rule id:
#   5: raw IPv4 address as the host      6: '@' followed by a domain
#   7: classic bait phrases              8: host under .ru/.cn/.tk
#   9: very short host + short path     10: long digit runs
#  12: '@' embedded in a URL            13: double slash inside the path
#  14: query string with 5+ parameters
rules = {
    5: r"https?://\d{1,3}(\.\d{1,3}){3}",
    6: r"@[A-Za-z0-9.-]+\.[A-Za-z]{2,}",
    7: r"(free money|win now|click here)",
    8: r"https?://[^\s]*\.(ru|cn|tk)",
    9: r"https?://.{0,6}\..{2,6}/.{0,6}",
    10: r"[0-9]{10,}",
    12: r"https?://[^\s]*@[^\s]+",
    13: r"https?://[^\s]*//[^\s]+",
    14: r"https?://[^\s]*\?(?:[^=]+=[^&]*&){5,}",
}
1787
+
1788
+
1789
# ---- Gibberish Check Helper (Rule 15) ----
def is_gibberish_word(word):
    """Return True when *word* looks like gibberish (vowel ratio below 25%).

    BUGFIX: the original divided by ``len(word)`` unconditionally, raising
    ZeroDivisionError for an empty string; an empty token is now treated as
    gibberish.
    """
    if not word:
        return True
    vowels = "aeiou"
    v_count = sum(c in vowels for c in word)
    return v_count / len(word) < 0.25
1794
+
1795
+ # # ---- Utility: Extract words from URL ----
1796
+ # def extract_words(url):
1797
+ # parsed = urlparse(url if url.startswith(("http://", "https://")) else "http://" + url)
1798
+ # raw = parsed.netloc.replace('-', '') + parsed.path.replace('-', '')
1799
+ # # Split using wordninja
1800
+ # words = wordninja.split(raw.lower())
1801
+ # # Keep only alphabetic words of length >= 3
1802
+ # words = [w for w in words if w.isalpha() and len(w) >= 3]
1803
+ # return words
1804
# ---- Extract words from URL ----
def extract_words(url):
    """Split a URL's host and path into lowercase alphabetic word tokens.

    Non-word characters delimit raw tokens; every purely alphabetic token
    longer than two characters is segmented with wordninja, falling back to
    the whole lowercased token when segmentation yields nothing extra.
    """
    if not url.startswith(("http://", "https://")):
        url = "http://" + url
    parsed = urlparse(url)
    raw_tokens = re.split(r'\W+', parsed.netloc + parsed.path)
    collected = []
    for token in raw_tokens:
        # Skip short or non-alphabetic fragments (digits, empty strings, ...).
        if len(token) <= 2 or not token.isalpha():
            continue
        pieces = wordninja.split(token.lower())
        collected.extend(pieces if len(pieces) > 1 else [token.lower()])
    return collected
1816
+
1817
+
1818
# --- Your original predict function, now inside the Flask app ---
@app.route("/predict", methods=["POST"])
def predict():
    """Spell-check the words extracted from a submitted URL.

    Expects JSON ``{"url": "..."}``. Responds with ``prediction`` 1 (flagged)
    when any extracted word is unknown to the spell checker, else 0, plus a
    per-word ``steps`` list and a human-readable ``reason``.
    """
    try:
        data = request.get_json()
        url = data.get("url", "").lower()
        if not url:
            return jsonify({'error': 'No URL provided'}), 400

        # ---- SpellChecker using built-in dictionary ----
        spell = SpellChecker(distance=1)

        # ---- Extract words and check spelling ----
        words = extract_words(url)

        # Ignore known TLD tokens (e.g. '.gov.in' -> 'govin') so they are not
        # reported as misspellings.
        # BUGFIX: str.replace takes an optional integer *count* as its third
        # argument; the previous call passed "/" there, which raised
        # TypeError on every request.
        tlds_to_ignore = [tld.replace('.', '') for tld in trusted_tlds + bad_tlds]
        words_for_spellcheck = [w for w in words if w not in tlds_to_ignore]

        misspelled = spell.unknown(words_for_spellcheck)
        steps = [{"word": w, "valid": (w not in misspelled) or (w in tlds_to_ignore)} for w in words]

        if misspelled:
            return jsonify({
                "prediction": 1,
                "reason": f"🧾 Spelling errors: {', '.join(misspelled)}",
                "steps": steps
            })
        else:
            return jsonify({
                "prediction": 0,
                "reason": "✅ No spelling issues",
                "steps": steps
            })

    except Exception as e:
        # Any unexpected failure is reported as a 500 with the message inline.
        return jsonify({'error': f"An issue occurred during spell checking: {str(e)}"}), 500
1857
+
1858
+
1859
+
1860
+
1861
@app.route('/naive_bayes')
def naive_bayes_page():
    """Render the Naive Bayes visualization page."""
    return render_template('naive_bayes_viz.html')
1864
+
1865
# --- New Naive Bayes Prediction Route ---
@app.route('/nb_visual_predict', methods=['POST'])
def nb_visual_predict():
    """Train a Gaussian Naive Bayes model on user-supplied 2-D points and
    classify a test point, returning the prediction plus a 100x100 decision
    boundary grid for client-side plotting.

    Expects JSON with:
      - ``points``: list of [X1, X2, Class] rows
      - ``test_point``: [x, y]
    Returns 400 on insufficient data, 500 on unexpected errors.
    """
    try:
        data = request.json
        labeled_points = data['points']
        test_point = data['test_point']

        df = pd.DataFrame(labeled_points, columns=['X1', 'X2', 'Class'])
        X = df[['X1', 'X2']]
        y = df['Class']

        # Ensure enough data and at least two classes for classification
        if X.empty or len(X) < 2:
            return jsonify({'error': 'Not enough data points to train the model.'}), 400
        if len(y.unique()) < 2:
            return jsonify({'error': 'Need at least two different classes to classify.'}), 400

        # Train Gaussian Naive Bayes Model
        # GaussianNB is suitable for continuous data
        nb_model = GaussianNB()
        nb_model.fit(X, y)

        # Predict for the test point (reshaped to a single-sample 2-D array).
        test_point_np = np.array(test_point).reshape(1, -1)
        prediction = int(nb_model.predict(test_point_np)[0])

        # Generate data for the decision boundary
        x_min, x_max = X['X1'].min(), X['X1'].max()
        y_min, y_max = X['X2'].min(), X['X2'].max()

        # 10% margin around the data; fall back to a fixed 1.0 margin when
        # all points share a coordinate (zero range).
        x_buffer = 1.0 if x_max - x_min == 0 else (x_max - x_min) * 0.1
        y_buffer = 1.0 if y_max - y_min == 0 else (y_max - y_min) * 0.1

        x_min -= x_buffer
        x_max += x_buffer
        y_min -= y_buffer
        y_max += y_buffer

        # Widen the window further if the test point falls outside it.
        x_min = min(x_min, test_point_np[0,0] - 0.5)
        x_max = max(x_max, test_point_np[0,0] + 0.5)
        y_min = min(y_min, test_point_np[0,1] - 0.5)
        y_max = max(y_max, test_point_np[0,1] + 0.5)

        xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                             np.linspace(y_min, y_max, 100))

        if xx.size == 0 or yy.size == 0:
            return jsonify({'error': 'Meshgrid could not be created. Data range too narrow.'}), 400

        # Predict class for each point in the meshgrid
        # Use predict_proba and then argmax to get class for decision boundary coloring
        Z = nb_model.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)

        # Serialize grid as plain lists for the JSON response.
        decision_boundary_z = Z.tolist()
        decision_boundary_x_coords = xx[0, :].tolist()
        decision_boundary_y_coords = yy[:, 0].tolist()

        return jsonify({
            'prediction': prediction,
            'decision_boundary_z': decision_boundary_z,
            'decision_boundary_x_coords': decision_boundary_x_coords,
            'decision_boundary_y_coords': decision_boundary_y_coords
        })
    except Exception as e:
        print(f"An error occurred in /nb_visual_predict: {e}")
        return jsonify({'error': f'Backend Error: {str(e)}. Check server console for details.'}), 500
1933
+
1934
def check_with_virustotal(url):
    """Ask VirusTotal (API v3) whether *url* is flagged as malicious.

    Returns ``(True, reason)`` when at least one engine reports the URL as
    malicious, otherwise ``(False, None)``. This is a best-effort check: any
    network or parsing failure is logged and treated as "not malicious".
    """
    try:
        headers = {"x-apikey": VT_API_KEY}
        submit_url = "https://www.virustotal.com/api/v3/urls"

        # Submit the URL for scanning. Timeouts prevent a slow API from
        # hanging the request thread; raise_for_status surfaces HTTP errors
        # before we index into the JSON payload.
        response = requests.post(submit_url, headers=headers, data={"url": url}, timeout=10)
        response.raise_for_status()
        url_id = response.json()["data"]["id"]

        # Fetch the analysis result for the submitted URL.
        result = requests.get(f"{submit_url}/{url_id}", headers=headers, timeout=10)
        result.raise_for_status()
        data = result.json()

        stats = data["data"]["attributes"]["last_analysis_stats"]
        malicious_count = stats.get("malicious", 0)

        if malicious_count > 0:
            return True, f"☣️ VirusTotal flagged it as malicious ({malicious_count} engines)"
        return False, None
    except Exception as e:
        # Best-effort: log and fall through to a clean verdict.
        print(f"⚠️ VirusTotal error: {e}")
        return False, None
1959
+
1960
+
1961
+
1962
+
1963
+
1964
+
1965
+
1966
+
1967
+
1968
+
1969
@app.route('/kmeans-clustering')
def clustering():
    """Render the K-Means clustering page."""
    return render_template('clustering.html')
1972
+
1973
# ---- Image compression / cleaning (KMeans, optionally + DBSCAN) ----
@app.route('/kmeans-Dbscan-image', methods=['GET', 'POST'])
def compress_and_clean():
    """Compress an uploaded image with KMeans color quantization and,
    in 'clean' mode, additionally mark DBSCAN noise pixels black.

    Form fields: ``mode`` ('compress' or anything else for clean),
    ``k`` (clusters), ``eps`` and ``min_samples`` (DBSCAN), ``image`` (file).
    Saves the result to the upload folder and re-renders the page.
    """
    final_image = None

    if request.method == 'POST':
        try:
            # Get form values
            mode = request.form.get('mode', 'compress')
            k = int(request.form.get('k', 8))
            eps = float(request.form.get('eps', 0.6))
            min_samples = int(request.form.get('min_samples', 50))
            image_file = request.files.get('image')

            if image_file and image_file.filename != '':
                # Load image, normalize to RGB, and bound its size so the
                # clustering below stays tractable.
                img = Image.open(image_file).convert('RGB')
                max_size = (518, 518)
                img.thumbnail(max_size, Image.Resampling.LANCZOS)

                img_np = np.array(img)
                h, w, d = img_np.shape
                pixels = img_np.reshape(-1, d)

                # Apply KMeans: replace every pixel by its cluster centroid.
                kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
                kmeans.fit(pixels)
                clustered_pixels = kmeans.cluster_centers_[kmeans.labels_].astype(np.uint8)

                # Mode 1: Just Compress
                if mode == 'compress':
                    final_pixels = clustered_pixels.reshape(h, w, d)

                # Mode 2: Compress + Clean (KMeans + DBSCAN)
                else:
                    # Sample to avoid MemoryError
                    max_dbscan_pixels = 10000
                    if len(clustered_pixels) > max_dbscan_pixels:
                        idx = np.random.choice(len(clustered_pixels), max_dbscan_pixels, replace=False)
                        dbscan_input = clustered_pixels[idx]
                    else:
                        dbscan_input = clustered_pixels

                    # DBSCAN
                    # For DBSCAN: use only 10,000 pixels max
                    # NOTE(review): this reassignment duplicates the value set
                    # above; it is redundant but harmless.
                    max_dbscan_pixels = 10000

                    # Standardize RGB values before density clustering so eps
                    # is scale-independent.
                    scaler = StandardScaler()
                    pixels_scaled = scaler.fit_transform(dbscan_input)
                    db = DBSCAN(eps=eps, min_samples=min_samples)
                    labels = db.fit_predict(pixels_scaled)

                    # Clean noisy pixels: DBSCAN labels noise as -1 -> black.
                    clean_pixels = []
                    for i in range(len(dbscan_input)):
                        label = labels[i]
                        clean_pixels.append([0, 0, 0] if label == -1 else dbscan_input[i])

                    # Fill extra if sampling was used
                    # NOTE(review): when the image exceeded the sample cap,
                    # the tail of the output is padded with black instead of
                    # being mapped back to the original pixel positions —
                    # confirm this truncation is intentional.
                    if len(clustered_pixels) > max_dbscan_pixels:
                        clean_pixels.extend([[0, 0, 0]] * (len(clustered_pixels) - len(clean_pixels)))

                    final_pixels = np.array(clean_pixels, dtype=np.uint8).reshape(h, w, d)

                # Save final image
                final_img = Image.fromarray(final_pixels)
                final_image = 'compressed_clean.jpg'
                final_img.save(os.path.join(app.config['UPLOAD_FOLDER'], final_image), optimize=True, quality=90)

        except Exception as e:
            return f"⚠️ Error: {str(e)}", 500

    return render_template('kmean-dbscan-image.html', final_image=final_image)
2046
+
2047
# BUGFIX: the route function was named `DBSCAN`, shadowing sklearn's DBSCAN
# class at module level — so `DBSCAN(eps=..., min_samples=...)` inside
# compress_and_clean would call this view function at request time and crash.
# The function is renamed; `endpoint='DBSCAN'` preserves url_for('DBSCAN').
@app.route('/DBscan', endpoint='DBSCAN')
def dbscan_page():
    """Render the DBSCAN explainer page."""
    return render_template('DBSCAN.html')
2050
+
2051
+
2052
# ---- Test/quiz section routes ----
# Each view simply renders its template; `active_page` drives nav highlighting.

@app.route('/Test-layout')
def test():
    return render_template('Test-layout.html')

@app.route('/Test-home')
def Test_home():
    return render_template('Test-home.html',active_page='Test-home')

@app.route('/Test-supervise')
def Test_supervise():
    return render_template('Test/Test-supervise.html', active_page='Test-supervise')


@app.route('/Test-unsupervised')
def Test_unsupervised():
    return render_template('Test/Test-unsupervised.html', active_page='Test-unsupervised')

# Semi-Supervised Learning page
@app.route('/Test-semi-supervised')
def Test_semi_supervised():
    return render_template('Test/Test-semi_supervised.html', active_page='Test-semi_supervised')

# Reinforcement Learning page
@app.route('/Test-reinforcement')
def Test_reinforcement():
    return render_template('Test/Test-reinforcement.html', active_page='Test-reinforcement')

# Ensemble Learning page
@app.route('/Test-ensemble')
def Test_ensemble():
    return render_template('Test/Test-ensemble.html', active_page='Test-ensemble')

# Templates/Test/Quiz-Overview-Page.html
@app.route('/linear-Quiz-Overview-Page')
def linear_Test_quiz_overview():
    return render_template('Test/linear-Quiz-Overview-Page.html', active_page='linear-Quiz-Overview-Page')


@app.route('/Quiz-test')
def Quiz_test():
    return render_template('Test/Quiz-test.html', active_page='Quiz-test')
2096
# If the data file doesn't show or display, serve it through render_template,
# e.g. render_template('data/yourfile.json')
2097
+
2098
+ # @app.route('/Quiz-test/<topic>')
2099
+ # def quiz_topic(topic):
2100
+ # import json, os
2101
+ # count = int(request.args.get('count', 10))
2102
+ # try:
2103
+ # json_path = os.path.join(app.root_path, 'data', f'{topic}.json')
2104
+ # with open(json_path, 'r', encoding='utf-8') as f:
2105
+ # data = json.load(f) # This is your JSON array
2106
+
2107
+ # # Transform the JSON to match frontend expectations
2108
+ # transformed = []
2109
+ # for q in data[:count]:
2110
+ # transformed.append({
2111
+ # "id": q.get("id"),
2112
+ # "question": q.get("questionText"),
2113
+ # "options": q.get("options"),
2114
+ # "answer": q.get("options")[q.get("correctAnswerIndex")],
2115
+ # "explanation": q.get("explanation")
2116
+ # })
2117
+
2118
+ # return jsonify(transformed)
2119
+
2120
+ # except FileNotFoundError:
2121
+ # return "Topic not found", 404
2122
+ # except json.JSONDecodeError:
2123
+ # # return "Invalid JSON file", 500
2124
+
2125
+ # @app.route('/Quiz-test/<topic>')
2126
+ # def quiz_topic(topic):
2127
+ # import os, json
2128
+ # count = int(request.args.get('count', 10))
2129
+ # json_path = os.path.join(app.root_path, 'data', f'{topic}.json')
2130
+
2131
+ # try:
2132
+ # with open(json_path, 'r', encoding='utf-8') as f:
2133
+ # data = json.load(f)
2134
+
2135
+ # # If JSON is a dict with "questions" key
2136
+ # if isinstance(data, dict) and "questions" in data:
2137
+ # questions = data["questions"][:count]
2138
+ # elif isinstance(data, list):
2139
+ # questions = data[:count]
2140
+ # else:
2141
+ # return "Invalid JSON structure", 400
2142
+
2143
+ # return jsonify(questions)
2144
+ # except FileNotFoundError:
2145
+ # return "Topic not found", 404
2146
+ # except json.JSONDecodeError:
2147
+ # return "Invalid JSON file", 400
2148
+
2149
# ✅ API Route: Send JSON quiz data
@app.route('/api/quiz/<topic>')
def get_quiz(topic):
    """Return up to ``count`` quiz questions for *topic* as JSON.

    Reads ``data/<topic>.json`` (resolved against the app root), which is
    expected to contain a top-level ``questions`` list. Responds 400 for a
    non-integer ``count``, 404 for an unknown or malformed topic name, and
    500 for an unreadable quiz file.
    """
    # Validate count explicitly so a non-numeric query string yields a 400
    # instead of an unhandled ValueError (500).
    try:
        count = int(request.args.get('count', 10))
    except ValueError:
        return jsonify({'error': 'count must be an integer'}), 400

    # Reject path-like topic names so the route can never read outside data/.
    if not re.fullmatch(r'[A-Za-z0-9_-]+', topic):
        return jsonify({'error': 'Topic not found'}), 404

    # Resolve against the application root, not the process CWD (matches the
    # earlier quiz-route drafts in this file).
    file_path = os.path.join(app.root_path, 'data', f'{topic}.json')

    if not os.path.exists(file_path):
        return jsonify({'error': 'Topic not found'}), 404

    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except json.JSONDecodeError:
        return jsonify({'error': 'Invalid quiz file'}), 500

    questions = data.get('questions', [])[:count]
    return jsonify({'questions': questions})
2163
+
2164
+
2165
# ---- Per-topic quiz pages ----
# Static views only: each renders the topic's quiz template and passes
# `active_page` for nav highlighting.

@app.route('/polynomial-Quiz')
def polynomial_Test_quiz():
    return render_template('Test/polynomial-Quiz.html', active_page='polynomial-Quiz')

# -------------------------------
# Regression Algorithms
# -------------------------------
@app.route('/ridge-regression-test')
def ridge_regression_test():
    return render_template('Test/ridge-regression-test.html', active_page='ridge-regression-test')

@app.route('/lasso-regression-test')
def lasso_regression_test():
    return render_template('Test/lasso-regression-test.html', active_page='lasso-regression-test')

@app.route('/svr-test')
def svr_test():
    return render_template('Test/svr-r-test.html', active_page='svr-r-test')

@app.route('/decision-tree-regression-test')
def decision_tree_regression_test():
    return render_template('Test/decision-tree-regression-test.html', active_page='decision-tree-regression-test')

@app.route('/random-forest-regression-test')
def random_forest_regression_test():
    return render_template('Test/random-forest-regression-test.html', active_page='random-forest-regression-test')


# -------------------------------
# Classification Algorithms
# -------------------------------
@app.route('/logistic-regression-test')
def logistic_regression_test():
    return render_template('Test/logistic-regression-test.html', active_page='logistic-regression-test')

@app.route('/svm-c-test')
def svm_test():
    return render_template('Test/svm-c-test.html', active_page='svm-c-test')

@app.route('/decision-trees-c-test')
def decision_trees_test():
    return render_template('Test/decision-trees-c-test.html', active_page='decision-trees-c-test')

@app.route('/random-forest-c-test')
def random_forest_test():
    return render_template('Test/random-forest-c-test.html', active_page='random-forest-c-test')

@app.route('/gradient-descent-test')
def gradient_descent_test():
    return render_template('Test/gradient-descent-test.html', active_page='gradient-descent-test')

@app.route('/gradient-boosting-test')
def gradient_boosting_test():
    return render_template('Test/gradient-boosting-test.html', active_page='gradient-boosting-test')

@app.route('/xgboost-regression-test')
def xgboost_regression_test():
    return render_template('Test/xgboost-regression-test.html', active_page='xgboost-regression-test')

@app.route('/lightgbm-test')
def lightgbm_test():
    return render_template('Test/lightgbm-test.html', active_page='lightgbm-test')

@app.route('/knn-test')
def knn_test():
    return render_template('Test/knn-test.html', active_page='knn-test')

@app.route('/naive-bayes-test')
def naive_bayes_test():
    return render_template('Test/naive-bayes-test.html', active_page='naive-bayes-test')

@app.route('/neural-networks-test')
def neural_networks_test():
    return render_template('Test/neural-networks-test.html', active_page='neural-networks-test')


# -------------------------------
# Clustering
# -------------------------------
@app.route('/k-means-test')
def k_means_test():
    return render_template('Test/k-means-test.html', active_page='k-means-test')

@app.route('/hierarchical-clustering-test')
def hierarchical_clustering_test():
    return render_template('Test/hierarchical-clustering-test.html', active_page='hierarchical-clustering-test')

@app.route('/dbscan-test')
def dbscan_test():
    return render_template('Test/dbscan-test.html', active_page='dbscan-test')

@app.route('/gmm-test')
def gmm_test():
    return render_template('Test/gmm-test.html', active_page='gmm-test')


# -------------------------------
# Dimensionality Reduction
# -------------------------------
@app.route('/pca-test')
def pca_test():
    return render_template('Test/pca-test.html', active_page='pca-test')

@app.route('/tsne-test')
def tsne_test():
    return render_template('Test/tsne-test.html', active_page='tsne-test')

@app.route('/lda-test')
def lda_test():
    return render_template('Test/lda-test.html', active_page='lda-test')

@app.route('/ica-test')
def ica_test():
    return render_template('Test/ica-test.html', active_page='ica-test')


# -------------------------------
# Association Rule Learning
# -------------------------------
@app.route('/apriori-test')
def apriori_test():
    return render_template('Test/apriori-test.html', active_page='apriori-test')

@app.route('/eclat-test')
def eclat_test():
    return render_template('Test/eclat-test.html', active_page='eclat-test')


# -------------------------------
# Semi-Supervised Learning
# -------------------------------
@app.route('/generative-models-test')
def generative_models_test():
    return render_template('Test/generative-models-test.html', active_page='generative-models-test')

@app.route('/self-training-test')
def self_training_test():
    return render_template('Test/self-training-test.html', active_page='self-training-test')

@app.route('/transductive-svm-test')
def transductive_svm_test():
    return render_template('Test/transductive-svm-test.html', active_page='transductive-svm-test')

@app.route('/graph-based-methods-test')
def graph_based_methods_test():
    return render_template('Test/graph-based-methods-test.html', active_page='graph-based-methods-test')


# -------------------------------
# Reinforcement Learning
# -------------------------------
@app.route('/agent-environment-state-test')
def agent_environment_state_test():
    return render_template('Test/agent-environment-state-test.html', active_page='agent-environment-state-test')

@app.route('/action-policy-test')
def action_policy_test():
    return render_template('Test/action-policy-test.html', active_page='action-policy-test')

@app.route('/reward-value-function-test')
def reward_value_function_test():
    return render_template('Test/reward-value-function-test.html', active_page='reward-value-function-test')

@app.route('/q-learning-test')
def q_learning_test():
    return render_template('Test/q-learning-test.html', active_page='q-learning-test')

@app.route('/deep-reinforcement-learning-test')
def deep_reinforcement_learning_test():
    return render_template('Test/deep-reinforcement-learning-test.html', active_page='deep-reinforcement-learning-test')


# -------------------------------
# Ensemble Methods
# -------------------------------
@app.route('/bagging-test')
def bagging_test():
    return render_template('Test/bagging-test.html', active_page='bagging-test')

@app.route('/boosting-test')
def boosting_test():
    return render_template('Test/boosting-test.html', active_page='boosting-test')

@app.route('/stacking-test')
def stacking_test():
    return render_template('Test/stacking-test.html', active_page='stacking-test')

@app.route('/voting-test')
def voting_test():
    return render_template('Test/voting-test.html', active_page='voting-test')
2355
+
2356
+
2357
+
2358
+
2359
+
2360
+ # if __name__ == "__main__":
2361
+ # app.run(host="0.0.0.0", port=5000)
2362
+
2363
if __name__ == "__main__":
    # Bind to all interfaces; hosting platforms inject the PORT env var,
    # defaulting to 5000 for local development.
    port = int(os.environ.get("PORT", 5000))
    app.run(host="0.0.0.0", port=port)
2366
+
2367
+
2368
+
2369
+
2370
+
2371
+
2372
+
2373
+
auth/__init__.py ADDED
File without changes
auth/email.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask_mail import Message
2
+ from flask import current_app
3
+ from .extensions import mail
4
+
5
def send_otp(email, otp):
    """Email a one-time password to *email* via the app's configured mail sender."""
    message = Message(
        subject="Your OTP Code",
        sender=current_app.config["MAIL_USERNAME"],
        recipients=[email],
    )
    message.body = f"Your OTP is {otp}"
    mail.send(message)
auth/extensions.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from flask_mail import Mail
2
+
3
# Shared Flask-Mail instance; presumably bound to the app elsewhere via
# mail.init_app(app) — confirm in the app setup code.
mail = Mail()
auth/jwt_utils.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from flask_jwt_extended import create_access_token
2
+
3
def generate_jwt(user_id):
    """Return a signed JWT access token with *user_id* as its identity claim."""
    return create_access_token(identity=user_id)