Antigravity committed on
Commit
79fb5cc
·
1 Parent(s): 3e10138

Migrate frontend to React.js with premium dark UI

Browse files
.gitignore CHANGED
@@ -8,3 +8,5 @@ static/uploads/*
8
  pip_list.txt
9
  *.mp3
10
  dataset/
 
 
 
8
  pip_list.txt
9
  *.mp3
10
  dataset/
11
+ frontend/node_modules/
12
+ frontend/dist/
app.py CHANGED
@@ -1,4 +1,5 @@
1
- from flask import Flask, render_template, request
 
2
  from werkzeug.utils import secure_filename
3
  import os
4
  import numpy as np
@@ -14,11 +15,12 @@ try:
14
  except ImportError:
15
  load_model = None
16
 
17
- app = Flask(__name__)
 
18
  app.config['UPLOAD_FOLDER'] = 'static/uploads'
19
  os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
20
 
21
- # Dataset paths (Colab format - update these for local use)
22
  TRAIN_DIR = "/content/drive/MyDrive/AML-F24/Code/image_datset/image_datset/train"
23
  TEST_DIR = "/content/drive/MyDrive/AML-F24/Code/image_datset/image_datset/test"
24
 
@@ -32,13 +34,13 @@ translator = loader.translator_pipeline
32
  stt_model = loader.stt_pipeline
33
  zsl_model = loader.zsl_pipeline
34
  gender_classifier = loader.gender_classifier
35
- gender_model = loader.cnn_model # Custom CNN
36
 
37
  # Clustering Dependencies
38
  import pandas as pd
39
  from sklearn.cluster import KMeans, DBSCAN
40
  import matplotlib
41
- matplotlib.use('Agg') # Non-interactive backend
42
  import matplotlib.pyplot as plt
43
  import io
44
  import base64
@@ -48,402 +50,358 @@ from sklearn.preprocessing import StandardScaler
48
  from mlxtend.frequent_patterns import apriori, association_rules
49
  from mlxtend.preprocessing import TransactionEncoder
50
 
51
- # ---------------- ROUTES ---------------- #
 
 
52
 
53
- @app.route('/')
54
- def index():
55
- return render_template('index.html')
56
 
57
  # -------- GENDER CLASSIFICATION -------- #
58
- @app.route('/gender', methods=['GET', 'POST'])
59
  def gender():
60
- result = ""
61
- if request.method == 'POST':
62
- if 'image' not in request.files:
63
- return render_template('gender.html', result="No image uploaded")
64
-
65
- file = request.files['image']
66
- if file.filename == '':
67
- return render_template('gender.html', result="No image selected")
68
-
69
- if file:
70
- # Save file temporarily (use secure filename)
71
- filename = secure_filename(file.filename)
72
- filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
73
- file.save(filepath)
74
-
75
- if gender_classifier:
76
- try:
77
- img = Image.open(filepath)
78
- results = gender_classifier(img)
79
- # Extract the top result
80
- result = results[0]['label'].capitalize()
81
- print(f"Gender Classification Result: {result}")
82
- except Exception as e:
83
- result = f"Error processing image with transformers: {e}"
84
- elif gender_model:
85
- try:
86
- img = Image.open(filepath).convert('RGB')
87
- img = img.resize((128, 128))
88
- img_array = np.array(img).astype(np.float32) / 255.0
89
- # Correct shape for PyTorch CNN: (batch, channels, height, width)
90
- img_tensor = torch.from_numpy(img_array).permute(2, 0, 1).unsqueeze(0)
91
-
92
- with torch.no_grad():
93
- prediction = gender_model(img_tensor)
94
- result = "Male" if prediction.item() > 0.5 else "Female"
95
- except Exception as e:
96
- import traceback
97
- print(traceback.format_exc())
98
- result = f"Error processing image: {e}"
99
- else:
100
- result = "Gender model is not loaded (check console for details)."
101
-
102
- return render_template('gender.html', result=result)
103
 
104
  # -------- TEXT GENERATION -------- #
105
- @app.route('/textgen', methods=['GET', 'POST'])
106
  def textgen():
107
- result = ""
108
- if request.method == 'POST':
109
- text = request.form['prompt']
110
- if textgen_model:
111
- result = textgen_model(text, max_length=50)[0]['generated_text']
112
- else:
113
- result = "Text generation model not available"
114
- return render_template('textgen.html', generated_text=result)
115
 
116
  # -------- TRANSLATION -------- #
117
- @app.route('/translate', methods=['GET', 'POST'])
118
  def translate():
119
- result = ""
120
- if request.method == 'POST':
121
- text = request.form.get('text', '')
122
- if translator:
123
- result = translator(text)[0]['translation_text']
124
- else:
125
- result = "Translation model not available"
126
- return render_template('translate.html', translated_text=result)
127
 
128
- # -------- SENTIMENT (VOICE) -------- #
129
- @app.route('/sentiment', methods=['GET', 'POST'])
130
  def sentiment():
131
- result = ""
132
- if request.method == 'POST':
 
 
133
  typed_text = request.form.get('text', '').strip()
134
- audio_file = request.files.get('voice')
135
-
136
- text = ""
137
- if typed_text:
138
- text = typed_text
139
- elif audio_file:
140
- if audio_file.filename == '':
141
- return render_template('sentiment.html', result="No audio selected")
142
- audio_filename = secure_filename(audio_file.filename)
143
- audio_path = os.path.join(app.config['UPLOAD_FOLDER'], audio_filename)
144
- audio_file.save(audio_path)
145
-
146
- if stt_model is None:
147
- return render_template('sentiment.html', result="STT model not available")
148
-
149
- try:
150
- audio_array, sampling_rate = librosa.load(audio_path, sr=16000)
151
- # Ensure the audio array is 1D and float32
152
- audio_array = audio_array.astype(np.float32)
153
- stt_result = stt_model(audio_array)
154
- text = stt_result.get('text', '').strip()
155
- if not text:
156
- return render_template('sentiment.html', result="Could not understand audio")
157
- except Exception as e:
158
- return render_template('sentiment.html', result=f"STT processing error: {str(e)}")
159
- else:
160
- return render_template('sentiment.html', result="No input provided")
161
 
162
- # Sentiment Analysis Logic
163
- if sentiment_model is None:
164
- result = f"Analyzed: {text} | Status: Sentiment model not available"
165
- else:
166
- try:
167
- sentiment_data = sentiment_model(text)[0]
168
- label = sentiment_data.get('label', 'Unknown').capitalize()
169
- score = round(sentiment_data.get('score', 0) * 100, 1)
170
-
171
- # Question Detection
172
- questions_words = ["who", "what", "where", "when", "why", "how", "is", "are", "do", "does", "can", "could", "would", "should"]
173
- is_question = text.strip().endswith("?") or any(text.lower().startswith(q + " ") for q in questions_words)
174
-
175
- type_str = "Question" if is_question else "Statement"
176
- result = f"Text: \"{text}\" | Type: {type_str} | Sentiment: {label} (Confidence: {score}%)"
177
- except Exception as e:
178
- result = f"Sentiment analysis failed: {str(e)}"
179
-
180
- return render_template('sentiment.html', result=result)
181
-
182
- # -------- QUESTION ANSWERING (VOICE → VOICE) -------- #
183
- @app.route('/qa', methods=['GET', 'POST'])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  def qa():
185
- answer = ""
186
- context = ""
187
- question_text = ""
188
- if request.method == 'POST':
189
  context = request.form.get('context', '')
 
190
  audio_file = request.files.get('voice')
191
- typed_question = request.form.get('question', '').strip()
192
-
193
- if typed_question:
194
- question_text = typed_question
195
- elif audio_file:
196
- if audio_file.filename != '':
197
- audio_filename = secure_filename(audio_file.filename)
198
- audio_path = os.path.join(app.config['UPLOAD_FOLDER'], audio_filename)
199
- audio_file.save(audio_path)
200
-
201
- try:
202
- audio_array, sampling_rate = librosa.load(audio_path, sr=16000)
203
- audio_array = audio_array.astype(np.float32)
204
- stt_result = stt_model(audio_array)
205
- question_text = stt_result.get('text', '').strip()
206
- except Exception as e:
207
- print(f"STT Error in QA: {e}")
208
- answer = "Error processing your voice question."
209
-
210
- if not answer and question_text and context:
211
- if qa_model is None:
212
- answer = "Question-answering model not available"
213
- else:
214
- try:
215
- result = qa_model(question=question_text, context=context)
216
- answer = result.get('answer', str(result))
217
-
218
- try:
219
- tts = gTTS(answer)
220
- tts.save(os.path.join('static', 'answer.mp3'))
221
- except Exception as e:
222
- print(f"TTS failed: {e}")
223
- except Exception as e:
224
- answer = f"QA model error: {e}"
225
- elif not answer:
226
- answer = "Please provide both context and a question (typed or voice)."
227
-
228
- return render_template('qa.html', answer=answer, context=context, question=question_text)
229
 
230
- # -------- ZERO-SHOT LEARNING -------- #
231
- @app.route('/zsl', methods=['GET', 'POST'])
232
- def zsl():
233
- result = None
234
- if request.method == 'POST':
235
- text = request.form.get('text', '')
236
- labels = request.form.get('labels', '')
237
-
238
- if not text or not labels:
239
- return render_template('zsl.html', error="Both text and labels are required.")
240
-
241
- candidate_labels = [l.strip() for l in labels.split(',') if l.strip()]
242
-
243
- if zsl_model is None:
244
- return render_template('zsl.html', error="ZSL model not available.")
245
-
246
  try:
247
- output = zsl_model(text, candidate_labels=candidate_labels)
248
- # Find the index of the label with the highest score
249
- best_idx = np.argmax(output['scores'])
250
- result = {
251
- 'label': output['labels'][0], # BART-MNLI returns sorted
252
- 'score': round(output['scores'][0] * 100, 2),
253
- 'all_results': zip(output['labels'], [round(s * 100, 2) for s in output['scores']])
254
- }
255
  except Exception as e:
256
- return render_template('zsl.html', error=f"ZSL error: {str(e)}")
257
-
258
- return render_template('zsl.html', result=result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
259
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
260
 
261
  # -------- K-MEANS CLUSTERING -------- #
262
- @app.route('/clustering', methods=['GET', 'POST'])
263
  def clustering():
264
- plot_url = None
265
- cluster_info = None
266
- if request.method == 'POST':
267
- if 'file' not in request.files:
268
- return render_template('clustering.html', error="No file uploaded")
269
-
270
- file = request.files['file']
271
- n_clusters = int(request.form.get('clusters', 3))
272
-
273
- if file.filename == '':
274
- return render_template('clustering.html', error="No file selected")
275
-
276
- try:
277
- if file.filename.endswith('.csv'):
278
- df = pd.read_csv(file)
279
- else:
280
- df = pd.read_excel(file)
281
-
282
- # Keep only numeric columns
283
- numeric_df = df.select_dtypes(include=[np.number])
284
-
285
- if numeric_df.shape[1] < 2:
286
- return render_template('clustering.html', error="Dataset must have at least 2 numeric columns for clustering.")
287
-
288
- # Basic cleaning
289
- numeric_df = numeric_df.dropna()
290
-
291
- # K-Means
292
- kmeans = KMeans(n_clusters=n_clusters, random_state=42)
293
- df['Cluster'] = kmeans.fit_predict(numeric_df)
294
-
295
- # Create Plot (using first two numeric columns)
296
- plt.figure(figsize=(10, 6))
297
- scatter = plt.scatter(numeric_df.iloc[:, 0], numeric_df.iloc[:, 1], c=df['Cluster'], cmap='viridis', alpha=0.6)
298
- plt.colorbar(scatter, label='Cluster')
299
- plt.title(f'K-Means Clustering (K={n_clusters})')
300
- plt.xlabel(numeric_df.columns[0])
301
- plt.ylabel(numeric_df.columns[1])
302
- plt.grid(True, alpha=0.3)
303
-
304
- # Save plot to base64
305
- img = io.BytesIO()
306
- plt.savefig(img, format='png', bbox_inches='tight', transparent=True)
307
- img.seek(0)
308
- plot_url = base64.b64encode(img.getvalue()).decode()
309
- plt.close()
310
-
311
- # Cluster stats
312
- cluster_info = df.groupby('Cluster').size().to_dict()
313
-
314
- except Exception as e:
315
- return render_template('clustering.html', error=f"Clustering error: {str(e)}")
316
-
317
- return render_template('clustering.html', plot_url=plot_url, cluster_info=cluster_info)
318
 
319
  # -------- DBSCAN CLUSTERING -------- #
320
- @app.route('/dbscan', methods=['GET', 'POST'])
321
  def dbscan():
322
- plot_url = None
323
- cluster_info = None
324
- if request.method == 'POST':
325
- if 'file' not in request.files:
326
- return render_template('dbscan.html', error="No file uploaded")
327
-
328
- file = request.files['file']
329
- eps = float(request.form.get('eps', 0.5))
330
- min_samples = int(request.form.get('min_samples', 5))
331
-
332
- if file.filename == '':
333
- return render_template('dbscan.html', error="No file selected")
334
-
335
- try:
336
- if file.filename.endswith('.csv'):
337
- df = pd.read_csv(file)
338
- else:
339
- df = pd.read_excel(file)
340
-
341
- # Keep only numeric columns
342
- numeric_df = df.select_dtypes(include=[np.number])
343
-
344
- if numeric_df.shape[1] < 2:
345
- return render_template('dbscan.html', error="Dataset must have at least 2 numeric columns for clustering.")
346
-
347
- # Basic cleaning
348
- numeric_df = numeric_df.dropna()
349
-
350
- # DBSCAN with Scaling
351
- scaler = StandardScaler()
352
- scaled_data = scaler.fit_transform(numeric_df)
353
-
354
- dbscan_model = DBSCAN(eps=eps, min_samples=min_samples)
355
- df['Cluster'] = dbscan_model.fit_predict(scaled_data)
356
-
357
- # Create Plot
358
- plt.figure(figsize=(10, 6))
359
- scatter = plt.scatter(numeric_df.iloc[:, 0], numeric_df.iloc[:, 1], c=df['Cluster'], cmap='viridis', alpha=0.6)
360
- plt.colorbar(scatter, label='Cluster')
361
- plt.title(f'DBSCAN Clustering (eps={eps}, min_samples={min_samples}) - Scaled')
362
- plt.xlabel(numeric_df.columns[0])
363
- plt.ylabel(numeric_df.columns[1])
364
- plt.grid(True, alpha=0.3)
365
-
366
- # Save plot to base64
367
- img = io.BytesIO()
368
- plt.savefig(img, format='png', bbox_inches='tight', transparent=True)
369
- img.seek(0)
370
- plot_url = base64.b64encode(img.getvalue()).decode()
371
- plt.close()
372
-
373
- # Cluster stats
374
- cluster_info = df.groupby('Cluster').size().to_dict()
375
-
376
- except Exception as e:
377
- return render_template('dbscan.html', error=f"DBSCAN error: {str(e)}")
378
-
379
- return render_template('dbscan.html', plot_url=plot_url, cluster_info=cluster_info)
380
 
381
  # -------- A-PRIORI ASSOCIATION RULES -------- #
382
- @app.route('/apriori', methods=['GET', 'POST'])
383
  def apriori_route():
384
- rules_html = None
385
- if request.method == 'POST':
386
- if 'file' not in request.files:
387
- return render_template('apriori.html', error="No file uploaded")
388
-
389
- file = request.files['file']
390
- min_support = float(request.form.get('min_support', 0.1))
391
- min_threshold = float(request.form.get('min_threshold', 0.7))
392
- metric = request.form.get('metric', 'lift')
393
- has_header = request.form.get('has_header') == 'on'
394
-
395
- if file.filename == '':
396
- return render_template('apriori.html', error="No file selected")
397
-
398
- try:
399
- if file.filename.endswith('.csv'):
400
- df = pd.read_csv(file, header=0 if has_header else None)
401
- else:
402
- df = pd.read_excel(file, header=0 if has_header else None)
403
-
404
- # Convert to list of lists (transactions) - Handle nulls and whitespace
405
- transactions = []
406
- values = df.values.tolist()
407
- for row in values:
408
- # Filter out nan, None, and empty strings, and convert everything to string
409
- transaction = sorted(list(set([str(item).strip() for item in row if pd.notna(item) and str(item).strip() != ''])))
410
- if transaction:
411
- transactions.append(transaction)
412
-
413
- if not transactions:
414
- return render_template('apriori.html', error="No valid transactions found in file.")
415
-
416
- # Transaction Encoding
417
- te = TransactionEncoder()
418
- te_ary = te.fit(transactions).transform(transactions)
419
- encoded_df = pd.DataFrame(te_ary, columns=te.columns_)
420
-
421
- # Generate Frequent Itemsets
422
- frequent_itemsets = apriori(encoded_df, min_support=min_support, use_colnames=True)
423
-
424
- if frequent_itemsets.empty:
425
- return render_template('apriori.html', error="No frequent itemsets found. Try lowering min support.")
426
-
427
- # Generate Rules
428
- rules = association_rules(frequent_itemsets, metric=metric, min_threshold=min_threshold)
429
-
430
- if rules.empty:
431
- return render_template('apriori.html', error=f"No rules found for {metric} >= {min_threshold}. Try lowering threshold.")
432
-
433
- # Format rules for display
434
- rules['antecedents'] = rules['antecedents'].apply(lambda x: list(x))
435
- rules['consequents'] = rules['consequents'].apply(lambda x: list(x))
436
-
437
- # Selection of columns for display
438
- display_rules = rules[['antecedents', 'consequents', 'support', 'confidence', 'lift']]
439
- rules_html = display_rules.to_dict(orient='records')
440
-
441
- except Exception as e:
442
- import traceback
443
- print(traceback.format_exc())
444
- return render_template('apriori.html', error=f"A-priori error: {str(e)}")
445
-
446
- return render_template('apriori.html', rules=rules_html)
447
 
448
  if __name__ == '__main__':
449
- app.run(debug=True, use_reloader=False)
 
 
1
+ from flask import Flask, request, jsonify, send_from_directory
2
+ from flask_cors import CORS
3
  from werkzeug.utils import secure_filename
4
  import os
5
  import numpy as np
 
15
  except ImportError:
16
  load_model = None
17
 
18
+ app = Flask(__name__, static_folder='static')
19
+ CORS(app) # Enable CORS for React frontend
20
  app.config['UPLOAD_FOLDER'] = 'static/uploads'
21
  os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
22
 
23
+ # Dataset paths
24
  TRAIN_DIR = "/content/drive/MyDrive/AML-F24/Code/image_datset/image_datset/train"
25
  TEST_DIR = "/content/drive/MyDrive/AML-F24/Code/image_datset/image_datset/test"
26
 
 
34
  stt_model = loader.stt_pipeline
35
  zsl_model = loader.zsl_pipeline
36
  gender_classifier = loader.gender_classifier
37
+ gender_model = loader.cnn_model
38
 
39
  # Clustering Dependencies
40
  import pandas as pd
41
  from sklearn.cluster import KMeans, DBSCAN
42
  import matplotlib
43
+ matplotlib.use('Agg')
44
  import matplotlib.pyplot as plt
45
  import io
46
  import base64
 
50
  from mlxtend.frequent_patterns import apriori, association_rules
51
  from mlxtend.preprocessing import TransactionEncoder
52
 
53
+ # ============================================================
54
+ # API ROUTES
55
+ # ============================================================
56
 
57
@app.route('/api/health', methods=['GET'])
def health():
    """Liveness probe used by the React frontend to detect the backend."""
    return jsonify(status="ok")
 
61
  # -------- GENDER CLASSIFICATION -------- #
62
@app.route('/api/gender', methods=['POST'])
def gender():
    """Classify the gender of an uploaded face image (multipart field 'image').

    Prefers the transformers image-classification pipeline; falls back to the
    custom CNN when the pipeline is unavailable.

    Returns:
        {"result": <label>} on success, or {"error": ...} with 400/500.
    """
    if 'image' not in request.files:
        return jsonify({"error": "No image uploaded"}), 400

    file = request.files['image']
    if file.filename == '':
        return jsonify({"error": "No image selected"}), 400

    filename = secure_filename(file.filename)
    # secure_filename() can reduce a hostile/unicode-only name to '' — refuse
    # rather than calling file.save() on the upload directory path itself.
    if not filename:
        return jsonify({"error": "No image selected"}), 400
    filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(filepath)

    if gender_classifier:
        try:
            img = Image.open(filepath)
            results = gender_classifier(img)
            # Pipeline output is sorted best-first; take the top label.
            result = results[0]['label'].capitalize()
            return jsonify({"result": result})
        except Exception as e:
            return jsonify({"error": f"Error processing image: {e}"}), 500
    elif gender_model:
        try:
            import torch  # local import: torch only needed on the CNN path
            img = Image.open(filepath).convert('RGB')
            img = img.resize((128, 128))
            img_array = np.array(img).astype(np.float32) / 255.0
            # PyTorch expects (batch, channels, height, width).
            img_tensor = torch.from_numpy(img_array).permute(2, 0, 1).unsqueeze(0)
            with torch.no_grad():
                prediction = gender_model(img_tensor)
            result = "Male" if prediction.item() > 0.5 else "Female"
            return jsonify({"result": result})
        except Exception as e:
            return jsonify({"error": f"Error processing image: {e}"}), 500
    else:
        return jsonify({"error": "Gender model is not loaded"}), 500
 
 
 
 
 
 
 
 
 
98
 
99
  # -------- TEXT GENERATION -------- #
100
@app.route('/api/textgen', methods=['POST'])
def textgen():
    """Generate text from JSON {"prompt": ...} via the text-generation pipeline.

    Returns:
        {"generated_text": ...} on success, or {"error": ...} with 400/500.
    """
    # silent=True: a missing/invalid JSON body becomes None -> {} so we answer
    # with a clean JSON 400 instead of an unhandled framework error. Matches
    # the JSON handling in /api/sentiment and /api/qa.
    data = request.get_json(silent=True) or {}
    prompt = data.get('prompt', '')
    if not prompt:
        return jsonify({"error": "No prompt provided"}), 400
    if textgen_model:
        result = textgen_model(prompt, max_length=50)[0]['generated_text']
        return jsonify({"generated_text": result})
    return jsonify({"error": "Text generation model not available"}), 500
110
 
111
  # -------- TRANSLATION -------- #
112
@app.route('/api/translate', methods=['POST'])
def translate():
    """Translate JSON {"text": ...} with the translation pipeline.

    Returns:
        {"translated_text": ...} on success, or {"error": ...} with 400/500.
    """
    # silent=True: missing/invalid JSON becomes {} -> clean JSON 400 below,
    # matching the JSON handling in /api/sentiment and /api/qa.
    data = request.get_json(silent=True) or {}
    text = data.get('text', '')
    if not text:
        return jsonify({"error": "No text provided"}), 400
    if translator:
        result = translator(text)[0]['translation_text']
        return jsonify({"translated_text": result})
    return jsonify({"error": "Translation model not available"}), 500
122
 
123
+ # -------- SENTIMENT ANALYSIS -------- #
124
@app.route('/api/sentiment', methods=['POST'])
def sentiment():
    """Run sentiment analysis on typed text or a transcribed voice clip.

    Accepts either JSON {"text": ...} or multipart form data with a 'text'
    field and/or a 'voice' audio file. Typed text takes precedence over audio.

    Returns:
        {"result", "score", "text", "transcript"} on success,
        or {"error": ...} with 400/500.
    """
    uploaded_audio = request.files.get('voice')

    # Pull the typed text out of whichever body format the client sent.
    content_type = request.content_type or ''
    if 'multipart/form-data' in content_type:
        provided_text = request.form.get('text', '').strip()
    else:
        payload = request.get_json(silent=True) or {}
        provided_text = payload.get('text', '').strip()

    transcript = ""
    if provided_text:
        analysis_text = provided_text
    elif uploaded_audio and uploaded_audio.filename != '':
        safe_name = secure_filename(uploaded_audio.filename)
        saved_path = os.path.join(app.config['UPLOAD_FOLDER'], safe_name)
        uploaded_audio.save(saved_path)

        if stt_model is None:
            return jsonify({"error": "STT model not available"}), 500
        try:
            # Whisper-style pipelines expect 16 kHz float32 mono samples.
            samples, _rate = librosa.load(saved_path, sr=16000)
            samples = samples.astype(np.float32)
            analysis_text = stt_model(samples).get('text', '').strip()
            transcript = analysis_text
            if not analysis_text:
                return jsonify({"error": "Could not understand audio"}), 400
        except Exception as e:
            return jsonify({"error": f"STT processing error: {str(e)}"}), 500
    else:
        return jsonify({"error": "No input provided"}), 400

    if sentiment_model is None:
        return jsonify({"error": "Sentiment model not available"}), 500

    try:
        prediction = sentiment_model(analysis_text)[0]
        return jsonify({
            "result": prediction.get('label', 'Unknown').capitalize(),
            "score": round(prediction.get('score', 0) * 100, 1),
            "text": analysis_text,
            "transcript": transcript
        })
    except Exception as e:
        return jsonify({"error": f"Sentiment analysis failed: {str(e)}"}), 500
174
+
175
+ # -------- QUESTION ANSWERING -------- #
176
@app.route('/api/qa', methods=['POST'])
def qa():
    """Answer a question about a given context (extractive QA).

    Accepts JSON {"context", "question"} or multipart form data with
    'context', 'question' and/or a 'voice' audio file; the voice clip is
    transcribed only when no typed question was supplied. The answer is also
    rendered to static/answer.mp3 on a best-effort basis.

    Returns:
        {"answer", "score", "question", "audio_url"} on success,
        or {"error": ...} with 400/500.
    """
    if request.content_type and 'multipart/form-data' in request.content_type:
        context = request.form.get('context', '')
        question_text = request.form.get('question', '').strip()
        audio_file = request.files.get('voice')
    else:
        data = request.get_json(silent=True) or {}
        context = data.get('context', '')
        question_text = data.get('question', '').strip()
        audio_file = None  # a JSON body cannot carry an audio upload

    if not question_text and audio_file and audio_file.filename != '':
        audio_filename = secure_filename(audio_file.filename)
        audio_path = os.path.join(app.config['UPLOAD_FOLDER'], audio_filename)
        audio_file.save(audio_path)

        # Fail fast with a clear message instead of letting the call below
        # raise "'NoneType' object is not callable" when the STT pipeline
        # failed to load (same explicit check as /api/sentiment).
        if stt_model is None:
            return jsonify({"error": "STT model not available"}), 500
        try:
            audio_array, sampling_rate = librosa.load(audio_path, sr=16000)
            audio_array = audio_array.astype(np.float32)
            stt_result = stt_model(audio_array)
            question_text = stt_result.get('text', '').strip()
        except Exception as e:
            return jsonify({"error": f"STT Error: {e}"}), 500

    if not question_text or not context:
        return jsonify({"error": "Both context and question are required"}), 400

    if qa_model is None:
        return jsonify({"error": "QA model not available"}), 500

    try:
        result = qa_model(question=question_text, context=context)
        answer = result.get('answer', str(result))
        score = round(result.get('score', 0) * 100, 1)

        # Best-effort text-to-speech: QA still succeeds when gTTS fails
        # (e.g. no network access), audio_url just stays None.
        audio_url = None
        try:
            tts = gTTS(answer)
            tts.save(os.path.join('static', 'answer.mp3'))
            audio_url = '/static/answer.mp3'
        except Exception:
            pass

        return jsonify({
            "answer": answer,
            "score": score,
            "question": question_text,
            "audio_url": audio_url
        })
    except Exception as e:
        return jsonify({"error": f"QA model error: {e}"}), 500
230
+
231
+ # -------- ZERO-SHOT LEARNING -------- #
232
@app.route('/api/zsl', methods=['POST'])
def zsl():
    """Zero-shot classification of `text` against comma-separated `labels`.

    Expects JSON {"text": ..., "labels": "a, b, c"}.

    Returns:
        {"results", "best_label", "best_score"} on success,
        or {"error": ...} with 400/500.
    """
    # silent=True: malformed/missing JSON becomes {} and is reported as a
    # clean 400 below rather than an unhandled framework error.
    data = request.get_json(silent=True) or {}
    text = data.get('text', '')
    labels = data.get('labels', '')

    if not text or not labels:
        return jsonify({"error": "Both text and labels are required"}), 400

    candidate_labels = [l.strip() for l in labels.split(',') if l.strip()]
    # A string like ",," passes the check above but yields no usable labels.
    if not candidate_labels:
        return jsonify({"error": "Both text and labels are required"}), 400

    if zsl_model is None:
        return jsonify({"error": "ZSL model not available"}), 500

    try:
        output = zsl_model(text, candidate_labels=candidate_labels)
        # BART-MNLI pipeline output is already sorted best-first.
        results = [
            {"label": label, "score": round(score * 100, 2)}
            for label, score in zip(output['labels'], output['scores'])
        ]
        return jsonify({
            "results": results,
            "best_label": output['labels'][0],
            "best_score": round(output['scores'][0] * 100, 2)
        })
    except Exception as e:
        return jsonify({"error": f"ZSL error: {str(e)}"}), 500
254
 
255
  # -------- K-MEANS CLUSTERING -------- #
256
@app.route('/api/clustering', methods=['POST'])
def clustering():
    """K-Means clustering of an uploaded CSV/Excel file's numeric columns.

    Form fields: 'file' (required), 'clusters' (int, default 3).

    Returns:
        {"plot": <base64 PNG of the first two numeric columns>,
         "cluster_info": {label: row count}} on success,
        or {"error": ...} with 400/500.
    """
    if 'file' not in request.files:
        return jsonify({"error": "No file uploaded"}), 400

    file = request.files['file']
    # Validate the cluster count ourselves so a bad value yields a JSON 400
    # instead of an unhandled ValueError (HTML 500).
    try:
        n_clusters = int(request.form.get('clusters', 3))
    except (TypeError, ValueError):
        return jsonify({"error": "'clusters' must be an integer"}), 400
    if n_clusters < 1:
        return jsonify({"error": "'clusters' must be at least 1"}), 400

    if file.filename == '':
        return jsonify({"error": "No file selected"}), 400

    try:
        if file.filename.endswith('.csv'):
            df = pd.read_csv(file)
        else:
            df = pd.read_excel(file)

        numeric_df = df.select_dtypes(include=[np.number])
        if numeric_df.shape[1] < 2:
            return jsonify({"error": "Dataset must have at least 2 numeric columns"}), 400

        numeric_df = numeric_df.dropna()
        kmeans = KMeans(n_clusters=n_clusters, random_state=42)
        # Keep the labels separate: assigning them back into `df` fails with a
        # length mismatch whenever dropna() removed NaN rows.
        labels = kmeans.fit_predict(numeric_df)

        # Scatter plot of the first two numeric columns, colored by cluster.
        plt.figure(figsize=(10, 6))
        scatter = plt.scatter(numeric_df.iloc[:, 0], numeric_df.iloc[:, 1], c=labels, cmap='viridis', alpha=0.6)
        plt.colorbar(scatter, label='Cluster')
        plt.title(f'K-Means Clustering (K={n_clusters})')
        plt.xlabel(numeric_df.columns[0])
        plt.ylabel(numeric_df.columns[1])
        plt.grid(True, alpha=0.3)

        img = io.BytesIO()
        plt.savefig(img, format='png', bbox_inches='tight', transparent=True)
        img.seek(0)
        plot_url = base64.b64encode(img.getvalue()).decode()
        plt.close()

        cluster_info = pd.Series(labels).value_counts().sort_index().to_dict()
        return jsonify({"plot": plot_url, "cluster_info": cluster_info})
    except Exception as e:
        # Don't leak a half-built figure if anything failed mid-plot.
        plt.close('all')
        return jsonify({"error": f"Clustering error: {str(e)}"}), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
299
 
300
  # -------- DBSCAN CLUSTERING -------- #
301
@app.route('/api/dbscan', methods=['POST'])
def dbscan():
    """DBSCAN clustering of an uploaded CSV/Excel file's numeric columns.

    Form fields: 'file' (required), 'eps' (float, default 0.5),
    'min_samples' (int, default 5). Features are standardized before
    clustering; label -1 marks noise points.

    Returns:
        {"plot": <base64 PNG>, "cluster_info": {label: row count}} on success,
        or {"error": ...} with 400/500.
    """
    if 'file' not in request.files:
        return jsonify({"error": "No file uploaded"}), 400

    file = request.files['file']
    # Validate parameters ourselves so a bad value yields a JSON 400 instead
    # of an unhandled ValueError (HTML 500).
    try:
        eps = float(request.form.get('eps', 0.5))
        min_samples = int(request.form.get('min_samples', 5))
    except (TypeError, ValueError):
        return jsonify({"error": "'eps' must be a number and 'min_samples' an integer"}), 400

    if file.filename == '':
        return jsonify({"error": "No file selected"}), 400

    try:
        if file.filename.endswith('.csv'):
            df = pd.read_csv(file)
        else:
            df = pd.read_excel(file)

        numeric_df = df.select_dtypes(include=[np.number])
        if numeric_df.shape[1] < 2:
            return jsonify({"error": "Dataset must have at least 2 numeric columns"}), 400

        numeric_df = numeric_df.dropna()
        scaler = StandardScaler()
        scaled_data = scaler.fit_transform(numeric_df)

        dbscan_model = DBSCAN(eps=eps, min_samples=min_samples)
        # Keep the labels separate: assigning them back into `df` fails with a
        # length mismatch whenever dropna() removed NaN rows.
        labels = dbscan_model.fit_predict(scaled_data)

        # Plot the (unscaled) first two numeric columns, colored by cluster.
        plt.figure(figsize=(10, 6))
        scatter = plt.scatter(numeric_df.iloc[:, 0], numeric_df.iloc[:, 1], c=labels, cmap='viridis', alpha=0.6)
        plt.colorbar(scatter, label='Cluster')
        plt.title(f'DBSCAN (eps={eps}, min_samples={min_samples})')
        plt.xlabel(numeric_df.columns[0])
        plt.ylabel(numeric_df.columns[1])
        plt.grid(True, alpha=0.3)

        img = io.BytesIO()
        plt.savefig(img, format='png', bbox_inches='tight', transparent=True)
        img.seek(0)
        plot_url = base64.b64encode(img.getvalue()).decode()
        plt.close()

        cluster_info = pd.Series(labels).value_counts().sort_index().to_dict()
        return jsonify({"plot": plot_url, "cluster_info": cluster_info})
    except Exception as e:
        # Don't leak a half-built figure if anything failed mid-plot.
        plt.close('all')
        return jsonify({"error": f"DBSCAN error: {str(e)}"}), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
348
 
349
  # -------- A-PRIORI ASSOCIATION RULES -------- #
350
@app.route('/api/apriori', methods=['POST'])
def apriori_route():
    """Mine association rules from an uploaded transactions CSV/Excel file.

    Form fields: 'file' (required), 'min_support' (float, default 0.1),
    'min_threshold' (float, default 0.7), 'metric' (default 'lift'),
    'has_header' ('true' when the first row is a header).

    Returns:
        {"rules": [...], "count": N} on success, or {"error": ...} with 400/500.
    """
    if 'file' not in request.files:
        return jsonify({"error": "No file uploaded"}), 400

    file = request.files['file']
    # Validate the numeric parameters ourselves so a bad value yields a JSON
    # 400 instead of an unhandled ValueError (HTML 500).
    try:
        min_support = float(request.form.get('min_support', 0.1))
        min_threshold = float(request.form.get('min_threshold', 0.7))
    except (TypeError, ValueError):
        return jsonify({"error": "'min_support' and 'min_threshold' must be numbers"}), 400
    metric = request.form.get('metric', 'lift')
    has_header = request.form.get('has_header') == 'true'

    if file.filename == '':
        return jsonify({"error": "No file selected"}), 400

    try:
        if file.filename.endswith('.csv'):
            df = pd.read_csv(file, header=0 if has_header else None)
        else:
            df = pd.read_excel(file, header=0 if has_header else None)

        # One row per transaction: drop NaN/empty cells, dedupe, sort items.
        transactions = []
        for row in df.values.tolist():
            transaction = sorted({str(item).strip() for item in row
                                  if pd.notna(item) and str(item).strip() != ''})
            if transaction:
                transactions.append(transaction)

        if not transactions:
            return jsonify({"error": "No valid transactions found"}), 400

        # One-hot encode transactions for mlxtend's apriori.
        te = TransactionEncoder()
        te_ary = te.fit(transactions).transform(transactions)
        encoded_df = pd.DataFrame(te_ary, columns=te.columns_)

        frequent_itemsets = apriori(encoded_df, min_support=min_support, use_colnames=True)
        if frequent_itemsets.empty:
            return jsonify({"error": "No frequent itemsets found. Try lowering min support."}), 400

        rules = association_rules(frequent_itemsets, metric=metric, min_threshold=min_threshold)
        if rules.empty:
            return jsonify({"error": f"No rules found for {metric} >= {min_threshold}. Try lowering threshold."}), 400

        # frozensets are not JSON-serializable; convert to plain lists.
        rules['antecedents'] = rules['antecedents'].apply(lambda x: list(x))
        rules['consequents'] = rules['consequents'].apply(lambda x: list(x))

        display_rules = rules[['antecedents', 'consequents', 'support', 'confidence', 'lift']]
        rules_list = display_rules.to_dict(orient='records')

        return jsonify({"rules": rules_list, "count": len(rules_list)})
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        return jsonify({"error": f"A-priori error: {str(e)}"}), 500
403
+
 
 
 
 
 
 
 
 
 
 
 
404
 
405
if __name__ == '__main__':
    # Dev-server entry point. The reloader is disabled so the heavyweight
    # ML pipelines are loaded exactly once per process.
    print("Initializing models...")
    app.run(port=5000, debug=True, use_reloader=False)
frontend/.gitignore ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Logs
2
+ logs
3
+ *.log
4
+ npm-debug.log*
5
+ yarn-debug.log*
6
+ yarn-error.log*
7
+ pnpm-debug.log*
8
+ lerna-debug.log*
9
+
10
+ node_modules
11
+ dist
12
+ dist-ssr
13
+ *.local
14
+
15
+ # Editor directories and files
16
+ .vscode/*
17
+ !.vscode/extensions.json
18
+ .idea
19
+ .DS_Store
20
+ *.suo
21
+ *.ntvs*
22
+ *.njsproj
23
+ *.sln
24
+ *.sw?
frontend/README.md ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # React + Vite
2
+
3
+ This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
4
+
5
+ Currently, two official plugins are available:
6
+
7
+ - [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) (or [oxc](https://oxc.rs) when used in [rolldown-vite](https://vite.dev/guide/rolldown)) for Fast Refresh
8
+ - [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
9
+
10
+ ## React Compiler
11
+
12
+ The React Compiler is not enabled on this template because of its impact on dev & build performances. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
13
+
14
+ ## Expanding the ESLint configuration
15
+
16
+ If you are developing a production application, we recommend using TypeScript with type-aware lint rules enabled. Check out the [TS template](https://github.com/vitejs/vite/tree/main/packages/create-vite/template-react-ts) for information on how to integrate TypeScript and [`typescript-eslint`](https://typescript-eslint.io) in your project.
frontend/eslint.config.js ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// ESLint flat config for the React frontend (plain JS + JSX, no TypeScript).
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import { defineConfig, globalIgnores } from 'eslint/config'

export default defineConfig([
  // Build output is generated; never lint it.
  globalIgnores(['dist']),
  {
    files: ['**/*.{js,jsx}'],
    extends: [
      js.configs.recommended,
      reactHooks.configs.flat.recommended,
      reactRefresh.configs.vite,
    ],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
      parserOptions: {
        ecmaVersion: 'latest',
        ecmaFeatures: { jsx: true },
        sourceType: 'module',
      },
    },
    rules: {
      // Error on unused bindings, but ignore intentionally-unused uppercase
      // names (React components / constants referenced only by JSX tooling).
      'no-unused-vars': ['error', { varsIgnorePattern: '^[A-Z_]' }],
    },
  },
])
frontend/index.html ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <!-- Fix: replace the leftover Vite template title ("frontend") with the
         app's actual name, matching the brand shown in the sidebar. -->
    <title>Quantum Hub</title>
  </head>
  <body>
    <div id="root"></div>
    <script type="module" src="/src/main.jsx"></script>
  </body>
</html>
frontend/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
frontend/package.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "frontend",
3
+ "private": true,
4
+ "version": "0.0.0",
5
+ "type": "module",
6
+ "scripts": {
7
+ "dev": "vite",
8
+ "build": "vite build",
9
+ "lint": "eslint .",
10
+ "preview": "vite preview"
11
+ },
12
+ "dependencies": {
13
+ "axios": "^1.13.5",
14
+ "framer-motion": "^12.34.3",
15
+ "lucide-react": "^0.575.0",
16
+ "react": "^19.2.0",
17
+ "react-dom": "^19.2.0",
18
+ "react-router-dom": "^7.13.1"
19
+ },
20
+ "devDependencies": {
21
+ "@eslint/js": "^9.39.1",
22
+ "@tailwindcss/vite": "^4.2.1",
23
+ "@types/react": "^19.2.7",
24
+ "@types/react-dom": "^19.2.3",
25
+ "@vitejs/plugin-react": "^5.1.1",
26
+ "eslint": "^9.39.1",
27
+ "eslint-plugin-react-hooks": "^7.0.1",
28
+ "eslint-plugin-react-refresh": "^0.4.24",
29
+ "globals": "^16.5.0",
30
+ "tailwindcss": "^4.2.1",
31
+ "vite": "^7.3.1"
32
+ }
33
+ }
frontend/public/vite.svg ADDED
frontend/src/App.jsx ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { BrowserRouter as Router, Routes, Route } from 'react-router-dom';
2
+ import Layout from './components/Layout';
3
+ import Dashboard from './pages/Dashboard';
4
+ import GenderDiscovery from './pages/GenderDiscovery';
5
+ import TextSynthesis from './pages/TextSynthesis';
6
+ import NeuralTranslate from './pages/NeuralTranslate';
7
+ import EmpathyEngine from './pages/EmpathyEngine';
8
+ import CognitiveQA from './pages/CognitiveQA';
9
+ import ZeroShotLab from './pages/ZeroShotLab';
10
+ import DataClusters from './pages/DataClusters';
11
+ import DbscanLab from './pages/DbscanLab';
12
+ import AssociationRules from './pages/AssociationRules';
13
+ import './index.css';
14
+
15
+ function App() {
16
+ return (
17
+ <Router>
18
+ <Layout>
19
+ <Routes>
20
+ <Route path="/" element={<Dashboard />} />
21
+ <Route path="/gender" element={<GenderDiscovery />} />
22
+ <Route path="/textgen" element={<TextSynthesis />} />
23
+ <Route path="/translate" element={<NeuralTranslate />} />
24
+ <Route path="/sentiment" element={<EmpathyEngine />} />
25
+ <Route path="/qa" element={<CognitiveQA />} />
26
+ <Route path="/zsl" element={<ZeroShotLab />} />
27
+ <Route path="/clustering" element={<DataClusters />} />
28
+ <Route path="/dbscan" element={<DbscanLab />} />
29
+ <Route path="/apriori" element={<AssociationRules />} />
30
+ </Routes>
31
+ </Layout>
32
+ </Router>
33
+ );
34
+ }
35
+
36
+ export default App;
frontend/src/assets/react.svg ADDED
frontend/src/components/Layout.jsx ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import Sidebar from './Sidebar';
3
+ import { Menu } from 'lucide-react';
4
+ import { motion } from 'framer-motion';
5
+
6
+ export default function Layout({ children }) {
7
+ const [sidebarOpen, setSidebarOpen] = useState(false);
8
+
9
+ return (
10
+ <div className="flex min-h-screen">
11
+ <Sidebar isOpen={sidebarOpen} onClose={() => setSidebarOpen(false)} />
12
+
13
+ <main className="flex-1 min-h-screen">
14
+ {/* Top bar */}
15
+ <header className="flex items-center gap-4 px-4 sm:px-8 py-4 lg:py-6">
16
+ <button
17
+ onClick={() => setSidebarOpen(true)}
18
+ className="lg:hidden p-2 hover:bg-white/5 rounded-xl transition-colors"
19
+ >
20
+ <Menu size={22} />
21
+ </button>
22
+ <div className="flex-1" />
23
+ <div className="flex items-center gap-2 px-3 py-1.5 rounded-full bg-emerald-500/10 border border-emerald-500/20">
24
+ <span className="w-2 h-2 rounded-full bg-emerald-500 shadow-[0_0_8px_rgba(34,197,94,0.6)]" />
25
+ <span className="text-xs font-semibold text-emerald-400">Online</span>
26
+ </div>
27
+ </header>
28
+
29
+ {/* Content */}
30
+ <motion.div
31
+ key={location.pathname}
32
+ initial={{ opacity: 0, y: 12 }}
33
+ animate={{ opacity: 1, y: 0 }}
34
+ transition={{ duration: 0.35 }}
35
+ className="px-4 sm:px-8 pb-8"
36
+ >
37
+ {children}
38
+ </motion.div>
39
+ </main>
40
+ </div>
41
+ );
42
+ }
frontend/src/components/Sidebar.jsx ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import { NavLink, useLocation } from 'react-router-dom';
3
+ import { motion, AnimatePresence } from 'framer-motion';
4
+ import {
5
+ LayoutDashboard, User, PenTool, Languages, Smile, Brain,
6
+ Target, PieChart, Braces, ShoppingCart, Menu, X
7
+ } from 'lucide-react';
8
+
9
+ const navItems = [
10
+ { path: '/', label: 'Dashboard', icon: LayoutDashboard },
11
+ { path: '/gender', label: 'Gender Discovery', icon: User },
12
+ { path: '/textgen', label: 'Text Synthesis', icon: PenTool },
13
+ { path: '/translate', label: 'Neural Translate', icon: Languages },
14
+ { path: '/sentiment', label: 'Empathy Engine', icon: Smile },
15
+ { path: '/qa', label: 'Cognitive QA', icon: Brain },
16
+ { path: '/zsl', label: 'Zero-Shot Lab', icon: Target },
17
+ { path: '/clustering', label: 'Data Clusters', icon: PieChart },
18
+ { path: '/dbscan', label: 'DBSCAN Lab', icon: Braces },
19
+ { path: '/apriori', label: 'Association Rules', icon: ShoppingCart },
20
+ ];
21
+
22
+ export default function Sidebar({ isOpen, onClose }) {
23
+ return (
24
+ <>
25
+ {/* Mobile overlay */}
26
+ <AnimatePresence>
27
+ {isOpen && (
28
+ <motion.div
29
+ initial={{ opacity: 0 }}
30
+ animate={{ opacity: 1 }}
31
+ exit={{ opacity: 0 }}
32
+ className="fixed inset-0 bg-black/60 backdrop-blur-sm z-40 lg:hidden"
33
+ onClick={onClose}
34
+ />
35
+ )}
36
+ </AnimatePresence>
37
+
38
+ {/* Sidebar */}
39
+ <aside className={`
40
+ fixed top-0 left-0 h-screen w-[280px] z-50
41
+ bg-[#0a0f1c] border-r border-white/8
42
+ flex flex-col p-6 gap-6
43
+ transition-transform duration-300 ease-[cubic-bezier(0.4,0,0.2,1)]
44
+ lg:translate-x-0 lg:static
45
+ ${isOpen ? 'translate-x-0 shadow-2xl' : '-translate-x-full'}
46
+ `}>
47
+ {/* Brand */}
48
+ <div className="flex items-center gap-3 px-2">
49
+ <div className="w-10 h-10 rounded-xl bg-gradient-to-br from-cyan-400 to-purple-500 grid place-items-center font-extrabold text-white text-sm">
50
+ AI
51
+ </div>
52
+ <span className="text-lg font-bold tracking-tight">Quantum Hub</span>
53
+ <button onClick={onClose} className="lg:hidden ml-auto p-1 hover:bg-white/5 rounded-lg">
54
+ <X size={20} />
55
+ </button>
56
+ </div>
57
+
58
+ {/* Nav */}
59
+ <nav className="flex flex-col gap-1 flex-1 overflow-y-auto">
60
+ {navItems.map(({ path, label, icon: Icon }) => (
61
+ <NavLink
62
+ key={path}
63
+ to={path}
64
+ onClick={onClose}
65
+ className={({ isActive }) => `
66
+ flex items-center gap-3 px-4 py-3 rounded-xl text-sm font-medium
67
+ transition-all duration-200
68
+ ${isActive
69
+ ? 'bg-cyan-500/10 text-cyan-400'
70
+ : 'text-slate-400 hover:bg-white/5 hover:text-slate-200'
71
+ }
72
+ `}
73
+ >
74
+ <Icon size={18} />
75
+ <span>{label}</span>
76
+ </NavLink>
77
+ ))}
78
+ </nav>
79
+
80
+ {/* Footer */}
81
+ <div className="px-3 py-3 rounded-xl bg-white/3 border border-white/5 text-center">
82
+ <p className="text-xs text-slate-500">Powered by</p>
83
+ <p className="text-xs font-semibold gradient-text">Quantum AI Engine</p>
84
+ </div>
85
+ </aside>
86
+ </>
87
+ );
88
+ }
frontend/src/components/UI.jsx ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { motion } from 'framer-motion';
import { Loader2 } from 'lucide-react';

// Shared presentational building blocks used by every page.

// Page title with an optional one-line subtitle.
export function PageHeader({ title, subtitle }) {
  return (
    <div className="mb-8">
      <h1 className="text-2xl sm:text-3xl font-bold tracking-tight">{title}</h1>
      {subtitle && <p className="text-slate-400 mt-1 text-sm sm:text-base">{subtitle}</p>}
    </div>
  );
}

// Animated container for successful results; fades/slides in on mount.
export function ResultBox({ children, className = '' }) {
  return (
    <motion.div
      initial={{ opacity: 0, y: 16 }}
      animate={{ opacity: 1, y: 0 }}
      className={`mt-6 p-5 sm:p-6 rounded-2xl bg-gradient-to-br from-cyan-500/5 to-purple-500/5 border border-cyan-500/10 ${className}`}
    >
      {children}
    </motion.div>
  );
}

// Inline error banner; renders nothing when `message` is empty.
export function ErrorBox({ message }) {
  if (!message) return null;
  return (
    <motion.div
      initial={{ opacity: 0 }}
      animate={{ opacity: 1 }}
      className="mt-4 p-4 rounded-xl bg-red-500/10 border border-red-500/20 text-red-400 text-sm"
    >
      ⚠️ {message}
    </motion.div>
  );
}

// Primary action button; disables itself and shows a spinner while `loading`.
export function SubmitButton({ loading, children, onClick, type = 'submit' }) {
  return (
    <button
      type={type}
      onClick={onClick}
      disabled={loading}
      className="btn-quantum w-full py-3.5 px-6 text-base font-bold rounded-xl flex items-center justify-center gap-2 disabled:opacity-60"
    >
      {loading ? <Loader2 className="animate-spin" size={20} /> : null}
      {loading ? 'Processing...' : children}
    </button>
  );
}

// Dashed drop-zone-styled file picker; the native <input> stays hidden and the
// whole label area is clickable.
export function UploadZone({ accept, name, onChange, label, sublabel }) {
  return (
    <label className="block cursor-pointer">
      <div className="border-2 border-dashed border-white/10 rounded-2xl p-8 text-center hover:border-cyan-500/30 hover:bg-cyan-500/3 transition-all">
        <div className="text-3xl mb-3 gradient-text">☁️</div>
        <p className="font-semibold text-sm">{label || 'Click to upload'}</p>
        <p className="text-xs text-slate-500 mt-1">{sublabel || 'Drag and drop supported'}</p>
      </div>
      <input
        type="file"
        accept={accept}
        name={name}
        onChange={onChange}
        className="hidden"
      />
    </label>
  );
}
frontend/src/index.css ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* Global stylesheet: Tailwind entry point, design tokens, and the handful of
   reusable component classes (glass cards, gradient text, buttons, inputs). */
@import "tailwindcss";

@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800;900&display=swap');

/* Design tokens for the dark "quantum" theme */
:root {
  --bg-deep: #050a14;
  --sidebar-bg: #0a0f1c;
  --card-bg: rgba(15, 23, 42, 0.6);
  --glass-border: rgba(255, 255, 255, 0.08);
  --accent-cyan: #00d4ff;
  --accent-purple: #a855f7;
  --text-primary: #f0f2f5;
  --text-secondary: #64748b;
}

* {
  font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
}

body {
  background: var(--bg-deep);
  color: var(--text-primary);
  overflow-x: hidden;
}

/* Custom scrollbar */
::-webkit-scrollbar {
  width: 6px;
}

::-webkit-scrollbar-track {
  background: transparent;
}

::-webkit-scrollbar-thumb {
  background: rgba(255, 255, 255, 0.1);
  border-radius: 3px;
}

::-webkit-scrollbar-thumb:hover {
  background: rgba(255, 255, 255, 0.2);
}

/* Glass Card: frosted-glass panel used for forms and dashboard tiles */
.glass-card {
  background: var(--card-bg);
  backdrop-filter: blur(20px);
  -webkit-backdrop-filter: blur(20px);
  border: 1px solid var(--glass-border);
  border-radius: 20px;
  transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
}

.glass-card:hover {
  border-color: rgba(255, 255, 255, 0.15);
}

/* Gradient text (cyan -> purple brand gradient clipped to glyphs) */
.gradient-text {
  background: linear-gradient(135deg, #00d4ff, #a855f7);
  -webkit-background-clip: text;
  -webkit-text-fill-color: transparent;
  background-clip: text;
}

/* Quantum button: primary call-to-action style */
.btn-quantum {
  background: linear-gradient(135deg, #00d4ff 0%, #a855f7 100%);
  color: white;
  border: none;
  border-radius: 14px;
  font-weight: 700;
  cursor: pointer;
  transition: all 0.3s;
  box-shadow: 0 8px 25px -8px rgba(0, 212, 255, 0.4);
}

.btn-quantum:hover {
  transform: translateY(-2px);
  box-shadow: 0 12px 35px -8px rgba(0, 212, 255, 0.5);
}

.btn-quantum:active {
  transform: translateY(0);
}

/* Custom input styles shared by text inputs, selects, and textareas */
.quantum-input {
  width: 100%;
  background: rgba(0, 0, 0, 0.3);
  border: 1px solid var(--glass-border);
  border-radius: 14px;
  padding: 14px 18px;
  color: var(--text-primary);
  font-size: 1rem;
  transition: all 0.3s;
  outline: none;
}

.quantum-input:focus {
  border-color: var(--accent-cyan);
  box-shadow: 0 0 0 4px rgba(0, 212, 255, 0.1);
}

.quantum-input::placeholder {
  color: var(--text-secondary);
}
frontend/src/main.jsx ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
// React entry point: mounts <App /> into the #root element (see index.html).
import { StrictMode } from 'react'
import { createRoot } from 'react-dom/client'
import './index.css'
import App from './App.jsx'

// StrictMode double-invokes render logic in development to surface side effects.
createRoot(document.getElementById('root')).render(
  <StrictMode>
    <App />
  </StrictMode>,
)
frontend/src/pages/AssociationRules.jsx ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { useState } from 'react';
import axios from 'axios';
import { ShoppingCart } from 'lucide-react';
import { PageHeader, ResultBox, ErrorBox, SubmitButton, UploadZone } from '../components/UI';

/**
 * Apriori association-rule mining page. Uploads a transactional CSV/XLSX file
 * together with the mining parameters to POST /api/apriori and renders the
 * discovered rules as a table.
 */
export default function AssociationRules() {
  const [file, setFile] = useState(null);
  // Mining parameters; numeric fields hold raw input strings, which FormData
  // serialises as-is — the backend is expected to parse them.
  const [metric, setMetric] = useState('lift');
  const [minSupport, setMinSupport] = useState(0.1);
  const [minThreshold, setMinThreshold] = useState(0.7);
  const [hasHeader, setHasHeader] = useState(false);
  const [result, setResult] = useState(null);
  const [error, setError] = useState('');
  const [loading, setLoading] = useState(false);

  // Build the multipart request and submit it; keeps only one of
  // result/error populated at a time.
  const handleSubmit = async (e) => {
    e.preventDefault();
    if (!file) return setError('Please upload a file');
    setLoading(true); setError(''); setResult(null);

    const fd = new FormData();
    fd.append('file', file);
    fd.append('metric', metric);
    fd.append('min_support', minSupport);
    fd.append('min_threshold', minThreshold);
    fd.append('has_header', hasHeader);

    try {
      const res = await axios.post('/api/apriori', fd);
      setResult(res.data);
    } catch (err) {
      // Prefer the backend's JSON error message when available.
      setError(err.response?.data?.error || 'Request failed');
    } finally { setLoading(false); }
  };

  return (
    <div className="max-w-4xl mx-auto">
      <PageHeader title="Association Rules" subtitle="Discover hidden relationships in transactional datasets using Apriori." />

      <form onSubmit={handleSubmit} className="glass-card p-6 sm:p-8 space-y-5">
        <UploadZone accept=".csv,.xlsx" onChange={(e) => setFile(e.target.files[0])} label="Upload Transaction Data" sublabel=".CSV or .XLSX (each row = transaction)" />
        {file && <p className="text-sm text-cyan-400 text-center font-medium">📊 {file.name}</p>}

        <div className="grid grid-cols-1 sm:grid-cols-2 gap-4">
          <div>
            <label className="block text-sm font-semibold text-slate-300 mb-2">Metric</label>
            <select value={metric} onChange={(e) => setMetric(e.target.value)} className="quantum-input">
              <option value="lift">Lift</option>
              <option value="confidence">Confidence</option>
            </select>
          </div>
          <div>
            <label className="block text-sm font-semibold text-slate-300 mb-2">Min Support</label>
            <input type="number" value={minSupport} onChange={(e) => setMinSupport(e.target.value)} step="0.01" min="0.01" max="1" className="quantum-input" />
          </div>
          <div>
            <label className="block text-sm font-semibold text-slate-300 mb-2">Min Threshold</label>
            <input type="number" value={minThreshold} onChange={(e) => setMinThreshold(e.target.value)} step="0.1" min="0.1" className="quantum-input" />
          </div>
          <div className="flex items-end">
            <label className="flex items-center gap-3 cursor-pointer text-sm text-slate-300">
              <input type="checkbox" checked={hasHeader} onChange={(e) => setHasHeader(e.target.checked)} className="w-4 h-4 rounded" />
              File has header row
            </label>
          </div>
        </div>

        <SubmitButton loading={loading}>
          <ShoppingCart size={18} /> Generate Rules
        </SubmitButton>
      </form>

      <ErrorBox message={error} />

      {result && (
        <ResultBox>
          <p className="text-xs font-bold uppercase tracking-widest text-purple-400 mb-4">
            Mining Results: {result.count} rules discovered
          </p>
          <div className="overflow-x-auto -mx-2">
            <table className="w-full text-sm">
              <thead>
                <tr className="text-left text-xs uppercase tracking-wider text-slate-500 border-b border-white/10">
                  <th className="p-3">Antecedents (If)</th>
                  <th className="p-3">Consequents (Then)</th>
                  <th className="p-3">Support</th>
                  <th className="p-3">Confidence</th>
                  <th className="p-3">Lift</th>
                </tr>
              </thead>
              <tbody>
                {result.rules?.map((rule, i) => (
                  <tr key={i} className="border-b border-white/5 hover:bg-white/3">
                    <td className="p-3">
                      <span className="px-2 py-0.5 rounded text-xs font-semibold bg-blue-500/20 text-blue-400">
                        {rule.antecedents.join(', ')}
                      </span>
                    </td>
                    <td className="p-3">
                      <span className="px-2 py-0.5 rounded text-xs font-semibold bg-purple-500/20 text-purple-400">
                        {rule.consequents.join(', ')}
                      </span>
                    </td>
                    <td className="p-3 text-slate-300">{rule.support.toFixed(4)}</td>
                    <td className="p-3 text-slate-300">{rule.confidence.toFixed(4)}</td>
                    <td className="p-3 text-slate-300">{rule.lift.toFixed(4)}</td>
                  </tr>
                ))}
              </tbody>
            </table>
          </div>
        </ResultBox>
      )}
    </div>
  );
}
frontend/src/pages/CognitiveQA.jsx ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import axios from 'axios';
3
+ import { Brain, Volume2 } from 'lucide-react';
4
+ import { PageHeader, ResultBox, ErrorBox, SubmitButton, UploadZone } from '../components/UI';
5
+
6
+ export default function CognitiveQA() {
7
+ const [tab, setTab] = useState('text');
8
+ const [context, setContext] = useState('');
9
+ const [question, setQuestion] = useState('');
10
+ const [file, setFile] = useState(null);
11
+ const [result, setResult] = useState(null);
12
+ const [error, setError] = useState('');
13
+ const [loading, setLoading] = useState(false);
14
+
15
+ const handleSubmit = async (e) => {
16
+ e.preventDefault();
17
+ setLoading(true); setError(''); setResult(null);
18
+
19
+ try {
20
+ let res;
21
+ if (tab === 'text') {
22
+ res = await axios.post('/api/qa', { context, question });
23
+ } else {
24
+ const fd = new FormData();
25
+ fd.append('context', context);
26
+ fd.append('voice', file);
27
+ res = await axios.post('/api/qa', fd);
28
+ }
29
+ setResult(res.data);
30
+ } catch (err) {
31
+ setError(err.response?.data?.error || 'Request failed');
32
+ } finally { setLoading(false); }
33
+ };
34
+
35
+ const playAudio = () => {
36
+ if (result?.audio_url) {
37
+ const audio = new Audio(result.audio_url + '?v=' + Date.now());
38
+ audio.play();
39
+ }
40
+ };
41
+
42
+ return (
43
+ <div className="max-w-3xl mx-auto">
44
+ <PageHeader title="Cognitive QA" subtitle="Knowledge extraction engine with vocal synthesis output." />
45
+
46
+ <form onSubmit={handleSubmit} className="glass-card p-6 sm:p-8 space-y-5">
47
+ <div>
48
+ <label className="block text-sm font-semibold text-slate-300 mb-2">Context Repository</label>
49
+ <textarea
50
+ value={context}
51
+ onChange={(e) => setContext(e.target.value)}
52
+ placeholder="Paste the reference document here..."
53
+ className="quantum-input min-h-[180px] resize-y"
54
+ required
55
+ />
56
+ </div>
57
+
58
+ <div className="flex gap-2 border-b border-white/10 pb-3">
59
+ {['text', 'voice'].map(t => (
60
+ <button
61
+ key={t}
62
+ type="button"
63
+ onClick={() => setTab(t)}
64
+ className={`px-4 py-2 rounded-xl text-sm font-semibold transition-all ${tab === t ? 'bg-cyan-500/10 text-cyan-400' : 'text-slate-400 hover:bg-white/5'
65
+ }`}
66
+ >
67
+ {t === 'text' ? 'Type Question' : 'Voice Question'}
68
+ </button>
69
+ ))}
70
+ </div>
71
+
72
+ {tab === 'text' ? (
73
+ <div>
74
+ <label className="block text-sm font-semibold text-slate-300 mb-2">Query</label>
75
+ <input
76
+ type="text"
77
+ value={question}
78
+ onChange={(e) => setQuestion(e.target.value)}
79
+ placeholder="Ask a question about the context..."
80
+ className="quantum-input"
81
+ />
82
+ </div>
83
+ ) : (
84
+ <UploadZone accept="audio/*" onChange={(e) => setFile(e.target.files[0])} label="Record Your Query" sublabel="Upload audio for voice-to-voice QA" />
85
+ )}
86
+
87
+ <SubmitButton loading={loading}>
88
+ <Brain size={18} /> Execute Reasoning
89
+ </SubmitButton>
90
+ </form>
91
+
92
+ <ErrorBox message={error} />
93
+
94
+ {result && (
95
+ <ResultBox>
96
+ <p className="text-xs font-bold uppercase tracking-widest text-purple-400 mb-4">Reasoning Output</p>
97
+
98
+ <div className="flex flex-col sm:flex-row items-start sm:items-center gap-4 p-5 rounded-xl bg-black/20 border-l-4 border-cyan-400">
99
+ <div className="flex-1">
100
+ <span className="text-xs font-bold text-cyan-400 uppercase">Extracted Answer</span>
101
+ <p className="text-xl font-bold text-slate-100 mt-1">{result.answer}</p>
102
+ </div>
103
+ {result.audio_url && (
104
+ <button
105
+ type="button"
106
+ onClick={playAudio}
107
+ className="w-14 h-14 rounded-full bg-gradient-to-br from-cyan-500 to-purple-500 grid place-items-center text-white hover:scale-110 transition-transform flex-shrink-0"
108
+ >
109
+ <Volume2 size={22} />
110
+ </button>
111
+ )}
112
+ </div>
113
+
114
+ {result.score > 0 && (
115
+ <div className="mt-4">
116
+ <div className="h-2 bg-white/5 rounded-full overflow-hidden">
117
+ <div
118
+ className="h-full bg-gradient-to-r from-cyan-500 to-purple-500 rounded-full transition-all duration-1000"
119
+ style={{ width: `${result.score}%` }}
120
+ />
121
+ </div>
122
+ <div className="flex justify-between text-sm text-slate-400 mt-1">
123
+ <span>Confidence</span>
124
+ <span>{result.score}%</span>
125
+ </div>
126
+ </div>
127
+ )}
128
+ </ResultBox>
129
+ )}
130
+ </div>
131
+ );
132
+ }
frontend/src/pages/Dashboard.jsx ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { Link } from 'react-router-dom';
import { motion } from 'framer-motion';
import {
  User, PenTool, Languages, Smile, Brain,
  Target, PieChart, Braces, ShoppingCart
} from 'lucide-react';
import { PageHeader } from '../components/UI';

// Card metadata for each AI service: route, label, blurb, icon, and the
// Tailwind gradient used for the icon tile.
const services = [
  { path: '/gender', label: 'Gender Discovery', desc: 'Vision Transformer for high-precision gender classification.', icon: User, color: 'from-pink-500 to-rose-500' },
  { path: '/textgen', label: 'Text Synthesis', desc: 'Creative language generation powered by GPT-2 architecture.', icon: PenTool, color: 'from-cyan-500 to-blue-500' },
  { path: '/translate', label: 'Neural Translate', desc: 'Advanced English-to-Urdu translation using sequence models.', icon: Languages, color: 'from-emerald-500 to-teal-500' },
  { path: '/sentiment', label: 'Empathy Engine', desc: 'Analyze emotional valence in text and vocal inputs.', icon: Smile, color: 'from-amber-500 to-orange-500' },
  { path: '/qa', label: 'Cognitive QA', desc: 'Extract precise knowledge from context with DistilBERT.', icon: Brain, color: 'from-violet-500 to-purple-500' },
  { path: '/zsl', label: 'Zero-Shot Lab', desc: 'BART-based classification for any unseen categories.', icon: Target, color: 'from-red-500 to-pink-500' },
  { path: '/clustering', label: 'Data Clusters', desc: 'Automated pattern discovery using K-Means clustering.', icon: PieChart, color: 'from-sky-500 to-indigo-500' },
  { path: '/dbscan', label: 'DBSCAN Lab', desc: 'Density-based clustering for complex patterns and outliers.', icon: Braces, color: 'from-lime-500 to-emerald-500' },
  { path: '/apriori', label: 'Market Analytics', desc: 'Generate association rules from transactional data.', icon: ShoppingCart, color: 'from-fuchsia-500 to-violet-500' },
];

// Framer Motion variants: the grid staggers its children's entrance.
const container = {
  hidden: {},
  show: { transition: { staggerChildren: 0.06 } }
};

const item = {
  hidden: { opacity: 0, y: 20 },
  show: { opacity: 1, y: 0, transition: { duration: 0.4 } }
};

// Landing page: a staggered grid of cards linking to every service page.
export default function Dashboard() {
  return (
    <div>
      <PageHeader title="Quantum Analytics" subtitle="Select a specialized AI engine to begin processing." />

      <motion.div
        variants={container}
        initial="hidden"
        animate="show"
        className="grid grid-cols-1 sm:grid-cols-2 xl:grid-cols-3 gap-4 sm:gap-5"
      >
        {services.map((s) => (
          <motion.div key={s.path} variants={item}>
            <Link
              to={s.path}
              className="glass-card p-6 flex flex-col gap-4 group hover:translate-y-[-6px] block"
            >
              <div className={`w-12 h-12 rounded-xl bg-gradient-to-br ${s.color} grid place-items-center opacity-80 group-hover:opacity-100 transition-all group-hover:scale-110`}>
                <s.icon size={22} className="text-white" />
              </div>
              <div>
                <h3 className="text-lg font-bold mb-1">{s.label}</h3>
                <p className="text-sm text-slate-400 leading-relaxed">{s.desc}</p>
              </div>
              <div className="mt-auto pt-2">
                <span className="text-xs font-bold uppercase tracking-widest text-cyan-400 group-hover:underline">
                  Launch Engine →
                </span>
              </div>
            </Link>
          </motion.div>
        ))}
      </motion.div>
    </div>
  );
}
frontend/src/pages/DataClusters.jsx ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { useState } from 'react';
import axios from 'axios';
import { PieChart } from 'lucide-react';
import { PageHeader, ResultBox, ErrorBox, SubmitButton, UploadZone } from '../components/UI';

/**
 * K-Means clustering page. Uploads a CSV/XLSX dataset and a cluster count K
 * to POST /api/clustering, then renders the base64-encoded plot the backend
 * produces plus a per-cluster size summary.
 */
export default function DataClusters() {
  const [file, setFile] = useState(null);
  const [clusters, setClusters] = useState(3);   // K; raw input string is fine for FormData
  const [result, setResult] = useState(null);
  const [error, setError] = useState('');
  const [loading, setLoading] = useState(false);

  const handleSubmit = async (e) => {
    e.preventDefault();
    if (!file) return setError('Please upload a file');
    setLoading(true); setError(''); setResult(null);

    const fd = new FormData();
    fd.append('file', file);
    fd.append('clusters', clusters);

    try {
      const res = await axios.post('/api/clustering', fd);
      setResult(res.data);
    } catch (err) {
      // Prefer the backend's JSON error message when available.
      setError(err.response?.data?.error || 'Request failed');
    } finally { setLoading(false); }
  };

  return (
    <div className="max-w-3xl mx-auto">
      <PageHeader title="Data Clusters" subtitle="Unsupervised grouping of multivariate datasets using K-Means." />

      <form onSubmit={handleSubmit} className="glass-card p-6 sm:p-8 space-y-5">
        <UploadZone accept=".csv,.xlsx" onChange={(e) => setFile(e.target.files[0])} label="Upload Data Structure" sublabel=".CSV or .XLSX datasets" />
        {file && <p className="text-sm text-cyan-400 text-center font-medium">📊 {file.name}</p>}

        <div>
          <label className="block text-sm font-semibold text-slate-300 mb-2">Cluster Centroids (K)</label>
          <input
            type="number"
            value={clusters}
            onChange={(e) => setClusters(e.target.value)}
            min="2" max="10"
            className="quantum-input w-32"
          />
        </div>

        <SubmitButton loading={loading}>
          <PieChart size={18} /> Map Clusters
        </SubmitButton>
      </form>

      <ErrorBox message={error} />

      {result && (
        <ResultBox>
          <p className="text-xs font-bold uppercase tracking-widest text-purple-400 mb-4">Clustering Visualization</p>
          <div className="bg-white p-4 rounded-2xl">
            <img src={`data:image/png;base64,${result.plot}`} alt="Cluster Plot" className="w-full rounded-xl" />
          </div>
          {result.cluster_info && (
            <div className="mt-5 grid grid-cols-2 sm:grid-cols-3 gap-3">
              {Object.entries(result.cluster_info).map(([k, v]) => (
                <div key={k} className="p-3 rounded-xl bg-white/3 border-l-3 border-purple-500">
                  <span className="text-xs font-bold text-purple-400 uppercase">Cluster {k}</span>
                  <p className="text-lg font-extrabold mt-1">{v} Entities</p>
                </div>
              ))}
            </div>
          )}
        </ResultBox>
      )}
    </div>
  );
}
frontend/src/pages/DbscanLab.jsx ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import axios from 'axios';
3
+ import { Braces } from 'lucide-react';
4
+ import { PageHeader, ResultBox, ErrorBox, SubmitButton, UploadZone } from '../components/UI';
5
+
6
+ export default function DbscanLab() {
7
+ const [file, setFile] = useState(null);
8
+ const [eps, setEps] = useState(0.5);
9
+ const [minSamples, setMinSamples] = useState(5);
10
+ const [result, setResult] = useState(null);
11
+ const [error, setError] = useState('');
12
+ const [loading, setLoading] = useState(false);
13
+
14
+ const handleSubmit = async (e) => {
15
+ e.preventDefault();
16
+ if (!file) return setError('Please upload a file');
17
+ setLoading(true); setError(''); setResult(null);
18
+
19
+ const fd = new FormData();
20
+ fd.append('file', file);
21
+ fd.append('eps', eps);
22
+ fd.append('min_samples', minSamples);
23
+
24
+ try {
25
+ const res = await axios.post('/api/dbscan', fd);
26
+ setResult(res.data);
27
+ } catch (err) {
28
+ setError(err.response?.data?.error || 'Request failed');
29
+ } finally { setLoading(false); }
30
+ };
31
+
32
+ return (
33
+ <div className="max-w-3xl mx-auto">
34
+ <PageHeader title="DBSCAN Lab" subtitle="Density-based clustering to identify complex patterns and outliers." />
35
+
36
+ <form onSubmit={handleSubmit} className="glass-card p-6 sm:p-8 space-y-5">
37
+ <UploadZone accept=".csv,.xlsx" onChange={(e) => setFile(e.target.files[0])} label="Upload Data Structure" sublabel=".CSV or .XLSX datasets" />
38
+ {file && <p className="text-sm text-cyan-400 text-center font-medium">📊 {file.name}</p>}
39
+
40
+ <div className="grid grid-cols-1 sm:grid-cols-2 gap-4">
41
+ <div>
42
+ <label className="block text-sm font-semibold text-slate-300 mb-2">Epsilon (eps)</label>
43
+ <input type="number" value={eps} onChange={(e) => setEps(e.target.value)} step="0.01" min="0.01" className="quantum-input" />
44
+ </div>
45
+ <div>
46
+ <label className="block text-sm font-semibold text-slate-300 mb-2">Min Samples</label>
47
+ <input type="number" value={minSamples} onChange={(e) => setMinSamples(e.target.value)} min="1" className="quantum-input" />
48
+ </div>
49
+ </div>
50
+
51
+ <SubmitButton loading={loading}>
52
+ <Braces size={18} /> Run DBSCAN
53
+ </SubmitButton>
54
+ </form>
55
+
56
+ <ErrorBox message={error} />
57
+
58
+ {result && (
59
+ <ResultBox>
60
+ <p className="text-xs font-bold uppercase tracking-widest text-purple-400 mb-4">DBSCAN Visualization</p>
61
+ <div className="bg-white p-4 rounded-2xl">
62
+ <img src={`data:image/png;base64,${result.plot}`} alt="DBSCAN Plot" className="w-full rounded-xl" />
63
+ </div>
64
+ {result.cluster_info && (
65
+ <div className="mt-5 grid grid-cols-2 sm:grid-cols-3 gap-3">
66
+ {Object.entries(result.cluster_info).map(([k, v]) => (
67
+ <div key={k} className="p-3 rounded-xl bg-white/3 border-l-3 border-purple-500">
68
+ <span className="text-xs font-bold text-purple-400 uppercase">{k === '-1' ? 'Noise' : `Cluster ${k}`}</span>
69
+ <p className="text-lg font-extrabold mt-1">{v} Entities</p>
70
+ </div>
71
+ ))}
72
+ </div>
73
+ )}
74
+ </ResultBox>
75
+ )}
76
+ </div>
77
+ );
78
+ }
frontend/src/pages/EmpathyEngine.jsx ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import axios from 'axios';
3
+ import { Smile, Frown, Meh, Mic } from 'lucide-react';
4
+ import { PageHeader, ResultBox, ErrorBox, SubmitButton, UploadZone } from '../components/UI';
5
+
6
+ export default function EmpathyEngine() {
7
+ const [tab, setTab] = useState('text');
8
+ const [text, setText] = useState('');
9
+ const [file, setFile] = useState(null);
10
+ const [result, setResult] = useState(null);
11
+ const [error, setError] = useState('');
12
+ const [loading, setLoading] = useState(false);
13
+
14
+ const handleSubmit = async (e) => {
15
+ e.preventDefault();
16
+ setLoading(true); setError(''); setResult(null);
17
+
18
+ try {
19
+ let res;
20
+ if (tab === 'text') {
21
+ res = await axios.post('/api/sentiment', { text });
22
+ } else {
23
+ const fd = new FormData();
24
+ fd.append('voice', file);
25
+ res = await axios.post('/api/sentiment', fd);
26
+ }
27
+ setResult(res.data);
28
+ } catch (err) {
29
+ setError(err.response?.data?.error || 'Request failed');
30
+ } finally { setLoading(false); }
31
+ };
32
+
33
+ const sentimentIcon = (label) => {
34
+ const l = (label || '').toLowerCase();
35
+ if (l === 'positive') return <Smile size={48} className="text-emerald-400" />;
36
+ if (l === 'negative') return <Frown size={48} className="text-red-400" />;
37
+ return <Meh size={48} className="text-cyan-400" />;
38
+ };
39
+
40
+ const sentimentColor = (label) => {
41
+ const l = (label || '').toLowerCase();
42
+ if (l === 'positive') return 'text-emerald-400';
43
+ if (l === 'negative') return 'text-red-400';
44
+ return 'text-cyan-400';
45
+ };
46
+
47
+ return (
48
+ <div className="max-w-2xl mx-auto">
49
+ <PageHeader title="Empathy Engine" subtitle="Contextual sentiment analysis for text and vocal recordings." />
50
+
51
+ <form onSubmit={handleSubmit} className="glass-card p-6 sm:p-8 space-y-5">
52
+ {/* Tabs */}
53
+ <div className="flex gap-2 border-b border-white/10 pb-3">
54
+ {['text', 'voice'].map(t => (
55
+ <button
56
+ key={t}
57
+ type="button"
58
+ onClick={() => setTab(t)}
59
+ className={`px-4 py-2 rounded-xl text-sm font-semibold transition-all ${tab === t ? 'bg-cyan-500/10 text-cyan-400' : 'text-slate-400 hover:bg-white/5'
60
+ }`}
61
+ >
62
+ {t === 'text' ? 'Text Analysis' : 'Vocal Analysis'}
63
+ </button>
64
+ ))}
65
+ </div>
66
+
67
+ {tab === 'text' ? (
68
+ <div>
69
+ <label className="block text-sm font-semibold text-slate-300 mb-2">Input Text</label>
70
+ <textarea
71
+ value={text}
72
+ onChange={(e) => setText(e.target.value)}
73
+ placeholder="Explain how you're feeling..."
74
+ className="quantum-input min-h-[140px] resize-y"
75
+ />
76
+ </div>
77
+ ) : (
78
+ <div>
79
+ <UploadZone accept="audio/*" onChange={(e) => setFile(e.target.files[0])} label="Upload Voice Recording" sublabel="WAV or MP3 format" />
80
+ {file && <p className="text-sm text-cyan-400 text-center mt-2 font-medium">🎙 {file.name}</p>}
81
+ </div>
82
+ )}
83
+
84
+ <SubmitButton loading={loading}>
85
+ <Mic size={18} /> Analyze Sentiment
86
+ </SubmitButton>
87
+ </form>
88
+
89
+ <ErrorBox message={error} />
90
+
91
+ {result && (
92
+ <ResultBox>
93
+ <p className="text-xs font-bold uppercase tracking-widest text-purple-400 mb-4">Engine Output</p>
94
+ {result.transcript && (
95
+ <div className="mb-4 p-3 rounded-xl bg-black/20 text-sm">
96
+ <span className="text-xs font-bold text-cyan-400 uppercase">Transcription</span>
97
+ <p className="mt-1 text-slate-300">"{result.transcript}"</p>
98
+ </div>
99
+ )}
100
+ <div className="flex items-center gap-6 p-4 rounded-xl bg-white/3">
101
+ <div className="flex-1">
102
+ <span className="text-sm text-slate-400">Detected Sentiment</span>
103
+ <p className={`text-3xl font-extrabold capitalize ${sentimentColor(result.result)}`}>
104
+ {result.result}
105
+ </p>
106
+ <span className="text-sm text-slate-500">{result.score}% confidence</span>
107
+ </div>
108
+ {sentimentIcon(result.result)}
109
+ </div>
110
+ </ResultBox>
111
+ )}
112
+ </div>
113
+ );
114
+ }
frontend/src/pages/GenderDiscovery.jsx ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import axios from 'axios';
3
+ import { Upload } from 'lucide-react';
4
+ import { PageHeader, ResultBox, ErrorBox, SubmitButton, UploadZone } from '../components/UI';
5
+
6
+ export default function GenderDiscovery() {
7
+ const [file, setFile] = useState(null);
8
+ const [preview, setPreview] = useState(null);
9
+ const [result, setResult] = useState('');
10
+ const [error, setError] = useState('');
11
+ const [loading, setLoading] = useState(false);
12
+
13
+ const handleFile = (e) => {
14
+ const f = e.target.files[0];
15
+ if (f) {
16
+ setFile(f);
17
+ setPreview(URL.createObjectURL(f));
18
+ }
19
+ };
20
+
21
+ const handleSubmit = async (e) => {
22
+ e.preventDefault();
23
+ if (!file) return setError('Please select an image');
24
+ setLoading(true); setError(''); setResult('');
25
+
26
+ const fd = new FormData();
27
+ fd.append('image', file);
28
+
29
+ try {
30
+ const res = await axios.post('/api/gender', fd);
31
+ setResult(res.data.result);
32
+ } catch (err) {
33
+ setError(err.response?.data?.error || 'Request failed');
34
+ } finally { setLoading(false); }
35
+ };
36
+
37
+ return (
38
+ <div className="max-w-2xl mx-auto">
39
+ <PageHeader title="Gender Discovery" subtitle="Upload a visual specimen for neural gender classification." />
40
+
41
+ <form onSubmit={handleSubmit} className="glass-card p-6 sm:p-8 space-y-5">
42
+ <UploadZone accept="image/*" name="image" onChange={handleFile} label="Upload Image" sublabel="PNG, JPG or WEBP (max 10MB)" />
43
+
44
+ {preview && (
45
+ <div className="rounded-2xl overflow-hidden border border-white/10">
46
+ <img src={preview} alt="Preview" className="w-full h-56 object-cover" />
47
+ </div>
48
+ )}
49
+
50
+ {file && <p className="text-sm text-cyan-400 text-center font-medium">📎 {file.name}</p>}
51
+
52
+ <SubmitButton loading={loading}>
53
+ <Upload size={18} /> Run Discovery Engine
54
+ </SubmitButton>
55
+ </form>
56
+
57
+ <ErrorBox message={error} />
58
+
59
+ {result && (
60
+ <ResultBox>
61
+ <p className="text-xs font-bold uppercase tracking-widest text-purple-400 mb-2">Engine Output</p>
62
+ <div className="flex justify-between items-center">
63
+ <span className="text-slate-400">Detected Gender</span>
64
+ <span className="text-3xl font-extrabold">{result}</span>
65
+ </div>
66
+ </ResultBox>
67
+ )}
68
+ </div>
69
+ );
70
+ }
frontend/src/pages/NeuralTranslate.jsx ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import axios from 'axios';
3
+ import { Languages, Copy, Check } from 'lucide-react';
4
+ import { PageHeader, ResultBox, ErrorBox, SubmitButton } from '../components/UI';
5
+
6
+ export default function NeuralTranslate() {
7
+ const [text, setText] = useState('');
8
+ const [result, setResult] = useState('');
9
+ const [error, setError] = useState('');
10
+ const [loading, setLoading] = useState(false);
11
+ const [copied, setCopied] = useState(false);
12
+
13
+ const handleSubmit = async (e) => {
14
+ e.preventDefault();
15
+ if (!text.trim()) return;
16
+ setLoading(true); setError(''); setResult('');
17
+ try {
18
+ const res = await axios.post('/api/translate', { text });
19
+ setResult(res.data.translated_text);
20
+ } catch (err) {
21
+ setError(err.response?.data?.error || 'Request failed');
22
+ } finally { setLoading(false); }
23
+ };
24
+
25
+ const copyText = () => {
26
+ navigator.clipboard.writeText(result);
27
+ setCopied(true);
28
+ setTimeout(() => setCopied(false), 2000);
29
+ };
30
+
31
+ return (
32
+ <div className="max-w-2xl mx-auto">
33
+ <PageHeader title="Neural Translate" subtitle="Advanced English-to-Urdu translation using sequence models." />
34
+
35
+ <form onSubmit={handleSubmit} className="glass-card p-6 sm:p-8 space-y-5">
36
+ <div>
37
+ <label className="block text-sm font-semibold text-slate-300 mb-2">English Text</label>
38
+ <textarea
39
+ value={text}
40
+ onChange={(e) => setText(e.target.value)}
41
+ placeholder="Type or paste English text here..."
42
+ className="quantum-input min-h-[140px] resize-y"
43
+ />
44
+ </div>
45
+ <SubmitButton loading={loading}>
46
+ <Languages size={18} /> Translate to Urdu
47
+ </SubmitButton>
48
+ </form>
49
+
50
+ <ErrorBox message={error} />
51
+
52
+ {result && (
53
+ <ResultBox>
54
+ <div className="flex items-center justify-between mb-3">
55
+ <p className="text-xs font-bold uppercase tracking-widest text-purple-400">Urdu Translation</p>
56
+ <button onClick={copyText} className="flex items-center gap-1.5 text-xs text-slate-400 hover:text-white transition-colors bg-white/5 px-3 py-1.5 rounded-lg">
57
+ {copied ? <Check size={14} /> : <Copy size={14} />}
58
+ {copied ? 'Copied!' : 'Copy'}
59
+ </button>
60
+ </div>
61
+ <p className="text-xl text-slate-200 leading-relaxed text-right font-medium" dir="rtl">{result}</p>
62
+ </ResultBox>
63
+ )}
64
+ </div>
65
+ );
66
+ }
frontend/src/pages/TextSynthesis.jsx ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import axios from 'axios';
3
+ import { PenTool, Copy, Check } from 'lucide-react';
4
+ import { PageHeader, ResultBox, ErrorBox, SubmitButton } from '../components/UI';
5
+
6
+ export default function TextSynthesis() {
7
+ const [prompt, setPrompt] = useState('');
8
+ const [result, setResult] = useState('');
9
+ const [error, setError] = useState('');
10
+ const [loading, setLoading] = useState(false);
11
+ const [copied, setCopied] = useState(false);
12
+
13
+ const handleSubmit = async (e) => {
14
+ e.preventDefault();
15
+ if (!prompt.trim()) return;
16
+ setLoading(true); setError(''); setResult('');
17
+ try {
18
+ const res = await axios.post('/api/textgen', { prompt });
19
+ setResult(res.data.generated_text);
20
+ } catch (err) {
21
+ setError(err.response?.data?.error || 'Request failed');
22
+ } finally { setLoading(false); }
23
+ };
24
+
25
+ const copyText = () => {
26
+ navigator.clipboard.writeText(result);
27
+ setCopied(true);
28
+ setTimeout(() => setCopied(false), 2000);
29
+ };
30
+
31
+ return (
32
+ <div className="max-w-2xl mx-auto">
33
+ <PageHeader title="Text Synthesis" subtitle="Creative language generation powered by GPT-2 architecture." />
34
+
35
+ <form onSubmit={handleSubmit} className="glass-card p-6 sm:p-8 space-y-5">
36
+ <div>
37
+ <label className="block text-sm font-semibold text-slate-300 mb-2">Synthesis Prompt</label>
38
+ <textarea
39
+ value={prompt}
40
+ onChange={(e) => setPrompt(e.target.value)}
41
+ placeholder="Enter a seed sentence for the AI to expand upon..."
42
+ className="quantum-input min-h-[140px] resize-y"
43
+ />
44
+ </div>
45
+ <SubmitButton loading={loading}>
46
+ <PenTool size={18} /> Synthesize Text
47
+ </SubmitButton>
48
+ </form>
49
+
50
+ <ErrorBox message={error} />
51
+
52
+ {result && (
53
+ <ResultBox>
54
+ <div className="flex items-center justify-between mb-3">
55
+ <p className="text-xs font-bold uppercase tracking-widest text-purple-400">Generated Output</p>
56
+ <button onClick={copyText} className="flex items-center gap-1.5 text-xs text-slate-400 hover:text-white transition-colors bg-white/5 px-3 py-1.5 rounded-lg">
57
+ {copied ? <Check size={14} /> : <Copy size={14} />}
58
+ {copied ? 'Copied!' : 'Copy'}
59
+ </button>
60
+ </div>
61
+ <p className="text-slate-200 leading-relaxed">{result}</p>
62
+ </ResultBox>
63
+ )}
64
+ </div>
65
+ );
66
+ }
frontend/src/pages/ZeroShotLab.jsx ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import axios from 'axios';
3
+ import { Target } from 'lucide-react';
4
+ import { PageHeader, ResultBox, ErrorBox, SubmitButton } from '../components/UI';
5
+
6
+ export default function ZeroShotLab() {
7
+ const [text, setText] = useState('');
8
+ const [labels, setLabels] = useState('');
9
+ const [result, setResult] = useState(null);
10
+ const [error, setError] = useState('');
11
+ const [loading, setLoading] = useState(false);
12
+
13
+ const handleSubmit = async (e) => {
14
+ e.preventDefault();
15
+ if (!text.trim() || !labels.trim()) return;
16
+ setLoading(true); setError(''); setResult(null);
17
+ try {
18
+ const res = await axios.post('/api/zsl', { text, labels });
19
+ setResult(res.data);
20
+ } catch (err) {
21
+ setError(err.response?.data?.error || 'Request failed');
22
+ } finally { setLoading(false); }
23
+ };
24
+
25
+ return (
26
+ <div className="max-w-2xl mx-auto">
27
+ <PageHeader title="Zero-Shot Lab" subtitle="BART-based classification for any unseen categories." />
28
+
29
+ <form onSubmit={handleSubmit} className="glass-card p-6 sm:p-8 space-y-5">
30
+ <div>
31
+ <label className="block text-sm font-semibold text-slate-300 mb-2">Input Text</label>
32
+ <textarea
33
+ value={text}
34
+ onChange={(e) => setText(e.target.value)}
35
+ placeholder="Enter text to classify..."
36
+ className="quantum-input min-h-[120px] resize-y"
37
+ required
38
+ />
39
+ </div>
40
+ <div>
41
+ <label className="block text-sm font-semibold text-slate-300 mb-2">Candidate Labels</label>
42
+ <input
43
+ type="text"
44
+ value={labels}
45
+ onChange={(e) => setLabels(e.target.value)}
46
+ placeholder="politics, sports, technology, health..."
47
+ className="quantum-input"
48
+ required
49
+ />
50
+ <p className="text-xs text-slate-500 mt-1">Separate labels with commas</p>
51
+ </div>
52
+ <SubmitButton loading={loading}>
53
+ <Target size={18} /> Classify Text
54
+ </SubmitButton>
55
+ </form>
56
+
57
+ <ErrorBox message={error} />
58
+
59
+ {result && (
60
+ <ResultBox>
61
+ <p className="text-xs font-bold uppercase tracking-widest text-purple-400 mb-4">Classification Results</p>
62
+ <div className="mb-4 p-4 rounded-xl bg-black/20 text-center">
63
+ <span className="text-sm text-slate-400">Best Match</span>
64
+ <p className="text-2xl font-extrabold text-cyan-400 mt-1">{result.best_label}</p>
65
+ <span className="text-sm text-slate-500">{result.best_score}% confidence</span>
66
+ </div>
67
+ <div className="space-y-2">
68
+ {result.results?.map((r, i) => (
69
+ <div key={i} className="flex items-center gap-3">
70
+ <span className="text-sm text-slate-300 w-28 truncate capitalize">{r.label}</span>
71
+ <div className="flex-1 h-2 bg-white/5 rounded-full overflow-hidden">
72
+ <div
73
+ className="h-full bg-gradient-to-r from-cyan-500 to-purple-500 rounded-full transition-all duration-700"
74
+ style={{ width: `${r.score}%` }}
75
+ />
76
+ </div>
77
+ <span className="text-xs text-slate-400 w-12 text-right">{r.score}%</span>
78
+ </div>
79
+ ))}
80
+ </div>
81
+ </ResultBox>
82
+ )}
83
+ </div>
84
+ );
85
+ }
frontend/vite.config.js ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { defineConfig } from 'vite'
2
+ import react from '@vitejs/plugin-react'
3
+ import tailwindcss from '@tailwindcss/vite'
4
+
5
+ export default defineConfig({
6
+ plugins: [react(), tailwindcss()],
7
+ server: {
8
+ port: 5173,
9
+ proxy: {
10
+ '/api': 'http://localhost:5000',
11
+ '/static': 'http://localhost:5000',
12
+ }
13
+ }
14
+ })
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
  flask
 
2
  transformers
3
  torch
4
  pillow
 
1
  flask
2
+ flask-cors
3
  transformers
4
  torch
5
  pillow