Dwarakesh-V committed on
Commit
369c2da
·
1 Parent(s): 635daa5

Copy files from Github to HF spaces

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.png filter=lfs diff=lfs merge=lfs -text
37
+ *.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ node_modules
Dockerfile ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use Node for frontend + backend
FROM node:18

# Install Python (HF already uses Debian-based images, so this works)
RUN apt-get update && apt-get install -y python3 python3-pip \
    && rm -rf /var/lib/apt/lists/*

# Create non-root user (HF Spaces convention: uid 1000)
RUN useradd -m -u 1000 user
USER user
WORKDIR /home/user/app

# Copy package files first (cache-friendly)
COPY --chown=user package.json package-lock.json ./

# Install Node dependencies
RUN npm install

# Copy the rest of the repo
COPY --chown=user . .

# Build Vite frontend
RUN cd frontend && npm install && npm run build

# Move built frontend to backend's static folder
RUN cp -r frontend/dist backend/dist

# Install Python deps if any
COPY --chown=user requirements.txt .
RUN pip3 install --no-cache-dir -r requirements.txt

# Download NLTK tokenizer data at build time.
# BUG FIX: this image has no `python` binary (apt installs only `python3`),
# so `python -m ...` failed the build — invoke python3 explicitly.
RUN python3 -m nltk.downloader punkt
RUN python3 -m nltk.downloader punkt_tab

# Hugging Face Spaces routes traffic to port 7860; tell the Node server to
# listen there (it falls back to 8000 when PORT is unset, e.g. local dev).
ENV PORT=7860

# Expose HF port
EXPOSE 7860

# Start your Node backend
CMD ["node", "backend/server.js"]
README.md CHANGED
@@ -1,12 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
- title: Semantic Analyzer
3
- emoji: 🔥
4
- colorFrom: red
5
- colorTo: blue
6
- sdk: docker
7
- pinned: false
8
- license: mit
9
- short_description: A lightweight, fast, and fully deterministic semantic engine
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
1
+ # Semantic Intent Routing Engine
2
+
3
+ A lightweight, fast, and fully deterministic alternative to traditional intent-classification systems.
4
+ Built with **React + Vite + Tailwind** on the frontend and a **Python-based semantic inference engine** on the backend.
5
+
6
+ This system routes user queries through a graph of intents using sentence embeddings, adaptive confidence logic, and retrieval-based fallbacks—no classifiers, training loops, or model deployments required.
7
+
8
+ ---
9
+
10
+ ## Features
11
+
12
+ ### **Deterministic Intent Resolution**
13
+ Resolves user queries by traversing a DAG-structured intent graph with path-level scoring instead of a single classifier.
14
+
15
+ ### **Adaptive Confidence Thresholding**
16
+ Automatically adjusts sensitivity based on query length and phrasing, improving routing stability on ambiguous inputs.
17
+
18
+ ### **Retrieval-Augmented Fallbacks**
19
+ When a query doesn’t clearly match any intent, the system fetches semantically similar candidates and recovers gracefully.
20
+
21
+ ### **Multi-Turn Context Handling**
22
+ Maintains conversational context so follow-up questions like “same as before” or “for that” route correctly without repeating selections.
23
+
24
+ ### **Hot-Swappable Intent Graph**
25
+ Intents are defined in JSON and automatically converted into a navigable graph.
26
+ Updates apply instantly—no retraining or redeployment required.
27
+
28
+ ### **Fast and Lightweight**
29
+ Runs entirely on CPU and maintains **sub-15ms routing latency** thanks to caching and optimized traversal.
30
+
31
  ---
32
+
33
+ ## Architecture Overview
34
+
35
+ - **Frontend:** React + Vite + Tailwind interface for entering queries and testing the routing behavior.
36
+ - **Backend:** Python engine using sentence-transformer embeddings and deterministic traversal logic.
37
+ - **Intent Graph:** JSON-defined structure supporting multi-parent nodes, examples, responses, and metadata.
38
+
 
39
  ---
40
 
41
+ ## Why This Exists
42
+
43
+ Most NLU systems rely on classifiers or fine-tuned models, which brings problems like:
44
+
45
+ - retraining loops
46
+ - model drift
47
+ - slow iteration cycles
48
+ - low explainability
49
+
50
+ This project avoids all of that by using semantic similarity, graph traversal, and context tracking to produce stable and predictable routing—even as intents change.
backend/models/message.js ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
import mongoose from 'mongoose';

const { Schema, model } = mongoose;

// One persisted chat entry. `role` records who produced it ('user' input,
// 'bot' text reply, or an 'image' reply); `text` holds user/bot content and
// `src` holds an image URL, so exactly one of the two is normally set.
const messageSchema = new Schema({
  role: { type: String, enum: ['user', 'bot', 'image'], required: true },
  text: { type: String },
  src: { type: String },
  // Defaults to insertion time; /history sorts ascending on this field.
  timestamp: { type: Date, default: Date.now },
});

export default model('Message', messageSchema);
backend/server.js ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import express from "express";
import cors from "cors";
import { spawn } from "child_process";
import path from "path";
import mongoose from 'mongoose';
import Message from './models/message.js'; // adjust path if needed

const app = express();
app.use(cors());
app.use(express.json());

// Configuration comes from the environment, with the previous hard-coded
// values as fallbacks so local development keeps working unchanged.
const PORT = Number(process.env.PORT ?? 8000);
const MONGO_URL = process.env.MONGO_URL ?? "mongodb://127.0.0.1:27017/semantic_chat";
// BUG FIX: the interpreter path was hard-coded to a dev machine's virtualenv
// and cannot exist inside the Docker image; set PYTHON_BIN=python3 there.
const PYTHON_BIN = process.env.PYTHON_BIN ?? "/home/dwarakesh/base/bin/python3";

// Serve the built frontend. The Dockerfile copies frontend/dist to
// backend/dist, but nothing served it, so the deployed app was blank.
// Harmless locally if the directory does not exist.
app.use(express.static(path.join(process.cwd(), "dist")));

// Start Python engine once; it speaks newline-delimited JSON over stdio.
const python = spawn(PYTHON_BIN, [
  path.join(process.cwd(), "../python_engine/engine_server.py")
]);

let ready = false;

// Read stderr for logs + the "READY" handshake printed by the engine.
python.stderr.on("data", (data) => {
  const msg = data.toString();
  console.log("[PY]", msg.trim());
  if (msg.includes("READY")) ready = true;
});

// FIFO queue of pending reply handlers; the engine answers in request order.
let callbacks = [];
python.stdout.on("data", (data) => {
  const lines = data.toString().trim().split("\n");
  for (const line of lines) {
    const pending = callbacks.shift();
    if (!pending) continue;
    try {
      pending.resolve(JSON.parse(line));
    } catch (err) {
      // BUG FIX: an unparsable line used to throw inside this stream handler,
      // leaking the callback and stalling every later request in the queue.
      pending.reject(new Error(`Bad JSON from python engine: ${line}`, { cause: err }));
    }
  }
});

/**
 * Send one query to the Python engine and resolve with its parsed JSON reply.
 * Rejects when the engine has not yet signalled READY or emits invalid JSON.
 * @param {string} query - raw user query text
 * @returns {Promise<object>} parsed engine response
 */
function askPython(query) {
  return new Promise((resolve, reject) => {
    if (!ready) return reject(new Error("Python engine not ready"));

    callbacks.push({ resolve, reject });
    python.stdin.write(JSON.stringify({ query }) + "\n");
  });
}

// connect to mongo
mongoose.connect(MONGO_URL)
  .then(() => console.log("MongoDB connected"))
  .catch((err) => console.error("MongoDB error:", err));

// Main chat endpoint: persist the user's message, ask the engine, persist
// each bot reply, then return the raw engine result to the client.
app.post("/ask", async (req, res) => {
  try {
    const userQuery = req.body.query;

    // store user message
    await Message.create({
      role: "user",
      text: userQuery
    });

    const result = await askPython(userQuery);

    // store bot messages (the engine may answer with several chunks)
    if (Array.isArray(result.response)) {
      for (const msg of result.response) {
        await Message.create({
          role: "bot",
          text: msg
        });
      }
    }

    res.json(result);

  } catch (err) {
    console.error("Error in /ask:", err);
    res.status(500).json({ error: err.toString() });
  }
});

// Full chat history, oldest first.
app.get("/history", async (req, res) => {
  try {
    const messages = await Message.find().sort({ timestamp: 1 });
    res.json(messages);
  } catch (err) {
    // Previously an unhandled rejection if Mongo was down.
    console.error("Error in /history:", err);
    res.status(500).json({ error: err.toString() });
  }
});

// Clear chat history endpoint
app.post("/clear", async (req, res) => {
  try {
    // Delete all messages from database
    await Message.deleteMany({});

    // Create new initial greeting message
    const initialMessage = await Message.create({
      role: "bot",
      text: "Hi. I am Dwarakesh. Ask me anything."
    });

    res.json({ success: true, message: initialMessage });
  } catch (err) {
    console.error("Error clearing chat:", err);
    res.status(500).json({ error: err.toString() });
  }
});

app.listen(PORT, () =>
  console.log(`Node server running on http://localhost:${PORT}`)
);
eslint.config.js ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'
import { defineConfig, globalIgnores } from 'eslint/config'

// Rules applied to every TypeScript/TSX source file.
const tsReactConfig = {
  files: ['**/*.{ts,tsx}'],
  extends: [
    js.configs.recommended,
    tseslint.configs.recommended,
    reactHooks.configs.flat.recommended,
    reactRefresh.configs.vite,
  ],
  languageOptions: {
    ecmaVersion: 2020,
    // Browser globals (window, document, …) — this is a Vite frontend.
    globals: globals.browser,
  },
}

// Flat ESLint config; build output is never linted.
export default defineConfig([globalIgnores(['dist']), tsReactConfig])
frontend/index.html ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html lang="en">
  <head>
    <!-- Entry page for the Vite + React frontend; #root is mounted by /src/main.tsx -->
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <link rel="icon" type="image/png" href="/favicon.png" />
    <title>Semantic Intent Routing Engine</title>
  </head>
  <body>
    <div id="root"></div>
    <script type="module" src="/src/main.tsx"></script>
  </body>
</html>
frontend/public/favicon.png ADDED

Git LFS Details

  • SHA256: 4dad548241dbc2a3d53818292e9b4a8a25715e252811ef7990d3be77988cae30
  • Pointer size: 131 Bytes
  • Size of remote file: 699 kB
frontend/src/App.tsx ADDED
@@ -0,0 +1,348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React, { useState, useRef, useEffect } from 'react';
2
+ import { Send, Trash2 } from 'lucide-react';
3
+
4
+ interface Message {
5
+ type: 'user' | 'bot' | 'image';
6
+ text?: string;
7
+ src?: string;
8
+ timestamp?: string; // ISO string from backend
9
+ }
10
+
11
+ interface ApiResponse {
12
+ response?: string[];
13
+ }
14
+
15
+ export default function SemanticChat() {
16
+ const [messages, setMessages] = useState<Message[]>([
17
+ {
18
+ type: 'bot',
19
+ text: 'Hi. I am Dwarakesh. Ask me anything about myself.',
20
+ timestamp: new Date().toISOString()
21
+ }
22
+ ]);
23
+ const [input, setInput] = useState<string>('');
24
+ const [isLoading, setIsLoading] = useState<boolean>(false);
25
+ const chatContainerRef = useRef<HTMLDivElement>(null);
26
+ const inputRef = useRef<HTMLInputElement>(null);
27
+
28
+ useEffect(() => {
29
+ if (chatContainerRef.current) {
30
+ chatContainerRef.current.scrollTop = chatContainerRef.current.scrollHeight;
31
+ }
32
+ }, [messages]);
33
+
34
+ const handleKeyPress = (e: React.KeyboardEvent<HTMLInputElement>): void => {
35
+ if (e.key === 'Enter' && !isLoading && input.trim().length > 0) {
36
+ handleSubmit();
37
+ }
38
+ };
39
+
40
+ const formatTime = (timestamp?: string): string => {
41
+ if (!timestamp) return new Date().toLocaleTimeString('en-US', { hour: '2-digit', minute: '2-digit' });
42
+ return new Date(timestamp).toLocaleTimeString('en-US', { hour: '2-digit', minute: '2-digit' });
43
+ };
44
+
45
+ const handleSubmit = async (): Promise<void> => {
46
+ const userMessage = input.trim();
47
+ const userTimestamp = new Date().toISOString();
48
+ setInput('');
49
+ inputRef.current?.focus();
50
+
51
+ setMessages(prev => [...prev, { type: 'user', text: userMessage, timestamp: userTimestamp }]);
52
+ setIsLoading(true);
53
+
54
+ try {
55
+ const res = await fetch('http://localhost:8000/ask', {
56
+ method: 'POST',
57
+ headers: { 'Content-Type': 'application/json' },
58
+ body: JSON.stringify({ query: userMessage })
59
+ });
60
+
61
+ const data: ApiResponse = await res.json();
62
+
63
+ if (data.response && Array.isArray(data.response)) {
64
+ data.response.forEach((responseText: string) => {
65
+ const htmlContent = responseText.trim();
66
+
67
+ if (htmlContent.length > 0) {
68
+ setMessages(prev => [
69
+ ...prev,
70
+ { type: 'bot', text: htmlContent, timestamp: new Date().toISOString() }
71
+ ]);
72
+ }
73
+ });
74
+ }
75
+
76
+ } catch (err) {
77
+ console.error(err);
78
+ setMessages(prev => [...prev, {
79
+ type: 'bot',
80
+ text: 'Sorry, something went wrong.',
81
+ timestamp: new Date().toISOString()
82
+ }]);
83
+ } finally {
84
+ setIsLoading(false);
85
+ }
86
+ };
87
+
88
+ const handleClearChat = async (): Promise<void> => {
89
+ try {
90
+ await fetch('http://localhost:8000/clear', {
91
+ method: 'POST'
92
+ });
93
+
94
+ // Create new initial message with current timestamp
95
+ const initialMessage = {
96
+ type: 'bot' as const,
97
+ text: 'Hi. I am Dwarakesh. Ask me anything about myself.',
98
+ timestamp: new Date().toISOString()
99
+ };
100
+
101
+ setMessages([initialMessage]);
102
+ } catch (err) {
103
+ console.error('Failed to clear chat:', err);
104
+ }
105
+ };
106
+
107
+ useEffect(() => {
108
+ async function loadHistory() {
109
+ try {
110
+ const res = await fetch('http://localhost:8000/history');
111
+ const data = await res.json();
112
+
113
+ const mapped = data.map((msg: any) => ({
114
+ type: msg.role,
115
+ text: msg.text,
116
+ src: msg.src,
117
+ timestamp: msg.timestamp
118
+ }));
119
+
120
+ // Only show initial greeting if there's no history
121
+ if (mapped.length === 0) {
122
+ setMessages([
123
+ {
124
+ type: 'bot',
125
+ text: 'Hi. I am Dwarakesh. Ask me anything about myself.',
126
+ timestamp: new Date().toISOString()
127
+ }
128
+ ]);
129
+ } else {
130
+ setMessages(mapped);
131
+ }
132
+ } catch (err) {
133
+ console.error('Failed to load history:', err);
134
+ }
135
+ }
136
+
137
+ loadHistory();
138
+ }, []);
139
+
140
+ return (
141
+ <div className="h-screen bg-gray-50 flex">
142
+ {/* Documentation Section - 40% */}
143
+ <div className="hidden md:block w-[40%] bg-white overflow-y-auto">
144
+ <div className="p-8">
145
+ <h1 className="text-3xl font-bold text-gray-900 mb-4">Semantic Intent Routing Engine</h1>
146
+
147
+ <p className="text-gray-700 mb-6 leading-relaxed">
148
+ A lightweight, fast, and fully deterministic alternative to traditional intent-classification systems.
149
+ Built with <strong>React + Vite + Tailwind</strong> on the frontend and a <strong>Python-based semantic inference engine</strong> on the backend.
150
+ </p>
151
+
152
+ <p className="text-gray-700 mb-8 leading-relaxed">
153
+ This system routes user queries through a graph of intents using sentence embeddings, adaptive confidence logic, and retrieval-based fallbacks—no classifiers, training loops, or model deployments required.
154
+ </p>
155
+
156
+ <hr className="my-8 border-gray-200" />
157
+
158
+ <h2 className="text-2xl font-bold text-gray-900 mb-4">Features</h2>
159
+
160
+ <div className="space-y-6">
161
+ <div>
162
+ <h3 className="text-lg font-semibold text-gray-900 mb-2">Deterministic Intent Resolution</h3>
163
+ <p className="text-gray-700 leading-relaxed">
164
+ Resolves user queries by traversing a DAG-structured intent graph with path-level scoring instead of a single classifier.
165
+ </p>
166
+ </div>
167
+
168
+ <div>
169
+ <h3 className="text-lg font-semibold text-gray-900 mb-2">Adaptive Confidence Thresholding</h3>
170
+ <p className="text-gray-700 leading-relaxed">
171
+ Automatically adjusts sensitivity based on query length and phrasing, improving routing stability on ambiguous inputs.
172
+ </p>
173
+ </div>
174
+
175
+ <div>
176
+ <h3 className="text-lg font-semibold text-gray-900 mb-2">Retrieval-Augmented Fallbacks</h3>
177
+ <p className="text-gray-700 leading-relaxed">
178
+ When a query doesn't clearly match any intent, the system fetches semantically similar candidates and recovers gracefully.
179
+ </p>
180
+ </div>
181
+
182
+ <div>
183
+ <h3 className="text-lg font-semibold text-gray-900 mb-2">Multi-Turn Context Handling</h3>
184
+ <p className="text-gray-700 leading-relaxed">
185
+ Maintains conversational context so follow-up questions like "same as before" or "for that" route correctly without repeating selections.
186
+ </p>
187
+ </div>
188
+
189
+ <div>
190
+ <h3 className="text-lg font-semibold text-gray-900 mb-2">Hot-Swappable Intent Graph</h3>
191
+ <p className="text-gray-700 leading-relaxed">
192
+ Intents are defined in JSON and automatically converted into a navigable graph.
193
+ Updates apply instantly—no retraining or redeployment required.
194
+ </p>
195
+ </div>
196
+
197
+ <div>
198
+ <h3 className="text-lg font-semibold text-gray-900 mb-2">Fast and Lightweight</h3>
199
+ <p className="text-gray-700 leading-relaxed">
200
+ Runs entirely on CPU and maintains <strong>sub-15ms routing latency</strong> thanks to caching and optimized traversal.
201
+ </p>
202
+ </div>
203
+ </div>
204
+
205
+ <hr className="my-8 border-gray-200" />
206
+
207
+ <h2 className="text-2xl font-bold text-gray-900 mb-4">Architecture Overview</h2>
208
+
209
+ <ul className="space-y-2 text-gray-700 leading-relaxed">
210
+ <li><strong>Frontend:</strong> React + Vite + Tailwind interface for entering queries and testing the routing behavior.</li>
211
+ <li><strong>Backend:</strong> Python engine using sentence-transformer embeddings and deterministic traversal logic.</li>
212
+ <li><strong>Intent Graph:</strong> JSON-defined structure supporting multi-parent nodes, examples, responses, and metadata.</li>
213
+ </ul>
214
+
215
+ <hr className="my-8 border-gray-200" />
216
+
217
+ <h2 className="text-2xl font-bold text-gray-900 mb-4">Why This Exists</h2>
218
+
219
+ <p className="text-gray-700 mb-4 leading-relaxed">
220
+ Most NLU systems rely on classifiers or fine-tuned models, which brings problems like:
221
+ </p>
222
+
223
+ <ul className="list-disc list-inside space-y-1 text-gray-700 mb-4 ml-4">
224
+ <li>retraining loops</li>
225
+ <li>model drift</li>
226
+ <li>slow iteration cycles</li>
227
+ <li>low explainability</li>
228
+ </ul>
229
+
230
+ <p className="text-gray-700 leading-relaxed">
231
+ This project avoids all of that by using semantic similarity, graph traversal, and context tracking to produce stable and predictable routing—even as intents change.
232
+ </p>
233
+ </div>
234
+ </div>
235
+
236
+ {/* Chat Section - 60% */}
237
+ <div className="w-full md:w-[60%] bg-gray-100 flex flex-col border-r border-gray-300">
238
+ {/* Header */}
239
+ <div className="bg-teal-600 text-white p-4 shadow-sm">
240
+ <div className="flex items-center justify-between">
241
+ <div className="flex items-center gap-3">
242
+ <div className="w-10 h-10 bg-teal-700 rounded-full flex items-center justify-center font-bold overflow-hidden">
243
+ <img
244
+ src="/static/dp.png"
245
+ alt="D"
246
+ className="w-full h-full object-contain rounded-full"
247
+ />
248
+ </div>
249
+ <div>
250
+ <h1 className="font-semibold text-lg">Dwarakesh</h1>
251
+ <p className="text-xs text-teal-100">
252
+ Semantic intent routing engine
253
+ <span className="inline md:hidden">
254
+ &nbsp; [Works best on large screens]
255
+ </span>
256
+ </p>
257
+ </div>
258
+ </div>
259
+ <button
260
+ onClick={handleClearChat}
261
+ className="p-2 hover:bg-teal-700 rounded-full transition-colors cursor-pointer disabled:cursor-not-allowed"
262
+ title="Clear chat"
263
+ >
264
+ <Trash2 size={20} />
265
+ </button>
266
+ </div>
267
+ </div>
268
+
269
+ {/* Chat messages container */}
270
+ <div
271
+ ref={chatContainerRef}
272
+ className="flex-1 overflow-y-auto p-4 bg-gray-100"
273
+ style={{
274
+ backgroundImage: `repeating-linear-gradient(45deg, transparent, transparent 10px, rgba(0,0,0,.02) 10px, rgba(0,0,0,.02) 20px)`
275
+ }}
276
+ >
277
+ <div className="space-y-3 px-8 py-8">
278
+ {messages.map((msg, idx) => (
279
+ <React.Fragment key={idx}>
280
+ {msg.type === 'user' && (
281
+ <div className="flex justify-end">
282
+ <div className="max-w-[70%] bg-white text-gray-800 px-4 py-2 rounded-lg rounded-tr-sm shadow-sm">
283
+ <div className="font-semibold text-teal-600 text-sm mb-1">You</div>
284
+ <div>{msg.text}</div>
285
+ <div className="text-xs text-gray-500 mt-1 text-right">
286
+ {formatTime(msg.timestamp)}
287
+ </div>
288
+ </div>
289
+ </div>
290
+ )}
291
+
292
+ {msg.type === 'bot' && (
293
+ <div className="flex justify-start">
294
+ <div className="max-w-[70%] bg-white text-gray-800 px-4 py-2 rounded-lg rounded-tl-sm shadow-sm">
295
+ <div className="font-semibold text-teal-600 text-sm mb-1">Dwarakesh</div>
296
+ <div className="bot-html-content" dangerouslySetInnerHTML={{ __html: msg.text ?? "" }} />
297
+ <div className="text-xs text-gray-500 mt-1">
298
+ {formatTime(msg.timestamp)}
299
+ </div>
300
+ </div>
301
+ </div>
302
+ )}
303
+
304
+ {msg.type === 'image' && (
305
+ <>
306
+ <br />
307
+ <div className="flex justify-start">
308
+ <div className="bg-white p-2 rounded-lg shadow-sm">
309
+ <img
310
+ src={msg.src}
311
+ alt="Response"
312
+ className="w-[250px] rounded"
313
+ />
314
+ </div>
315
+ </div>
316
+ <br />
317
+ </>
318
+ )}
319
+ </React.Fragment>
320
+ ))}
321
+ </div>
322
+ </div>
323
+
324
+ {/* Input area */}
325
+ <div className="bg-gray-200 p-3 border-t border-gray-300">
326
+ <div className="flex items-center gap-2">
327
+ <input
328
+ type="text"
329
+ value={input}
330
+ onChange={(e: React.ChangeEvent<HTMLInputElement>) => setInput(e.target.value)}
331
+ onKeyDown={handleKeyPress}
332
+ placeholder="Type a message"
333
+ className="flex-1 px-4 py-3 rounded-full bg-white border-none focus:outline-none disabled:bg-gray-100 disabled:cursor-not-allowed"
334
+ ref={inputRef}
335
+ />
336
+ <button
337
+ onClick={handleSubmit}
338
+ disabled={isLoading || input.trim().length === 0}
339
+ className="w-12 h-12 bg-teal-600 hover:bg-teal-700 cursor-pointer text-white rounded-full flex items-center justify-center disabled:bg-gray-400 disabled:cursor-not-allowed transition-colors"
340
+ >
341
+ <Send size={18} />
342
+ </button>
343
+ </div>
344
+ </div>
345
+ </div>
346
+ </div>
347
+ );
348
+ }
frontend/src/index.css ADDED
@@ -0,0 +1 @@
 
 
1
+ @import "tailwindcss";
frontend/src/main.tsx ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ import { StrictMode } from 'react'
2
+ import { createRoot } from 'react-dom/client'
3
+ import './index.css'
4
+ import App from './App.tsx'
5
+
6
+ createRoot(document.getElementById('root')!).render(
7
+ <StrictMode>
8
+ <App />
9
+ </StrictMode>,
10
+ )
frontend/static/dp.png ADDED

Git LFS Details

  • SHA256: b68d5c4c6f27b3f7dcfbc2ae449cbd2d61d44df246dabccca2b5c27115a7cc81
  • Pointer size: 132 Bytes
  • Size of remote file: 1.01 MB
frontend/static/forest_tent.png ADDED

Git LFS Details

  • SHA256: d2673c62259d031ba6052d09c38523d7d188099dcc2b290c9e649a029c59ad52
  • Pointer size: 132 Bytes
  • Size of remote file: 3.9 MB
frontend/static/holter.jpg ADDED

Git LFS Details

  • SHA256: b1eb25b31b4b7264e0032cb5027504436b5b7eb2797c59155232a4a300e155d0
  • Pointer size: 131 Bytes
  • Size of remote file: 243 kB
frontend/static/kratos.jpg ADDED

Git LFS Details

  • SHA256: 8b1fd5956303b9bf0354f021538f3c4ec4a89a3c0aa55d4edc7fdaa8332c9c65
  • Pointer size: 132 Bytes
  • Size of remote file: 1.12 MB
frontend/static/mountain_tent.jpg ADDED

Git LFS Details

  • SHA256: e936f15db8af98dc2ee01dba65448644b8d26db0d1921b5a5ce6010cbc0b517c
  • Pointer size: 132 Bytes
  • Size of remote file: 1.65 MB
frontend/static/my_room.png ADDED

Git LFS Details

  • SHA256: fd273696b038883eaf316e75cc07ff1fcace1e31659371f7c6fc9aa4653e59c1
  • Pointer size: 132 Bytes
  • Size of remote file: 4.07 MB
frontend/static/stark.jpg ADDED

Git LFS Details

  • SHA256: ae1e867e08aae175b723a4cc8cc6fe3c50e2094fc75f1976bad06c3f8153b167
  • Pointer size: 131 Bytes
  • Size of remote file: 904 kB
package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
package.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "semantic-analyzer",
3
+ "private": true,
4
+ "version": "0.0.0",
5
+ "type": "module",
6
+ "scripts": {
7
+ "dev": "vite",
8
+ "build": "tsc -b && vite build",
9
+ "lint": "eslint .",
10
+ "preview": "vite preview"
11
+ },
12
+ "dependencies": {
13
+ "@tailwindcss/vite": "^4.1.17",
14
+ "cors": "^2.8.5",
15
+ "express": "^5.2.1",
16
+ "lucide-react": "^0.556.0",
17
+ "mongoose": "^9.0.1",
18
+ "react": "^19.2.0",
19
+ "react-dom": "^19.2.0",
20
+ "tailwindcss": "^4.1.17"
21
+ },
22
+ "devDependencies": {
23
+ "@eslint/js": "^9.39.1",
24
+ "@types/node": "^24.10.1",
25
+ "@types/react": "^19.2.5",
26
+ "@types/react-dom": "^19.2.3",
27
+ "@vitejs/plugin-react": "^5.1.1",
28
+ "eslint": "^9.39.1",
29
+ "eslint-plugin-react-hooks": "^7.0.1",
30
+ "eslint-plugin-react-refresh": "^0.4.24",
31
+ "globals": "^16.5.0",
32
+ "typescript": "~5.9.3",
33
+ "typescript-eslint": "^8.46.4",
34
+ "vite": "^7.2.4"
35
+ }
36
+ }
python_engine/__pycache__/core.cpython-311.pyc ADDED
Binary file (12.8 kB). View file
 
python_engine/__pycache__/json_parser.cpython-311.pyc ADDED
Binary file (2.58 kB). View file
 
python_engine/__pycache__/tree.cpython-311.pyc ADDED
Binary file (5.09 kB). View file
 
python_engine/__pycache__/util.cpython-311.pyc ADDED
Binary file (588 Bytes). View file
 
python_engine/core.py ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
import re
import sys
from math import exp

# Third-party
from sentence_transformers import SentenceTransformer, util
from nltk.tokenize import sent_tokenize

# Local
from util import select_random_from_list
6
+
7
+ all_MiniLM_L12_v2 = SentenceTransformer("all-MiniLM-L12-v2")
8
+ """
9
+ all-MiniLM-L12-v2 is a sentence embedding model used for tasks involving semantic textual similarity, clustering, semantic search, and information retrieval. They convert a sentence to tensor based on their intent and then matching patterns like cos_sim can be used to compare them to other sentences.
10
+ """
11
+
12
+ CONNECTION_PHRASES = ["just like that", "for the same", "similarly", "similar to the previous", "for that", "for it"]
13
+ CONNECTION_ENCODE = all_MiniLM_L12_v2.encode(CONNECTION_PHRASES,convert_to_tensor=True)
14
+
15
+ CLEAR_MESSAGES = ["delete", "delete context", "delete history", "clear", "clear context", "clear history", "reset", "reset context", "reset chat", "forget", "forget all"]
16
+ CLEAR_MESSAGES_ENCODE = all_MiniLM_L12_v2.encode(CLEAR_MESSAGES,convert_to_tensor=True)
17
+
18
+ prev_label = ""
19
+ prev_query_data = [] # Stores previous context if queries contain ambiguous content that may map to previous responses
20
+
21
confidence_threshold = 0.35  # Default confidence threshold (module global, updated per query)
def generate_confidence_threshold(query: str, base=0.6, decay=0.03, min_threshold=0.25) -> float:
    """Generate confidence threshold based on the sentence. Longer sentences lead to
    lower confidences, so the threshold is adjusted (exponentially decayed) to match.

    Updates the module-global ``confidence_threshold`` as a side effect.
    Parameters:
    1. query: Modify threshold based on this sentence
    2. base, decay: threshold = base * e^(-(decay * no. of words in query))
       (docstring previously said 0.8 but the actual default base is 0.6)
    3. min_threshold: Clamp to minimum to avoid much lower confidence values
    Returns:
        The new threshold value (previously annotated ``-> float`` but returned None)."""
    global confidence_threshold
    length = len(query.split())
    confidence_threshold = max(base * exp(-decay * length), min_threshold)
    return confidence_threshold
31
+
32
+ # The value of each node contains the following data
33
+ # node.value[0] -> intent
34
+ # node.value[1] -> label
35
+ # node.value[2] -> examples
36
+ # node.value[3] -> response
37
+ # node.value[4] -> children
38
+
39
def cache_embeddings(tree, model = all_MiniLM_L12_v2)->None:
    """Store the encoded examples as part of the tree itself to avoid repetitive computations.
    Parameters:
    1. tree: Tree to cache embeddings
    2. model: Which model to use to encode (Default: Global model all_MiniLM_L12_v2)"""

    def _cache_node_embeddings(n):
        # node.value layout: (intent, label, examples, response, children);
        # examples live at index 2, so the guard must require at least 3
        # elements. BUG FIX: the guard was `>= 2`, which would raise
        # IndexError on a 2-element tuple instead of skipping it.
        if isinstance(n.value, tuple) and len(n.value) >= 3:
            examples = n.value[2]
            n.embedding_cache = model.encode(examples, convert_to_tensor=True)
        # Depth-first over the whole tree so every reachable node is cached.
        for child in n.children:
            _cache_node_embeddings(child)
    _cache_node_embeddings(tree.root)
52
+
53
# BUG FIX: the original list contained `"your'e""u"` — adjacent string
# literals concatenate, producing the single bogus entry "your'eu" and
# silently dropping "u".
SECOND_PERSON_MENTIONS = ["you", "youre", "your", "yours", "yourself", "y'all", "y'all's", "y'all'self", "you're", "your'e", "u", "ur", "urs", "urself"]
def get_user_query(message="", model = all_MiniLM_L12_v2)->str:
    """Separate function to get input from user.
    Parameters:
    1. message: Show message to user before receiving input (Default: empty)
    2. model: Which model to use to encode (Default: Global model all_MiniLM_L12_v2)
    Returns:
        The normalised query string, or None when the query looks like a
        clear/reset command (similarity to CLEAR_MESSAGES above threshold)."""

    query = input(message).lower().strip()
    while query == "":
        query = input(message).lower().strip()

    query = re.sub(r"\s+", " ", query)  # Collapse all whitespace runs (a single replace("  ", " ") only removed one level)
    for spm in SECOND_PERSON_MENTIONS:  # Replace second-person mentions with the bot name
        # BUG FIX: whole-word replacement. Plain str.replace matched
        # substrings, so the entry "u" rewrote every letter u in the query.
        query = re.sub(r"\b" + re.escape(spm) + r"\b", "Amber AI", query)
    generate_confidence_threshold(query)
    query_encode = model.encode(query, convert_to_tensor=True)
    clear_intent = util.cos_sim(query_encode,CLEAR_MESSAGES_ENCODE).max().item()
    if clear_intent > confidence_threshold:
        return None
    return query
73
+
74
def _calculate_single_level(user_embed,predicted_intent):
    """Score every child of a node against the user embedding and return the
    best match.
    Parameters:
    1. user_embed: User query converted to tensor
    2. predicted_intent: Calculate for children of this node
    Returns:
        (best_child_node_or_None, best_cosine_similarity) — the node stays
        None when no child scores above zero."""

    best_node = None
    best_score = 0
    for child in predicted_intent.children:  # Each child is a node object
        if child.embedding_cache is None:
            raise ValueError("Embedding cache missing. Call cache_embeddings() on the tree first")
        # Best similarity between the query and any cached example of this child.
        similarity = util.cos_sim(user_embed, child.embedding_cache).max().item()
        if similarity > best_score:
            best_score = similarity
            best_node = child
    return (best_node, best_score)
92
+
93
def _store_prev_data(predicted_intent):
    """Record the path from ``predicted_intent`` up to (but excluding) the
    tree root into the module-global ``prev_query_data``.
    Parameters:
    1. predicted_intent: Store previous data w.r.t this node"""
    # Mutate the global list in place so existing references stay valid.
    prev_query_data.clear()
    node = predicted_intent
    while node.parent:  # The root (parent is falsy) is never recorded
        prev_query_data.append(node)
        node = node.parent
103
+
104
def h_pass(tree, user_embed, predicted_intent = None)->tuple:
    """Use the model to pass through the tree to compare it with the user query in a hierarchical manner and return an output.
    Parameters:
    1. tree: Which tree to pass through hierarchically
    2. user_embed: User input converted to tensor
    3. predicted_intent: Where to start the pass from (Default: Root of the tree)
    Returns:
        6-tuple (predicted_intent, predicted_intent_parent, high_intent,
        passed_once, reached_leaf, pass_through_intent) where reached_leaf is
        False when the walk stopped early due to low confidence.
    Side effects: updates module globals prev_label and (via _store_prev_data)
    prev_query_data."""
    global prev_label
    predicted_intent = tree.root if predicted_intent == None else predicted_intent
    predicted_intent_parent = None
    high_intent = 0
    passed_once = False
    pass_through_intent = {}  # node -> best confidence seen when that node was chosen
    while predicted_intent.children: # Descend: pick the child with the highest confidence at each level
        predicted_intent_parent = predicted_intent
        predicted_intent, high_intent = _calculate_single_level(user_embed,predicted_intent)
        pass_through_intent[predicted_intent] = high_intent # Store the confidence value of the current node
        if passed_once: # Only record context after at least one successful level
            _store_prev_data(predicted_intent_parent) # Storing previous data w.r.t parent node as context is changed from current node
        if high_intent < confidence_threshold: # Highest confidence still too low: stop early
            prev_label = predicted_intent_parent.value[1]
            return (predicted_intent, predicted_intent_parent, high_intent, passed_once, False, pass_through_intent) # Early exit: leaf not reached
        passed_once = True

    # Reached a leaf with sufficient confidence at every level.
    _store_prev_data(predicted_intent)
    prev_label = predicted_intent.value[1]
    return (predicted_intent, predicted_intent_parent, high_intent, passed_once, True, pass_through_intent)
130
+
131
+ def query_pass(tree, user_input, model=all_MiniLM_L12_v2)->list:
132
+ """Separate multiple queries into separate single ones, analyze relation between them if any, and process them to give an output while storing incomplete query outputs in non-leaf list, which contains the current level of context.
133
+ Parameters:
134
+ 1. tree: Which tree to pass through hierarchically
135
+ 2. user_input: User input that may contain one or more queries as a string
136
+ 3. model: Which model to use to encode (Default: Global model all_MiniLM_L12_v2)"""
137
+
138
+ queries = sent_tokenize(user_input)
139
+ user_embeddings = [model.encode(query,convert_to_tensor=True) for query in queries]
140
+ result = []
141
+ label = prev_label
142
+
143
+ for i in range(len(queries)):
144
+ generate_confidence_threshold(queries[i])
145
+ pass_value = (None, None, 0, False, False, None)
146
+ # pass_value[0] -> current predicted intention (node)
147
+ # pass_value[1] -> parent node of current predicted intention
148
+ # pass_value[2] -> confidence level
149
+ # pass_value[3] -> has the query passed through the model at least once?
150
+ # pass_value[4] -> has the query reached a leaf node?
151
+ # pass_value[5] -> confidence values of traversal for query [DEBUGGING PURPOSES]
152
+
153
+ # Acquiring data from previous query if the query has words matching with connecting phrases
154
+ conn_sim = util.cos_sim(user_embeddings[i], CONNECTION_ENCODE).max().item()
155
+ if conn_sim > confidence_threshold:
156
+ queries[i] = queries[i] + label
157
+ user_embeddings[i] = model.encode(queries[i], convert_to_tensor=True)
158
+
159
+ # Pass values through the root node and the nodes that have the current context
160
+ pass_value_root = h_pass(tree,user_embeddings[i]) # Passing through root node
161
+ pass_value_nonleaf = [h_pass(tree,user_embeddings[i],j) for j in prev_query_data] # Passing through nodes that have current context
162
+ all_nodes = [pass_value_root] + pass_value_nonleaf # List of all nodes that have been passed through
163
+ pass_value = max(all_nodes, key=lambda x: x[2]) # Maximum confidence node for available context. Root is always a context.
164
+ print(f"Query reach confidence: {[i[5] for i in all_nodes]}", file=sys.stderr) # DEBUGGING PURPOSES
165
+
166
+ if pass_value[3]: # If the query has passed at least once, ask for data and store current result
167
+ if not pass_value[4]: # If pass has not reached a leaf node, then ask for more data from the user and keep parent context
168
+ label = pass_value[1].value[1]
169
+ result.append(f"{pass_value[1].value[3]}")
170
+ # continue
171
+
172
+ else: # Query has reached a leaf node
173
+ label = pass_value[0].value[1]
174
+ result.append(pass_value[0].value[3])
175
+ # continue
176
+
177
+ else: # Query has not passed even once. Check if it works when previous context is available
178
+ for parent_context in prev_query_data:
179
+ pass_value_context = h_pass(tree, user_embeddings[i], parent_context)
180
+ if pass_value_context[3]: # Check if it has passed at least once
181
+ # If it has passed, then the query is valid
182
+ if not pass_value_context[4]: # If pass has not reached a leaf node, then ask for more data from the user and keep parent context
183
+ label = pass_value_context[1].value[1]
184
+ result.append(f"What are you looking for in {pass_value_context[1].value[0]}? {pass_value_context[1].value[3]}")
185
+ else:
186
+ label = pass_value_context[0].value[1]
187
+ result.append(pass_value_context[0].value[3])
188
+ break # The else block won't be executed if code reaches here
189
+ else: # The else statement of a for loop will execute only if the loop completes, and won't execute when broken by "break"
190
+ result.append(f"I don't quite understand what you are trying to ask by \"{queries[i]}\"")
191
+ # continue
192
+ # End of "for" loop processing queries
193
+
194
+ # Finally, return result. A list of responses same as the length of queries.
195
+ return result
196
+
197
+ def process_user_query(query: str, model = all_MiniLM_L12_v2)->str:
198
+ """Separate function to get input from user.
199
+ Parameters:
200
+ 1. query: Raw user query string to normalize and screen before receiving further processing
201
+ 2. model: Which model to use to encode (Default: Global model all_MiniLM_L12_v2)"""
202
+
203
+ query = query.lower().strip()
204
+ generate_confidence_threshold(query)
205
+ query_encode = model.encode(query, convert_to_tensor=True)
206
+ clear_intent = util.cos_sim(query_encode,CLEAR_MESSAGES_ENCODE).max().item()
207
+ if clear_intent > confidence_threshold:
208
+ return None
209
+ return query
210
+
211
+ def interact_with_user(tree_data, user_input: str) -> str:
212
+ """Handles a single user query and returns a response string."""
213
+ user_input = process_user_query(user_input)
214
+ all_results = []
215
+ if user_input: # If not empty or command
216
+ results = query_pass(tree_data, user_input)
217
+ for result in results:
218
+ # return f"{select_random_from_list(result)}\nContext window: {prev_query_data}"
219
+ all_results.append(f"{select_random_from_list(result)}")
220
+ print(f"Previous query data: {prev_query_data}", file=sys.stderr)
221
+ return all_results
222
+ else:
223
+ # Mutating global variables: Clearing context and recent history on command
224
+ prev_query_data.clear()
225
+ print(f"Previous query data: {prev_query_data}", file=sys.stderr)
226
+ return ["Cleared previous context"]
227
+
228
+
python_engine/engine_server.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import json
3
+ from core import interact_with_user, cache_embeddings
4
+ from json_parser import tree_from_json
5
+ from tree import struct_tree
6
+ from time import time
7
+ import os
8
+
9
+ # Load model once
10
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
11
+ PROJECT_ROOT = os.path.dirname(BASE_DIR)
12
+
13
+ filepath_root = os.path.join(PROJECT_ROOT, "tree_data")
14
+ filepath_naive = "portfolio_tree_data.json"
15
+ filepath = os.path.join(filepath_root, filepath_naive)
16
+
17
+ print("Loading engine...", file=sys.stderr)
18
+
19
+ s = time()
20
+ tree_file = tree_from_json(filepath)
21
+ print(f"Loaded tree json in {time() - s:.2f}s", file=sys.stderr)
22
+
23
+ s = time()
24
+ cache_embeddings(tree_file)
25
+ print(f"Cached embeddings in {time() - s:.2f}s", file=sys.stderr)
26
+
27
+ tree_file.save(filepath[:-5] + ".pkl")
28
+
29
+ s = time()
30
+ tree_data = struct_tree.load(filepath[:-5] + ".pkl")
31
+ print(f"Loaded tree pickle in {time() - s:.2f}s", file=sys.stderr)
32
+
33
+ print("READY", file=sys.stderr)
34
+ sys.stderr.flush()
35
+
36
+ # Request loop
37
+ while True:
38
+ line = sys.stdin.readline()
39
+ if not line:
40
+ break
41
+
42
+ try:
43
+ data = json.loads(line)
44
+ user_query = data.get("query", "")
45
+ result = interact_with_user(tree_data, user_query)
46
+ sys.stdout.write(json.dumps({"response": result}) + "\n")
47
+ sys.stdout.flush()
48
+ except Exception as e:
49
+ sys.stdout.write(json.dumps({"error": str(e)}) + "\n")
50
+ sys.stdout.flush()
python_engine/json_parser.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from tree import struct_tree
4
+
5
+ SECOND_PERSON_MENTIONS = ["you", "your", "yours", "yourself", "y'all", "y'all's", "y'all'self", "you're", "your'e", "u", "ur", "urs", "urself"]
6
+
7
+ def tree_from_json(filepath):
8
+ """
9
+ Load a tree from a structured JSON file.
10
+ Root node name is taken from the filename (without extension).
11
+ Each node must have: intent, examples, response, and optionally children.
12
+ """
13
+ with open(filepath, 'r') as f:
14
+ data = json.load(f)
15
+
16
+ filename_intent = os.path.splitext(os.path.basename(filepath))[0]
17
+ root_value = (filename_intent, ["Everything"], data.get("examples", []), data["response"])
18
+ tree = struct_tree(root_value)
19
+
20
+ def add_children(parent_node, children_data):
21
+ for child in children_data:
22
+ # Validation
23
+ if not all(k in child for k in ("intent", "label", "examples", "response")):
24
+ raise ValueError(f"Missing required fields in node: {child}")
25
+
26
+ intent = child["intent"]
27
+ label = child["label"]
28
+
29
+ examples = child["examples"]
30
+ # Replace second-person mentions with "Amber AI" to match context
31
+ # for i in range(len(examples)):
32
+ # for spm in SECOND_PERSON_MENTIONS:
33
+ # examples[i] = examples[i].replace(spm, "Amber AI")
34
+
35
+ response = child["response"]
36
+
37
+ value = (intent, label, examples, response)
38
+ new_node = tree.add_node(parent_node, value)
39
+
40
+ if "children" in child:
41
+ add_children(new_node, child["children"])
42
+
43
+ if "children" in data:
44
+ add_children(tree.root, data["children"])
45
+
46
+ return tree
python_engine/tree.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pickle
2
+
3
+ class node:
4
+ """Individual node class of each node of tree"""
5
+ def __init__(self, value=None):
6
+ self.value = value
7
+ self.parent = None
8
+ self.children = []
9
+ self.embedding_cache = None # Will hold precomputed embeddings if available
10
+
11
+ def __repr__(self):
12
+ return str(self.value[0]) if isinstance(self.value, tuple) else str(self.value)
13
+
14
+ class struct_tree:
15
+ def __init__(self,value): # Root is mandatory
16
+ self.root = node(value)
17
+
18
+ def add_node(self, parent_node: node, value): # Add a node
19
+ """Add node to the tree."""
20
+ new_node = node(value)
21
+ new_node.parent = parent_node
22
+ parent_node.children.append(new_node)
23
+ return new_node
24
+
25
+ def visualize(self, show_labels=True):
26
+ """Display the tree in a graph-like format."""
27
+ def _print_node(current_node, prefix="", is_last=True):
28
+ # Print current node
29
+ branch = "└── " if is_last else "├── "
30
+ print(f"{prefix}{branch}{current_node.value if show_labels else 'o'}")
31
+
32
+ # Prepare prefix for children
33
+ extension = " " if is_last else "│ "
34
+ new_prefix = prefix + extension
35
+
36
+ # Print children
37
+ child_count = len(current_node.children)
38
+ for i, child in enumerate(current_node.children):
39
+ is_last_child = i == child_count - 1
40
+ _print_node(child, new_prefix, is_last_child)
41
+
42
+ # Start recursive printing from root
43
+ print(f"{self.root.value if show_labels else 'o'}")
44
+ child_count = len(self.root.children)
45
+ for i, child in enumerate(self.root.children):
46
+ is_last_child = i == child_count - 1
47
+ _print_node(child, "", is_last_child)
48
+
49
+ def save(self, filepath: str):
50
+ """Save the entire tree structure to disk with optional compression."""
51
+ with open(filepath, 'wb') as f:
52
+ pickle.dump(self, f)
53
+
54
+ @staticmethod # This method is called directly on the class rather than an instance of it
55
+ def load(filepath: str):
56
+ """Load the tree structure from disk."""
57
+ with open(filepath, 'rb') as f:
58
+ return pickle.load(f)
59
+
60
+ if __name__ == "__main__": # Execute this only in this file
61
+ # Create a tree with root value "A"
62
+ tree = struct_tree("A")
63
+
64
+ # Add some nodes
65
+ b_node = tree.add_node(tree.root, "B")
66
+ c_node = tree.add_node(tree.root, "C")
67
+ d_node = tree.add_node(tree.root, "D")
68
+
69
+ # Add children to B
70
+ tree.add_node(b_node, "B1")
71
+ b2_node = tree.add_node(b_node, "B2")
72
+ tree.add_node(b2_node, "B2.1")
73
+
74
+ # Add children to C
75
+ tree.add_node(c_node, "C1")
76
+
77
+ # Visualize the tree
78
+ tree.visualize(show_labels=False)
python_engine/util.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from random import randint
2
+ def select_random_from_list(l):
3
+ if isinstance(l, list):
4
+ return l[randint(0,len(l)-1)]
5
+ else:
6
+ return l
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ sentence-transformers
2
+ nltk
tree_data/portfolio_tree_data.json ADDED
@@ -0,0 +1,995 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "intent": "All",
3
+ "label": "All",
4
+ "examples": [],
5
+ "response": "Hello, I'm Dwarakesh. Ask me anything!",
6
+ "children": [
7
+ {
8
+ "intent": "greetings",
9
+ "label": "greetings",
10
+ "examples": [
11
+ "Hi",
12
+ "Hi there",
13
+ "Hello",
14
+ "Hey",
15
+ "Heya",
16
+ "Howdy",
17
+ "Yo",
18
+ "Good to see you",
19
+ "Welcome",
20
+ "Glad you're here",
21
+ "Nice to have you",
22
+ "Sup",
23
+ "Hiya"
24
+ ],
25
+ "response": [
26
+ "Hey there",
27
+ "Hello",
28
+ "Hi",
29
+ "Hey",
30
+ "Howdy",
31
+ "Greetings",
32
+ "Welcome"
33
+ ]
34
+ },
35
+ {
36
+ "intent": "goodbyes",
37
+ "label": "goodbyes",
38
+ "examples": [
39
+ "Bye!",
40
+ "Goodbye",
41
+ "See ya",
42
+ "Cya",
43
+ "Catch you later!",
44
+ "Later!",
45
+ "Take care",
46
+ "Peace out",
47
+ "Talk soon",
48
+ "See you around",
49
+ "Have a good one",
50
+ "See ya later",
51
+ "I'm out",
52
+ "Adios"
53
+ ],
54
+ "response": [
55
+ "Catch you later",
56
+ "See you later",
57
+ "Goodbye",
58
+ "Take care",
59
+ "Bye",
60
+ "See ya"
61
+ ]
62
+ },
63
+ {
64
+ "intent": "thanks",
65
+ "label": "thanks",
66
+ "examples": [
67
+ "Thanks",
68
+ "Thank you",
69
+ "Much appreciated",
70
+ "Thanks a lot",
71
+ "Really appreciate it",
72
+ "Thanks so much",
73
+ "Big thanks",
74
+ "Appreciate it"
75
+ ],
76
+ "response": [
77
+ "Anytime",
78
+ "Happy to help",
79
+ "You're welcome",
80
+ "No problem",
81
+ "Glad I could help",
82
+ "Anytime"
83
+ ]
84
+ },
85
+ {
86
+ "intent": "about",
87
+ "label": "about",
88
+ "examples": [
89
+ "Tell me about yourself",
90
+ "About you",
91
+ "Tell me something about you",
92
+ "Can you tell me about yourself?",
93
+ "About yourself"
94
+ ],
95
+ "response": "I am a student exploring AI/ML and GPU rendering, often diving into Vulkan/low-level graphics. Curiosity-driven projects blend learning with stubborn tinkering."
96
+ },
97
+ {
98
+ "intent": "who are you",
99
+ "label": "who are you",
100
+ "examples": [
101
+ "Who are you?",
102
+ "What's your name?",
103
+ "And you are?",
104
+ "What are you called?",
105
+ "Mind telling me your name?",
106
+ "Can I ask your name?",
107
+ "Who am I talking to?",
108
+ "What do people call you?",
109
+ "What are you?",
110
+ "What am I talking to?"
111
+ ],
112
+ "response": [
113
+ "I am Dwarakesh.",
114
+ "I am Dwarakesh, a chatbot created by and represents Dwarakesh. Yeah we are the same."
115
+ ]
116
+ },
117
+ {
118
+ "intent": "what are you doing",
119
+ "label": "what are you doing",
120
+ "examples": [
121
+ "What are you doing?",
122
+ "What are you up to?"
123
+ ],
124
+ "response": [
125
+ "Chatting with you! Ask me anything related to me or my creator.",
126
+ "I am here to chat with you! Ask me anything related to me or my creator."
127
+ ]
128
+ },
129
+ {
130
+ "intent": "where are you from",
131
+ "label": "where are you from",
132
+ "examples": [
133
+ "Where are you from?",
134
+ "Where do you come from?",
135
+ "What's your origin?",
136
+ "Where were you made?",
137
+ "Where do you live?",
138
+ "Which place are you based in?",
139
+ "Where were you born?",
140
+ "What place are you from?"
141
+ ],
142
+ "response": [
143
+ "I am from Coimbatore. A city in Tamil Nadu, India."
144
+ ],
145
+ "children": [
146
+ {
147
+ "intent": "country",
148
+ "label": "country",
149
+ "examples": [
150
+ "Which country are you from?",
151
+ "What country are you in?",
152
+ "What is your country?",
153
+ "What country do you belong to?"
154
+ ],
155
+ "response": "I am from India, a country in South Asia."
156
+ },
157
+ {
158
+ "intent": "state",
159
+ "label": "state",
160
+ "examples": [
161
+ "Which state are you from?",
162
+ "What state are you in?",
163
+ "What is your state?",
164
+ "What state do you belong to?"
165
+ ],
166
+ "response": "I am from Tamil Nadu, a state in South India."
167
+ },
168
+ {
169
+ "intent": "city",
170
+ "label": "city",
171
+ "examples": [
172
+ "Which city are you from?",
173
+ "What city are you in?",
174
+ "What is your city?",
175
+ "What city do you belong to?"
176
+ ],
177
+ "response": "I am from Coimbatore, located in Tamil Nadu."
178
+ }
179
+ ]
180
+ },
181
+ {
182
+ "intent": "where are you from server",
183
+ "label": "where are you from server",
184
+ "examples": [
185
+ "Server",
186
+ "Where are you hosted?",
187
+ "Where do you run?",
188
+ "Source code",
189
+ "Where can I see your code?",
190
+ "Where are you deployed?"
191
+ ],
192
+ "response": "I run on Hugging face spaces, and my source code is available on files in the same site under the MIT license."
193
+ },
194
+ {
195
+ "intent": "how are you",
196
+ "label": "how are you",
197
+ "examples": [
198
+ "How are you?",
199
+ "How's it going?",
200
+ "How are things?",
201
+ "You doing okay?",
202
+ "How have you been?",
203
+ "Everything good?",
204
+ "All good with you?"
205
+ ],
206
+ "response": [
207
+ "I'm doing just fine. Hope you are too.",
208
+ "I'm doing great. Hope that's the case with you too.",
209
+ "I'm doing well, thanks for asking. Hope you're doing well too."
210
+ ]
211
+ },
212
+ {
213
+ "intent": "education",
214
+ "label": "education",
215
+ "examples": [
216
+ "Where did you study?",
217
+ "Education stats",
218
+ "School",
219
+ "College",
220
+ "Which school did you study in?",
221
+ "Which college did you study in?",
222
+ "Where are you studying?"
223
+ ],
224
+ "response": "I studied at PSBB Millennium till 8th and Suguna Pip School till 12th, both in Coimbatore. I'm currently doing my undergrad at Amrita Vishwa Vidyapeetham, Coimbatore.",
225
+ "children": [
226
+ {
227
+ "intent": "school",
228
+ "label": "school",
229
+ "examples": [
230
+ "School",
231
+ "Which school did you study in?",
232
+ "Where was your early education?"
233
+ ],
234
+ "response": "I studied at PSBB Millennium till 8th and Suguna Pip School till 12th, both in Coimbatore. As for kindergarten, I was in Akshara fun School, Coimbatore."
235
+ },
236
+ {
237
+ "intent": "college",
238
+ "label": "college",
239
+ "examples": [
240
+ "College",
241
+ "Which college did you study in?",
242
+ "Where did you do your UG?",
243
+ "Where did you complete your undergraduate degree?",
244
+ "Are you pursuing your undergraduate degree?"
245
+ ],
246
+ "response": "I'm currently pursuing my undergraduate degree for B.TECH Computer Science in Amrita Vishwa Vidyapeetham, Coimbatore."
247
+ }
248
+ ]
249
+ },
250
+ {
251
+ "intent": "10th board scores",
252
+ "label": "10th board scores",
253
+ "examples": [
254
+ "10th board scores",
255
+ "What is your 10th board score?",
256
+ "What was your 10th grade score?",
257
+ "What was your 10th grade percentage?",
258
+ "What was your 10th grade CGPA?",
259
+ "What was your 10th grade marks?",
260
+ "How much did you score in middle school?"
261
+ ],
262
+ "response": "I scored 461/500 in my 10th grade board exams, which is 92.2%, in the CBSE board."
263
+ },
264
+ {
265
+ "intent": "12th board scores",
266
+ "label": "12th board scores",
267
+ "examples": [
268
+ "12th board scores",
269
+ "What is your 12th board score?",
270
+ "What was your 12th grade score?",
271
+ "What was your 12th grade percentage?",
272
+ "What was your 12th grade CGPA?",
273
+ "What was your 12th grade marks?",
274
+ "How much did you score in high school?"
275
+ ],
276
+ "response": "I scored 454/500 in my 12th grade board exams, which is 90.8%, in the CBSE board."
277
+ },
278
+ {
279
+ "intent": "entrance exam",
280
+ "label": "entrance exam",
281
+ "examples": [
282
+ "Entrance exam",
283
+ "What entrance exams did you take?",
284
+ "Did you take any entrance exams?",
285
+ "What are your entrance exam scores?",
286
+ "What was your JEE score?",
287
+ "What was your JEE Advanced score?",
288
+ "What was your JEE Mains score?"
289
+ ],
290
+ "response": "I took the JEE Mains and JEE Advanced. I scored 97.9 percentile in JEE Mains, and 48/360 in JEE Advanced.",
291
+ "children": [
292
+ {
293
+ "intent": "JEE Mains",
294
+ "label": "JEE Mains",
295
+ "examples": [
296
+ "What was your JEE Mains score?",
297
+ "What was your JEE Mains percentile?",
298
+ "What was your JEE Mains rank?"
299
+ ],
300
+ "response": "I scored 97.9 percentile in JEE Mains."
301
+ },
302
+ {
303
+ "intent": "JEE Advanced",
304
+ "label": "JEE Advanced",
305
+ "examples": [
306
+ "What was your JEE Advanced score?",
307
+ "What was your JEE Advanced rank?"
308
+ ],
309
+ "response": "I scored 48/360 in JEE Advanced."
310
+ }
311
+ ]
312
+ },
313
+ {
314
+ "intent": "exam fail",
315
+ "label": "exam fail",
316
+ "examples": [
317
+ "Have you failed an exam?",
318
+ "Did you ever mess up an exam?",
319
+ "Have you failed before?",
320
+ "Ever gotten something totally wrong?",
321
+ "Have you had a bad result?"
322
+ ],
323
+ "response": "I have failed in my JEE Advanced exam. My score was 48, while the cutoff was 55, out of 360."
324
+ },
325
+ {
326
+ "intent": "arrears",
327
+ "label": "arrears",
328
+ "examples": [
329
+ "Arrears",
330
+ "Do you have any arrears?",
331
+ "Any pending subjects to clear?",
332
+ "Any backlogs?"
333
+ ],
334
+ "response": [
335
+ "Nope! I'm all clear - never been in arrears.",
336
+ "I have never had any arrears, and never will!"
337
+ ]
338
+ },
339
+ {
340
+ "intent": "future plans",
341
+ "label": "higher studies",
342
+ "examples": [
343
+ "Future plans",
344
+ "What do you plan to do in the future?",
345
+ "Got any plans for later?",
346
+ "Higher studies",
347
+ "Are you planning for higher studies?",
348
+ "After college",
349
+ "What will you do after college?",
350
+ "What will you do after getting a degree?"
351
+ ],
352
+ "response": "I have no plans for higher studies. I wish for a placement in a good company, but I have not decided my domain yet.",
353
+ "children": [
354
+ {
355
+ "intent": "higher studies",
356
+ "label": "higher studies",
357
+ "examples": [
358
+ "Are you planning for higher studies?",
359
+ "What are your plans for higher studies?",
360
+ "Do you want to pursue higher studies?"
361
+ ],
362
+ "response": "I have no plans for higher studies."
363
+ },
364
+ {
365
+ "intent": "placement",
366
+ "label": "placement",
367
+ "examples": [
368
+ "What will you do after college?",
369
+ "What will you do after getting a degree?",
370
+ "Are you looking for a job?",
371
+ "Are you looking for placement?"
372
+ ],
373
+ "response": "I wish for a placement in a good company, but I have not decided my domain yet."
374
+ }
375
+ ]
376
+ },
377
+ {
378
+ "intent": "field of interest",
379
+ "label": "machine learning, deep learning",
380
+ "examples": [
381
+ "What are you interested in?",
382
+ "Technical interests?",
383
+ "What is your field of interest?",
384
+ "Is something your field of interest?"
385
+ ],
386
+ "response": "I am particularly interested in machine learning and deep learning, but I do not dislike other domains."
387
+ },
388
+ {
389
+ "intent": "hobbies",
390
+ "label": "blender, badminton",
391
+ "examples": [
392
+ "What do you in your free time?",
393
+ "Hobbies",
394
+ "What are your hobbies?",
395
+ "Recreational activities",
396
+ "Blender",
397
+ "Do you use blender?",
398
+ "What do you do in blender?",
399
+ "How long have you been using blender?",
400
+ "Renders",
401
+ "Can I see your renders?",
402
+ "Show me your renders",
403
+ "Badminton",
404
+ "How long have you been playing badminton?",
405
+ "How good are you at badminton?",
406
+ "Drawing",
407
+ "What do you draw?",
408
+ "Since when have you been drawing?",
409
+ "Can I see your drawings?",
410
+ "Show me your drawings"
411
+ ],
412
+ "response": "I am a 3D enthusiast primarily working on the free open source software Blender. I also play badminton, and draw sometimes.",
413
+ "children": [
414
+ {
415
+ "intent": "blender",
416
+ "label": "blender",
417
+ "examples": [
418
+ "Blender",
419
+ "Do you use blender?",
420
+ "What do you do in blender?",
421
+ "How long have you been using blender?",
422
+ "How experienced are you in blender?",
423
+ "What have you made with blender?",
424
+ "How do you use blender?",
425
+ "Can you show me your renders?",
426
+ "Any good renders?",
427
+ "Renders",
428
+ "Can I see your renders?",
429
+ "Show me your renders"
430
+ ],
431
+ "response": "I have been using blender for since 2021. I am primarily a hard surface modelling artist with some experience in environmental and interior design.",
432
+ "children": [
433
+ {
434
+ "intent": "blender time",
435
+ "label": "blender",
436
+ "examples": [
437
+ "When did you start using blender?",
438
+ "How long have you been using blender?",
439
+ "Since when have you been using blender?"
440
+ ],
441
+ "response": "I have been using blender since 2021."
442
+ },
443
+ {
444
+ "intent": "blender render",
445
+ "label": "blender",
446
+ "examples": [
447
+ "What have you made with blender?",
448
+ "How do you use blender?",
449
+ "Can you show me your renders?",
450
+ "Any good renders?",
451
+ "Renders",
452
+ "Can I see your renders?",
453
+ "Show me your renders"
454
+ ],
455
+ "response": "Here is some of my work. <br> <img src = '../static/mountain_tent.jpg' onclick = 'requestFullscreen(this)'> <br> <img src = '/static/forest_tent.png' onclick = 'requestFullscreen(this)'> <br> <img src = '/static/my_room.png' onclick = 'requestFullscreen(this)'>"
456
+ },
457
+ {
458
+ "intent": "blender experience",
459
+ "label": "experience",
460
+ "examples": [
461
+ "How experienced are you in blender?",
462
+ "How good are you at blender?",
463
+ "How good are you at 3D modelling?"
464
+ ],
465
+ "response": "I have been using blender for over 4 years, so I consider myself to be decent."
466
+ }
467
+ ]
468
+ },
469
+ {
470
+ "intent": "badminton",
471
+ "label": "badminton",
472
+ "examples": [
473
+ "Badminton",
474
+ "How long have you been playing badminton?",
475
+ "Since when have you been playing badminton?",
476
+ "How experienced are you at badminton?"
477
+ ],
478
+ "response": "I have been playing badminton since I was 12, but I have had very long breaks in between. Now it is a part of my daily life, and I consider myself an above average player.",
479
+ "children": [
480
+ {
481
+ "intent": "badminton time",
482
+ "label": "badminton",
483
+ "examples": [
484
+ "How long have you been playing badminton?",
485
+ "Since when have you been playing badminton?"
486
+ ],
487
+ "response": "I have been playing badminton since I was 12, but I have had very long breaks in between. Now it is a part of my daily life."
488
+ },
489
+ {
490
+ "intent": "badminton experience",
491
+ "label": "experience",
492
+ "examples": [
493
+ "How good are you at badminton?",
494
+ "How experienced are you at badminton?"
495
+ ],
496
+ "response": "I consider myself an above average player."
497
+ }
498
+ ]
499
+ },
500
+ {
501
+ "intent": "drawing",
502
+ "label": "drawing",
503
+ "examples": [
504
+ "Drawing",
505
+ "What do you draw?",
506
+ "Since when have you been drawing?",
507
+ "Can I see your drawings?",
508
+ "Show me your drawings"
509
+ ],
510
+ "response": "I draw rarely. It used to be my favourite hobby in the past, until I discovered Blender.",
511
+ "children": [
512
+ {
513
+ "intent": "drawing time",
514
+ "label": "drawing",
515
+ "examples": [
516
+ "How long have you been drawing?",
517
+ "Since when have you been drawing?"
518
+ ],
519
+ "response": "I have been drawing since I was 10 years old."
520
+ },
521
+ {
522
+ "intent": "drawing experience",
523
+ "label": "drawing",
524
+ "examples": [
525
+ "How good are you at drawing?",
526
+ "How experienced are you at drawing?"
527
+ ],
528
+ "response": "I am really not sure... it turns out really well sometimes and really bad some other times."
529
+ },
530
+ {
531
+ "intent": "see drawings",
532
+ "label": "see drawings",
533
+ "examples": [
534
+ "Can I see your drawings?",
535
+ "Show me your drawings",
536
+ "What have you drawn?"
537
+ ],
538
+ "response": "Here are some of my drawings. <br> <img src = '/static/holter.jpg' onclick = 'requestFullscreen(this)'> <br> <img src = '/static/stark.jpg' onclick = 'requestFullscreen(this)'> <br> <img src = '/static/kratos.jpg' onclick = 'requestFullscreen(this)'>"
539
+ }
540
+ ]
541
+ }
542
+ ]
543
+ },
544
+ {
545
+ "intent": "dream",
546
+ "label": "dream",
547
+ "examples": [
548
+ "What is your dream?",
549
+ "Dream job?",
550
+ "What do you want to be?",
551
+ "What is your ambition?",
552
+ "What do you aspire to be?"
553
+ ],
554
+ "response": "I would like to be placed among my favourite companies, which are Google, Nvidia and Tesla, but I have no specific dream job.",
555
+ "children": [
556
+ {
557
+ "intent": "dream company",
558
+ "label": "google, nvidia, tesla",
559
+ "examples": [
560
+ "What is your dream company?",
561
+ "Dream company?",
562
+ "Which company do you want to work for?",
563
+ "What is your favourite company?",
564
+ "What companies do you like?",
565
+ "Where do you want to work?",
566
+ "Where do you want to be placed?",
567
+ "What is your dream job?"
568
+ ],
569
+ "response": "My favourite companies are Google, Nvidia and Tesla. I would like to be placed among them."
570
+ },
571
+ {
572
+ "intent": "dream",
573
+ "label": "artificial intelligence",
574
+ "examples": [
575
+ "What is your dream?",
576
+ "What do you want to achieve?",
577
+ "What is something you want to do?"
578
+ ],
579
+ "response": "My dream is to revolutionize the field of artificial intelligence and create industry standard applications."
580
+ }
581
+ ]
582
+ },
583
+ {
584
+ "intent": "work experience",
585
+ "label": "work",
586
+ "examples": [
587
+ "Do you have work experience?",
588
+ "Any jobs before?",
589
+ "Have you worked before?",
590
+ "Your job history?",
591
+ "Have you worked in a job before?",
592
+ "Experience",
593
+ "What is your experience?",
594
+ "About your experience"
595
+ ],
596
+ "response": "I haven't done any official jobs yet. Just learning and building stuff for now."
597
+ },
598
+ {
599
+ "intent": "internships",
600
+ "label": "internships",
601
+ "examples": [
602
+ "Have you done any internships?",
603
+ "Internships?",
604
+ "Have you interned anywhere?",
605
+ "Any internships?"
606
+ ],
607
+ "response": "I haven't done any internships yet. I'm learning stuff for now."
608
+ },
609
+ {
610
+ "intent": "competitions",
611
+ "label": "competitions",
612
+ "examples": [
613
+ "Have you participated in any competitions?",
614
+ "Competitions?",
615
+ "Have you competed in anything?",
616
+ "Any competitions?",
617
+ "Hackathons?",
618
+ "Have you participated in any hackathons?",
619
+ "Have you competed in any hackathons?",
620
+ "Any hackathons?",
621
+ "Have you qualified for a hackathon?"
622
+ ],
623
+ "response": "I have participated in a few hackathons, but I haven't won any yet. I'm still learning and improving."
624
+ },
625
+ {
626
+ "intent": "skills",
627
+ "label": "skills",
628
+ "examples": [
629
+ "What are your skills?",
630
+ "Skills?",
631
+ "What are you good at?",
632
+ "What are your strengths?",
633
+ "What are your abilities?",
634
+ "What can you do?"
635
+ ],
636
+ "response": "I am skilled in the languages Python, C++, Java, HTML, CSS, JavaScript."
637
+ },
638
+ {
639
+ "intent": "specialization",
640
+ "label": "specialization",
641
+ "examples": [
642
+ "What is your specialization?",
643
+ "What are your special skills?",
644
+ "Specialization?",
645
+ "What are you specialized in?",
646
+ "What is your major?"
647
+ ],
648
+ "response": "I am specialized in Python, with a focus on AI/ML and Rust, GPU rendering. I am also interested in low-level graphics programming."
649
+ },
650
+ {
651
+ "intent": "soft skills",
652
+ "label": "soft skills",
653
+ "examples": [
654
+ "What are your soft skills?",
655
+ "Soft skills?",
656
+ "What are your interpersonal skills?",
657
+ "What are your people skills?",
658
+ "What are your communication skills?"
659
+ ],
660
+ "response": "I am a good leader and a good team player. I am also a good communicator, and I can work well with people from different backgrounds and cultures, though I tend to be a bit shy."
661
+ },
662
+ {
663
+ "intent": "certifications",
664
+ "label": "certifications",
665
+ "examples": [
666
+ "Have you done any certifications?",
667
+ "Certifications?",
668
+ "Have you completed any courses?",
669
+ "Any certifications?",
670
+ "Have you done any online courses?"
671
+ ],
672
+ "response": "I have completed a few online courses, but I haven't received any certifications yet."
673
+ },
674
+ {
675
+ "intent": "languages",
676
+ "label": "languages",
677
+ "examples": [
678
+ "Languages?",
679
+ "What languages do you know?",
680
+ "How many languages do you know?",
681
+ "Can you speak language?",
682
+ "What languages can you speak?",
683
+ "What languages are you fluent in?"
684
+ ],
685
+ "response": "I am fluent in English, Tamil and Telugu, and somewhat fluent in Hindi and Malayalam."
686
+ },
687
+ {
688
+ "intent": "merits and demerits",
689
+ "label": "merits and demerits",
690
+ "examples": [
691
+ "What are your merits?",
692
+ "What are your demerits?",
693
+ "What are your merits and demerits?",
694
+ "What are your strengths and weaknesses?",
695
+ "What are your pros and cons?",
696
+ "What are your good and bad points?",
697
+ "What are your good and bad qualities?"
698
+ ],
699
+ "response": "I am a very curious person, and I love to learn new things. I am also very stubborn, and I don't give up easily. My demerit is that I am not very good at communicating with people, and I tend to be a bit shy.",
700
+ "children": [
701
+ {
702
+ "intent": "merits",
703
+ "label": "merits",
704
+ "examples": [
705
+ "What are your merits?",
706
+ "What are your strengths?",
707
+ "What are your good points?",
708
+ "What are your good qualities?"
709
+ ],
710
+ "response": "I am a very curious person, and I love to learn new things. I am also very stubborn, and I don't give up easily."
711
+ },
712
+ {
713
+ "intent": "demerits",
714
+ "label": "demerits",
715
+ "examples": [
716
+ "What are your demerits?",
717
+ "What are your weaknesses?",
718
+ "What are your bad points?",
719
+ "What are your bad qualities?"
720
+ ],
721
+ "response": "My demerit is that I am not very good at communicating with people, and I tend to be a bit shy."
722
+ }
723
+ ]
724
+ },
725
+ {
726
+ "intent": "research papers",
727
+ "label": "research papers",
728
+ "examples": [
729
+ "Have you published any research papers?",
730
+ "Research papers?",
731
+ "Have you written any research papers?",
732
+ "Any research papers?",
733
+ "Have you written any papers?",
734
+ "Are you researching on something?",
735
+ "Are you doing any research?",
736
+ "Are you working on any research?"
737
+ ],
738
+ "response": "I have not published any research papers yet, but I am working on a few."
739
+ },
740
+ {
741
+ "intent": "projects",
742
+ "label": "projects",
743
+ "examples": [
744
+ "What projects are you working on?",
745
+ "What are your projects?",
746
+ "What are you working on?",
747
+ "What is your current project?",
748
+ "What is your latest project?",
749
+ "What are your current projects?",
750
+ "What are your latest projects?",
751
+ "How do you even work?",
752
+ "How do you function?",
753
+ "How are you able to chat?",
754
+ "What is your technology?",
755
+ "What is your tech stack?",
756
+ "What is your architecture?",
757
+ "How are you built?",
758
+ "How are you made?",
759
+ "What is your framework?",
760
+ "About your tech stack",
761
+ "About your technology",
762
+ "About your working",
763
+ "About Dwarakesh AI",
764
+ "About Dwarakesh",
765
+ "About you",
766
+ "How does Dwarakesh work?",
767
+ "How does Dwarakesh function?"
768
+ ],
769
+ "response": "I am currently working on a routing semantic engine. ",
770
+ "children": [
771
+ {
772
+ "intent": "Dwarakesh AI",
773
+ "label": "Dwarakesh AI",
774
+ "examples": [
775
+ "How do you even work?",
776
+ "How do you function?",
777
+ "How are you able to chat?",
778
+ "What is your technology?",
779
+ "What is your tech stack?",
780
+ "What is your architecture?",
781
+ "How are you built?",
782
+ "How are you made?",
783
+ "What is your framework?",
784
+ "About your tech stack",
785
+ "About your technology",
786
+ "About your working",
787
+ "About you",
788
+ "About Dwarakesh",
789
+ "How does Dwarakesh work?",
790
+ "How does Dwarakesh function?"
791
+ ],
792
+ "response": "I am a semantic engine created by Dwarakesh. For details on how I work, check out my source code and documentation on Hugging Face using files in the same portfolio site."
793
+ }
794
+ ]
795
+ },
796
+ {
797
+ "intent": "contact",
798
+ "label": "contact",
799
+ "examples": [
800
+ "How can I contact you?",
801
+ "How can I reach you?",
802
+ "How can I get in touch with you?",
803
+ "How can I connect with you?",
804
+ "How to contact you?",
805
+ "Contact",
806
+ "Contact",
807
+ "Can I colab with you?",
808
+ "Can I collaborate with you?",
809
+ "Can I work with you?",
810
+ "Contact information",
811
+ "Gmail",
812
+ "Email",
813
+ "Github",
814
+ "LinkedIn",
815
+ "Social media",
816
+ "Social links",
817
+ "Social media accounts",
818
+ "What is your email?",
819
+ "What is your github?",
820
+ "What is your linkedin?",
821
+ "What is your social media?"
822
+ ],
823
+ "response": "You can contact me through Github on <a href = 'https://www.github.com/Dwarakesh-V'> github.com/Dwarakesh-V </a>, Email on <a href='mailto:dwarakesh.2005.4@gmail.com'>dwarakesh.2005.4@gmail.com</a>, or LinkedIn on <a href = 'https://www.linkedin.com/in/vdwarakesh/'> linkedin.com/in/vdwarakesh </a>.",
824
+ "children": [
825
+ {
826
+ "intent": "email",
827
+ "label": "email",
828
+ "examples": [
829
+ "What is your email?",
830
+ "What is your email address?",
831
+ "Email",
832
+ "Gmail"
833
+ ],
834
+ "response": "My email is <a href='mailto:dwarakesh.2005.4@gmail.com'>dwarakesh.2005.4@gmail.com</a>."
835
+ },
836
+ {
837
+ "intent": "github",
838
+ "label": "github",
839
+ "examples": [
840
+ "What is your github?",
841
+ "Github",
842
+ "Github link"
843
+ ],
844
+ "response": "My github is <a href = 'https://www.github.com/Dwarakesh-V'> github.com/Dwarakesh-V </a>."
845
+ },
846
+ {
847
+ "intent": "linkedin",
848
+ "label": "linkedin",
849
+ "examples": [
850
+ "What is your linkedin?",
851
+ "LinkedIn",
852
+ "LinkedIn link"
853
+ ],
854
+ "response": "My linkedin is <a href = 'https://www.linkedin.com/in/vdwarakesh/'> linkedin.com/in/vdwarakesh </a>."
855
+ }
856
+ ]
857
+ },
858
+ {
859
+ "intent": "favorite color",
860
+ "label": "favorite color",
861
+ "examples": [
862
+ "What is your favorite color?",
863
+ "Favorite color?",
864
+ "What color do you like?",
865
+ "Do you have a favorite color?",
866
+ "What colors do you like?"
867
+ ],
868
+ "response": "My favourite color is green, tied with black."
869
+ },
870
+ {
871
+ "intent": "favorite food",
872
+ "label": "favorite food",
873
+ "examples": [
874
+ "What is your favorite food?",
875
+ "Favorite food?",
876
+ "What food do you like?",
877
+ "Do you have a favorite food?",
878
+ "What foods do you like?"
879
+ ],
880
+ "response": "I like South Indian food in general, but I'm not very picky."
881
+ },
882
+ {
883
+ "intent": "favorite movie",
884
+ "label": "favorite movie",
885
+ "examples": [
886
+ "What is your favorite movie?",
887
+ "Favorite movie?",
888
+ "What movie do you like?",
889
+ "Do you have a favorite movie?",
890
+ "What movies do you like?"
891
+ ],
892
+ "response": "I am not a big fan of movies."
893
+ },
894
+ {
895
+ "intent": "parents",
896
+ "label": "parents",
897
+ "examples": [
898
+ "What are your parents doing?",
899
+ "What is your father doing?",
900
+ "What is your mother doing?",
901
+ "What is your father's job?",
902
+ "What is your mother's job?",
903
+ "What do your parents do?",
904
+ "What do your parents work as?",
905
+ "What do your parents do for a living?"
906
+ ],
907
+ "response": "My father is a businessman, and my mother is a government employee."
908
+ },
909
+ {
910
+ "intent": "siblings",
911
+ "label": "siblings",
912
+ "examples": [
913
+ "Do you have any siblings?",
914
+ "Are you an only child?",
915
+ "Do you have a brother or sister?",
916
+ "How many siblings do you have?",
917
+ "What are your siblings doing?"
918
+ ],
919
+ "response": "I have no siblings. I am an only child."
920
+ },
921
+ {
922
+ "intent": "marital status",
923
+ "label": "marital status",
924
+ "examples": [
925
+ "Are you married?",
926
+ "Are you single?",
927
+ "Do you have a partner?",
928
+ "Are you in a relationship?",
929
+ "Do you have a girlfriend?",
930
+ "Are you dating?"
931
+ ],
932
+ "response": "I might have a partner. I might be single. Good luck finding out."
935
+ },
936
+ {
937
+ "intent": "interests",
938
+ "label": "interests",
939
+ "examples": [
940
+ "Is there someone you like?",
941
+ "Are you interested in anyone?",
942
+ "Do you have a crush?",
943
+ "Whom do you like?",
944
+ "Who is your crush?"
945
+ ],
946
+ "response": "There could be someone I like. Maybe I'm lying. Good luck finding out."
947
+ },
948
+ {
949
+ "intent": "age",
950
+ "label": "age",
951
+ "examples": [
952
+ "How old are you?",
953
+ "What is your age?",
954
+ "When were you born?",
955
+ "When is your birthday?",
956
+ "What year were you born?"
957
+ ],
958
+ "response": "I was born on November 2, 2005. Which means I am 19 years old.",
959
+ "children": [
960
+ {
961
+ "intent": "birth date",
962
+ "label": "birth date",
963
+ "examples": [
964
+ "When is your birthday?",
965
+ "What is your birth date?",
966
+ "When were you born?",
967
+ "What year were you born?"
968
+ ],
969
+ "response": "I was born on November 2, 2005."
970
+ },
971
+ {
972
+ "intent": "current age",
973
+ "label": "current age",
974
+ "examples": [
975
+ "How old are you?",
976
+ "What is your age?",
977
+ "How many years old are you?"
978
+ ],
979
+ "response": "I am 19 years old."
980
+ }
981
+ ]
982
+ },
983
+ {
984
+ "intent": "resume",
985
+ "label": "resume",
986
+ "examples": [
987
+ "Resume",
988
+ "Can I see your resume?",
989
+ "Show me your resume",
990
+ "Send me your resume"
991
+ ],
992
+ "response": "Here is my resume.<br><img src = '/static/resume_sample.webp' onclick = 'requestFullscreen(this)'>"
993
+ }
994
+ ]
995
+ }
tree_data/portfolio_tree_data.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e79abe36e19a7a9a41b7ac4e5979e41ad94aae0c92cdee2da2f465a0ba467d6c
3
+ size 707759
tsconfig.app.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "compilerOptions": {
3
+ "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
4
+ "target": "ES2022",
5
+ "useDefineForClassFields": true,
6
+ "lib": ["ES2022", "DOM", "DOM.Iterable"],
7
+ "module": "ESNext",
8
+ "types": ["vite/client"],
9
+ "skipLibCheck": true,
10
+
11
+ /* Bundler mode */
12
+ "moduleResolution": "bundler",
13
+ "allowImportingTsExtensions": true,
14
+ "verbatimModuleSyntax": true,
15
+ "moduleDetection": "force",
16
+ "noEmit": true,
17
+ "jsx": "react-jsx",
18
+
19
+ /* Linting */
20
+ "strict": true,
21
+ "noUnusedLocals": true,
22
+ "noUnusedParameters": true,
23
+ "erasableSyntaxOnly": true,
24
+ "noFallthroughCasesInSwitch": true,
25
+ "noUncheckedSideEffectImports": true
26
+ },
27
+ "include": ["frontend/src"]
28
+ }
tsconfig.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "files": [],
3
+ "references": [
4
+ { "path": "./tsconfig.app.json" },
5
+ { "path": "./tsconfig.node.json" }
6
+ ]
7
+ }
tsconfig.node.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "compilerOptions": {
3
+ "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
4
+ "target": "ES2023",
5
+ "lib": ["ES2023"],
6
+ "module": "ESNext",
7
+ "types": ["node"],
8
+ "skipLibCheck": true,
9
+
10
+ /* Bundler mode */
11
+ "moduleResolution": "bundler",
12
+ "allowImportingTsExtensions": true,
13
+ "verbatimModuleSyntax": true,
14
+ "moduleDetection": "force",
15
+ "noEmit": true,
16
+
17
+ /* Linting */
18
+ "strict": true,
19
+ "noUnusedLocals": true,
20
+ "noUnusedParameters": true,
21
+ "erasableSyntaxOnly": true,
22
+ "noFallthroughCasesInSwitch": true,
23
+ "noUncheckedSideEffectImports": true
24
+ },
25
+ "include": ["vite.config.ts"]
26
+ }
vite.config.ts ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { defineConfig } from 'vite'
2
+ import react from '@vitejs/plugin-react'
3
+ import tailwindcss from '@tailwindcss/vite'
4
+
5
+ // https://vite.dev/config/
6
+ export default defineConfig({
7
+ root: "frontend",
8
+ plugins: [
9
+ react(),
10
+ tailwindcss(),
11
+ ],
12
+ })