Spaces:
Sleeping
Sleeping
Commit ·
588df39
1
Parent(s): 33af2b4
Add application file
Browse files- .gitattributes +2 -0
- Dockerfile +25 -0
- __init__.py +0 -0
- app.py +90 -0
- core.py +235 -0
- documentation/AmberAI_concept.txt +149 -0
- documentation/AmberAI_effect.txt +7 -0
- documentation/AmberAI_examples.txt +79 -0
- json_parser.py +46 -0
- requirements.txt +8 -0
- static/favicon.ico +3 -0
- static/forest_tent.png +3 -0
- static/holter.jpg +3 -0
- static/kratos.jpg +3 -0
- static/mic.png +3 -0
- static/mountain_tent.jpg +3 -0
- static/my_room.png +3 -0
- static/opensans.ttf +3 -0
- static/refresh.png +3 -0
- static/resume_sample.webp +3 -0
- static/saturn_render_color_transparent.png +3 -0
- static/script.js +3 -0
- static/stark.jpg +3 -0
- static/styles.css +3 -0
- static/wimg.jpeg +3 -0
- temp_audio/latest.wav +0 -0
- templates/index.html +221 -0
- tree.py +78 -0
- tree_data/company_tree_data.json +40 -0
- tree_data/company_tree_data.pkl +3 -0
- tree_data/portfolio_tree_data.json +951 -0
- tree_data/portfolio_tree_data.pkl +3 -0
- util.py +6 -0
- voice.py +53 -0
- vosk-model-small-en-us-0.15/README +3 -0
- vosk-model-small-en-us-0.15/am/final.mdl +3 -0
- vosk-model-small-en-us-0.15/conf/mfcc.conf +3 -0
- vosk-model-small-en-us-0.15/conf/model.conf +3 -0
- vosk-model-small-en-us-0.15/graph/Gr.fst +3 -0
- vosk-model-small-en-us-0.15/graph/HCLr.fst +3 -0
- vosk-model-small-en-us-0.15/graph/disambig_tid.int +3 -0
- vosk-model-small-en-us-0.15/graph/phones/word_boundary.int +3 -0
- vosk-model-small-en-us-0.15/ivector/final.dubm +3 -0
- vosk-model-small-en-us-0.15/ivector/final.ie +3 -0
- vosk-model-small-en-us-0.15/ivector/final.mat +3 -0
- vosk-model-small-en-us-0.15/ivector/global_cmvn.stats +3 -0
- vosk-model-small-en-us-0.15/ivector/online_cmvn.conf +3 -0
- vosk-model-small-en-us-0.15/ivector/splice.conf +3 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
vosk-model-small-en-us-0.15/** filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
static/** filter=lfs diff=lfs merge=lfs -text
|
Dockerfile
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

FROM python:3.9

# Create a user for security
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

# Set working directory
WORKDIR /app

# Install Python dependencies first so the layer is cached across code changes
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

# Copy the rest of the code
COPY --chown=user . /app

# NLTK needs tokenizer data.
# FIX: NLTK >= 3.8.2 resolves sent_tokenize via the "punkt_tab" resource,
# so fetch both ids to keep the image working across NLTK versions.
RUN python -m nltk.downloader punkt punkt_tab

# Run FastAPI app via Uvicorn on port 7860 (used by Hugging Face Spaces)
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
|
__init__.py
ADDED
|
File without changes
|
app.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI, Request
|
| 2 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 3 |
+
from fastapi.responses import HTMLResponse
|
| 4 |
+
from fastapi.templating import Jinja2Templates
|
| 5 |
+
from fastapi.staticfiles import StaticFiles
|
| 6 |
+
from fastapi import File, UploadFile
|
| 7 |
+
|
| 8 |
+
import shutil
|
| 9 |
+
from pydantic import BaseModel
|
| 10 |
+
|
| 11 |
+
from core import interact_with_user, cache_embeddings
|
| 12 |
+
from voice import transcribe_audio
|
| 13 |
+
|
| 14 |
+
from json_parser import tree_from_json
|
| 15 |
+
|
| 16 |
+
from tree import struct_tree
|
| 17 |
+
from time import time
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# -------- FastAPI setup --------
# Module-level setup: runs once when uvicorn imports app.py.
app = FastAPI()

# Enable CORS (optional if serving HTML from FastAPI itself)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, replace with specific domains
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Serve images, JS and CSS from ./static under the /static URL prefix.
app.mount("/static", StaticFiles(directory="static"), name="static")
|
| 33 |
+
|
| 34 |
+
# Set up HTML templating
templates = Jinja2Templates(directory="templates")

@app.get("/", response_class=HTMLResponse)
async def serve_index(request: Request):
    """Serve the single-page UI rendered from templates/index.html."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# -------- Load tree and model once --------
# Module-level startup work: executes at import time, before the first request.
filepath_root = "tree_data"
filepath_naive = "portfolio_tree_data.json" # Changing this line should change the context completely.
filepath = f"{filepath_root}/{filepath_naive}"

print("\n--- Loading model and data ---")

# Build the intent tree from JSON, then embed every node's example phrases.
s = time()
tree_file = tree_from_json(filepath)
e = time()
print(f"Time taken to load tree from json: {e - s:.2f} seconds")

s = time()
cache_embeddings(tree_file)
e = time()
print(f"Time taken to cache embeddings: {e - s:.2f} seconds")

# Persist the embedded tree next to the JSON (swap the ".json" suffix for ".pkl").
tree_file.save(filepath[:-5] + ".pkl")

# Reload from the pickle; tree_data is the object the request handlers use.
s = time()
tree_data = struct_tree.load(filepath[:-5] + ".pkl")
e = time()
print(f"Time taken to load tree from pkl: {e - s:.2f} seconds")

print("Amber is ready.\n")
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
# -------- API Endpoint --------
class Query(BaseModel):
    """Request body for /ask: one free-text user message."""
    query: str

@app.post("/ask")
def ask_amber(query: Query):
    """Answer a user query by walking the intent tree and return the responses."""
    answer = interact_with_user(tree_data, query.query)
    return {"response": answer}
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@app.post("/transcribe")
async def transcribe(file: UploadFile = File(...)):
    """Save an uploaded audio file into temp_audio/ and return its transcription.

    Parameters:
    1. file: Audio upload from the browser recorder.

    Returns {"transcription": <result of voice.transcribe_audio>}."""
    import os  # local import: app.py has no module-level os import

    # BUG FIX: the original did `filepath = f"temp_audio/(unknown)"` -- an
    # f-string with no placeholder -- so `filename` was never used and every
    # upload overwrote the same literal file. Use the client filename, but
    # strip any directory components to block path traversal ("../../x.wav").
    filename = os.path.basename(file.filename or "upload.wav")
    filepath = f"temp_audio/{filename}"

    with open(filepath, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)

    # Call the transcribe function
    result = transcribe_audio(filepath)
    return {"transcription": result}
|
core.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # Silence warnings
|
| 3 |
+
|
| 4 |
+
from sentence_transformers import SentenceTransformer, util
|
| 5 |
+
|
| 6 |
+
from nltk.tokenize import sent_tokenize
|
| 7 |
+
|
| 8 |
+
from util import select_random_from_list
|
| 9 |
+
|
| 10 |
+
from math import exp
|
| 11 |
+
|
| 12 |
+
# Shared sentence-embedding model, loaded once at import time and reused everywhere.
all_MiniLM_L12_v2 = SentenceTransformer("all-MiniLM-L12-v2")
"""
all-MiniLM-L12-v2 is a sentence embedding model used for tasks involving semantic textual similarity, clustering, semantic search, and information retrieval. They convert a sentence to tensor based on their intent and then matching patterns like cos_sim can be used to compare them to other sentences.
"""

# Canned farewell text. Not referenced elsewhere in this file -- presumably
# used by a CLI front end; verify before removing.
EXIT_MESSAGE = "Feel free to return any time you want."

# Phrases that signal a query refers back to the previous topic
# ("tell me more about that"); pre-encoded once for fast cosine comparison.
CONNECTION_PHRASES = ["just like that", "for the same", "similarly", "similar to the previous", "for that", "for it"]
CONNECTION_ENCODE = all_MiniLM_L12_v2.encode(CONNECTION_PHRASES,convert_to_tensor=True)
|
| 21 |
+
|
| 22 |
+
# Label of the most recently matched node; used to expand back-references.
prev_label = ""
prev_query_data = [] # Stores previous context if queries contain ambiguous content that may map to previous responses

confidence_threshold = 0.35 # Default confidence threshold

def generate_confidence_threshold(query: str, base=0.6, decay=0.03, min_threshold=0.25)->float:
    """Recompute the global confidence threshold from the query's length.

    Longer sentences yield lower similarity scores, so the acceptance
    threshold decays exponentially with word count:
        threshold = max(base * e^(-decay * word_count), min_threshold)
    (Fixed: the old docstring claimed a base of 0.8; the default is 0.6.)

    Parameters:
    1. query: Modify threshold based on this sentence
    2. base, decay: parameters of base * e^(-decay * word_count)
    3. min_threshold: Clamp to minimum to avoid much lower confidence values

    Returns the new threshold. (The original was annotated ->float but
    returned None; it now returns the value it stores in the global
    confidence_threshold, which remains the primary channel for callers.)"""
    global confidence_threshold
    length = len(query.split())
    confidence_threshold = max(base * exp(-decay * length), min_threshold)
    return confidence_threshold
|
| 35 |
+
|
| 36 |
+
# The value of each node contains the following data
# node.value[0] -> intent
# node.value[1] -> label
# node.value[2] -> examples
# node.value[3] -> response
# node.value[4] -> children

def cache_embeddings(tree, model=None)->None:
    """Store the encoded examples as part of the tree itself (as
    node.embedding_cache) to avoid repetitive computations.

    Parameters:
    1. tree: Tree to cache embeddings
    2. model: Which model to use to encode (Default: Global model all_MiniLM_L12_v2)"""
    if model is None:
        # Late-bound default: avoids freezing the global model at def time
        # and lets tests inject a lightweight fake encoder.
        model = all_MiniLM_L12_v2

    def _cache_node_embeddings(n):
        # Embed this node's example phrases, then recurse into its children.
        # BUG FIX: the original guard was len(n.value) >= 2 but the body reads
        # n.value[2], which raises IndexError on 2-tuples; require >= 3.
        if isinstance(n.value, tuple) and len(n.value) >= 3:
            examples = n.value[2]
            n.embedding_cache = model.encode(examples, convert_to_tensor=True)
        for child in n.children:
            _cache_node_embeddings(child)

    _cache_node_embeddings(tree.root)
|
| 56 |
+
|
# Commands that mean "wipe the conversation context"; pre-encoded once so each
# incoming query is compared by cosine similarity instead of exact match.
CLEAR_MESSAGES = ["delete", "delete context", "delete history", "clear", "clear context", "clear history", "reset", "reset context", "reset chat", "forget", "forget all"]
CLEAR_MESSAGES_ENCODE = all_MiniLM_L12_v2.encode(CLEAR_MESSAGES,convert_to_tensor=True)

# Second-person words that get rewritten to the bot's name before matching.
# BUG FIX: the original list had a missing comma ("your'e""u"), which Python
# concatenates into the single bogus element "your'eu", so neither "your'e"
# nor "u" was ever matched on its own.
# NOTE(review): get_user_query applies these with plain substring replace in
# list order, so "you" also rewrites inside "your"/"young" -- consider
# word-boundary matching; left as-is here to preserve behavior.
SECOND_PERSON_MENTIONS = ["you", "your", "yours", "yourself", "y'all", "y'all's", "y'all'self", "you're", "your'e", "u", "ur", "urs", "urself"]
+
def get_user_query(message="", model = all_MiniLM_L12_v2)->str:
    """Prompt on stdin until a non-empty query is entered, then normalize it.

    Parameters:
    1. message: Show message to user before receiving input (Default: empty)
    2. model: Which model to use to encode (Default: Global model all_MiniLM_L12_v2)

    Returns the normalized query, or None when the query's intent matches a
    clear/reset command (the caller is expected to wipe context on None)."""

    query = input(message).lower().strip()
    while query == "":
        query = input(message).lower().strip()

    # BUG FIX: the original `query.replace("  "," ")` removed only one level
    # of doubling ("a    b" -> "a  b"); split/join collapses every run of
    # whitespace in a single pass.
    query = " ".join(query.split())
    for spm in SECOND_PERSON_MENTIONS: # Remove second person mentions
        query = query.replace(spm,"Amber AI") # Replace with bot name
    generate_confidence_threshold(query)
    query_encode = model.encode(query, convert_to_tensor=True)
    clear_intent = util.cos_sim(query_encode,CLEAR_MESSAGES_ENCODE).max().item()
    if clear_intent > confidence_threshold:
        return None  # Signals a "clear context" command, not a real query
    return query
|
| 80 |
+
|
| 81 |
+
def _calculate_single_level(user_embed,predicted_intent):
    """Score every child of *predicted_intent* against the user embedding.

    Parameters:
    1. user_embed: User query converted to tensor
    2. predicted_intent: Calculate for children of this node

    Returns (best_child, best_score); best_child is None when the node has
    no children or every similarity score is zero. Raises ValueError when a
    child was never embedded via cache_embeddings()."""
    best_child = None
    best_score = 0
    for child in predicted_intent.children:  # Each node keeps its children as a list of nodes
        if child.embedding_cache is None:
            raise ValueError("Embedding cache missing. Call cache_embeddings() on the tree first")
        similarity = util.cos_sim(user_embed, child.embedding_cache).max().item()
        if similarity > best_score:
            best_score = similarity
            best_child = child
    return (best_child, best_score)
|
| 99 |
+
|
| 100 |
+
def _store_prev_data(predicted_intent):
    """Rebuild the global context path.

    Replaces prev_query_data with the chain of nodes from
    *predicted_intent* up to (but excluding) the tree root.

    Parameters:
    1. predicted_intent: Store previous data w.r.t this node"""
    # Mutating global prev_query_data
    prev_query_data.clear()
    node = predicted_intent
    while node.parent:  # The root has no parent, so it is never stored
        prev_query_data.append(node)
        node = node.parent
|
| 110 |
+
|
| 111 |
+
def h_pass(tree, user_embed, predicted_intent = None)->tuple:
    """Use the model to pass through the tree to compare it with the user query in a hierarchical manner and return an output.

    Parameters:
    1. tree: Which tree to pass through hierarchically
    2. user_embed: User input converted to tensor
    3. predicted_intent: Where to start the pass from (Default: Root of the tree)

    Returns a 6-tuple:
    (best node, its parent, confidence, passed_once, reached_leaf, per-level confidences).
    Side effects: updates the globals prev_label and (via _store_prev_data)
    prev_query_data as it descends."""
    global prev_label
    predicted_intent = tree.root if predicted_intent == None else predicted_intent
    predicted_intent_parent = None
    high_intent = 0
    passed_once = False
    pass_through_intent = {}  # node -> confidence for each level visited (kept for debugging)
    while predicted_intent.children: # If the node has children, check for the child with the highest confidence value
        predicted_intent_parent = predicted_intent
        predicted_intent, high_intent = _calculate_single_level(user_embed,predicted_intent)
        pass_through_intent[predicted_intent] = high_intent # Store the confidence value of the current node
        if passed_once: # If the data didn't pass even once, then don't store it
            _store_prev_data(predicted_intent_parent) # Storing previous data w.r.t parent node as context is changed from current node
        if high_intent < confidence_threshold: # If highest confidence value is still too low, stop.
            prev_label = predicted_intent_parent.value[1]
            return (predicted_intent, predicted_intent_parent, high_intent, passed_once, False, pass_through_intent) # If the confidence value is low, stop
        passed_once = True

    # Loop exited normally: we are at a leaf. Record it as the new context
    # and remember its label for back-references in the next query.
    _store_prev_data(predicted_intent)
    prev_label = predicted_intent.value[1]
    return (predicted_intent, predicted_intent_parent, high_intent, passed_once, True, pass_through_intent)
|
| 137 |
+
|
| 138 |
+
def query_pass(tree, user_input, model=all_MiniLM_L12_v2)->list:
    """Separate multiple queries into separate single ones, analyze relation between them if any, and process them to give an output while storing incomplete query outputs in non-leaf list, which contains the current level of context.

    Parameters:
    1. tree: Which tree to pass through hierarchically
    2. user_input: User input that may contain one or more queries as a string
    3. model: Which model to use to encode (Default: Global model all_MiniLM_L12_v2)

    Returns a list of response strings, one entry per sentence in user_input."""

    queries = sent_tokenize(user_input)  # Split the input into individual sentences
    user_embeddings = [model.encode(query,convert_to_tensor=True) for query in queries]
    result = []
    label = prev_label  # Label of the last matched node; appended to back-referencing queries

    for i in range(len(queries)):
        generate_confidence_threshold(queries[i])  # Threshold adapts to this sentence's length
        pass_value = (None, None, 0, False, False, None)
        # pass_value[0] -> current predicted intention (node)
        # pass_value[1] -> parent node of current predicted intention
        # pass_value[2] -> confidence level
        # pass_value[3] -> has the query passed through the model at least once?
        # pass_value[4] -> has the query reached a leaf node?
        # pass_value[5] -> confidence values of traversal for query [DEBUGGING PURPOSES]

        # Acquiring data from previous query if the query has words matching with connecting phrases
        conn_sim = util.cos_sim(user_embeddings[i], CONNECTION_ENCODE).max().item()
        if conn_sim > confidence_threshold:
            queries[i] = queries[i] + label  # e.g. "tell me more about that" -> append previous label
            user_embeddings[i] = model.encode(queries[i], convert_to_tensor=True)  # Re-encode the augmented query

        # Pass values through the root node and the nodes that have the current context
        pass_value_root = h_pass(tree,user_embeddings[i]) # Passing through root node
        pass_value_nonleaf = [h_pass(tree,user_embeddings[i],j) for j in prev_query_data] # Passing through nodes that have current context
        all_nodes = [pass_value_root] + pass_value_nonleaf # List of all nodes that have been passed through
        pass_value = max(all_nodes, key=lambda x: x[2]) # Maximum confidence node for available context. Root is always a context.
        print(f"Query reach confidence: {[i[5] for i in all_nodes]}") # DEBUGGING PURPOSES (the comprehension's i shadows the loop index but is scoped to the comprehension)

        if pass_value[3]: # If the query has passed at least once, ask for data and store current result
            if not pass_value[4]: # If pass has not reached a leaf node, then ask for more data from the user and keep parent context
                label = pass_value[1].value[1]
                result.append(f"{pass_value[1].value[3]}")

            else: # Query has reached a leaf node
                label = pass_value[0].value[1]
                result.append(pass_value[0].value[3])

        else: # Query has not passed even once. Check if it works when previous context is available
            for parent_context in prev_query_data:
                pass_value_context = h_pass(tree, user_embeddings[i], parent_context)
                if pass_value_context[3]: # Check if it has passed at least once
                    # If it has passed, then the query is valid
                    if not pass_value_context[4]: # If pass has not reached a leaf node, then ask for more data from the user and keep parent context
                        label = pass_value_context[1].value[1]
                        result.append(f"What are you looking for in {pass_value_context[1].value[0]}? {pass_value_context[1].value[3]}")
                    else:
                        label = pass_value_context[0].value[1]
                        result.append(pass_value_context[0].value[3])
                    break # The else block won't be executed if code reaches here
            else: # The else statement of a for loop will execute only if the loop completes, and won't execute when broken by "break"
                result.append(f"I don't quite understand what you are trying to ask by \"{queries[i]}\"")
    # End of "for" loop processing queries

    # Finally, return result. A list of responses same as the length of queries.
    return result
|
| 203 |
+
|
| 204 |
+
def process_user_query(query: str, model = all_MiniLM_L12_v2)->str:
    """Normalize an incoming query string and detect the "clear context" command.

    (Fixed docstring: the original was copy-pasted from get_user_query and
    described a nonexistent ``message`` parameter and stdin input; this
    function is the non-interactive variant used by the /ask endpoint.)

    Parameters:
    1. query: Raw user query string
    2. model: Which model to use to encode (Default: Global model all_MiniLM_L12_v2)

    Returns the lowercased, stripped query, or None when its intent matches
    one of CLEAR_MESSAGES (caller clears context on None). Side effect:
    refreshes the global confidence_threshold for this query's length."""

    query = query.lower().strip()
    generate_confidence_threshold(query)
    query_encode = model.encode(query, convert_to_tensor=True)
    clear_intent = util.cos_sim(query_encode,CLEAR_MESSAGES_ENCODE).max().item()
    if clear_intent > confidence_threshold:
        return None  # Signals a "clear context" command, not a real query
    return query
|
| 217 |
+
|
| 218 |
+
def interact_with_user(tree_data, user_input: str) -> list:
    """Handle a single user message and return a list of response strings.

    (Fixed annotation: the original declared ``-> str`` but both branches
    return a list; /ask serializes that list as the "response" field.)

    Parameters:
    1. tree_data: The embedded intent tree to query
    2. user_input: Raw user message (may contain several sentences)

    Returns one response string per detected query, or
    ["Cleared previous context"] when the input was a clear/reset command."""
    user_input = process_user_query(user_input)
    all_results = []
    if user_input: # If not empty or command
        results = query_pass(tree_data, user_input)
        for result in results:
            # Each tree response may be a list of phrasings; pick one at random.
            all_results.append(f"{select_random_from_list(result)}")
        print(f"Previous query data: {prev_query_data}")
        return all_results
    else:
        # Mutating global variables: Clearing context and recent history on command
        prev_query_data.clear()
        print(f"Previous query data: {prev_query_data}")
        return ["Cleared previous context"]
|
| 234 |
+
|
| 235 |
+
|
documentation/AmberAI_concept.txt
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 📄 **Composable Answer Engine: A Deterministic, Low-Compute Alternative to Generative Chatbots**
|
| 2 |
+
|
| 3 |
+
## ✦ Overview
|
| 4 |
+
|
| 5 |
+
The **Composable Answer Engine** is a novel approach to building intelligent, language-aware question-answering systems. Unlike conventional chatbots (which are rigid and brittle) or large language models (which are resource-intensive and prone to hallucination), this system offers a **middle path**: it provides natural, flexible, and contextually relevant responses by **composing pre-approved answer fragments** rather than generating free-form language.
|
| 6 |
+
|
| 7 |
+
The system is designed to be **safe, efficient, and domain-controlled**, making it ideal for business applications where **accuracy, auditability, and trust** are more important than creative generation.
|
| 8 |
+
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
## ✦ Problem Statement
|
| 12 |
+
|
| 13 |
+
Existing approaches to automated customer or user interaction fall into two categories:
|
| 14 |
+
|
| 15 |
+
| Approach | Description | Limitations |
|
| 16 |
+
|---------|-------------|-------------|
|
| 17 |
+
| **Rule-based chatbots** | Pre-scripted flows, keyword matching | Inflexible, brittle, poor handling of paraphrasing |
|
| 18 |
+
| **LLM-powered assistants** | Use transformers to generate fluent answers | Require high compute, risk hallucination, hard to control, privacy/legal concerns |
|
| 19 |
+
|
| 20 |
+
**Challenge:**
|
| 21 |
+
There is no widely adopted approach that combines:
|
| 22 |
+
- Language understanding
|
| 23 |
+
- Deterministic, auditable answers
|
| 24 |
+
- Low computational overhead
|
| 25 |
+
- Modular knowledge expansion
|
| 26 |
+
|
| 27 |
+
---
|
| 28 |
+
|
| 29 |
+
## ✦ Proposed Solution
|
| 30 |
+
|
| 31 |
+
The **Composable Answer Engine (CAE)** answers natural language questions by **matching user input to semantically similar phrases** and **stitching together pre-written, approved text segments**.
|
| 32 |
+
|
| 33 |
+
It avoids neural generation entirely and instead uses **retrieval + controlled composition** to ensure:
|
| 34 |
+
- No hallucinations
|
| 35 |
+
- Full traceability
|
| 36 |
+
- Cost-effective deployment
|
| 37 |
+
|
| 38 |
+
---
|
| 39 |
+
|
| 40 |
+
## ✦ System Architecture
|
| 41 |
+
|
| 42 |
+
```plaintext
|
| 43 |
+
+------------------------+
|
| 44 |
+
| User Query (Text) |
|
| 45 |
+
+-----------+------------+
|
| 46 |
+
|
|
| 47 |
+
v
|
| 48 |
+
+------------------------+
|
| 49 |
+
| Embed with Sentence |
|
| 50 |
+
| Transformer (e.g., |
|
| 51 |
+
| MiniLM or MPNet) |
|
| 52 |
+
+-----------+------------+
|
| 53 |
+
|
|
| 54 |
+
v
|
| 55 |
+
+------------------------+
|
| 56 |
+
| Semantic Similarity |
|
| 57 |
+
| Search (e.g., FAISS, |
|
| 58 |
+
| cosine sim over corpus)|
|
| 59 |
+
+-----------+------------+
|
| 60 |
+
|
|
| 61 |
+
v
|
| 62 |
+
+------------------------+
|
| 63 |
+
| Phrase Assembly Module |
|
| 64 |
+
| - Match answer blocks |
|
| 65 |
+
| - Apply rules if needed|
|
| 66 |
+
+-----------+------------+
|
| 67 |
+
|
|
| 68 |
+
v
|
| 69 |
+
+------------------------+
|
| 70 |
+
| Output Answer |
|
| 71 |
+
+------------------------+
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
---
|
| 75 |
+
|
| 76 |
+
## ✦ Key Components
|
| 77 |
+
|
| 78 |
+
### 1. **Sentence Embeddings**
|
| 79 |
+
- Uses small pre-trained models (e.g., `all-MiniLM-L6-v2`) from [Sentence Transformers](https://www.sbert.net/) to embed queries and stored phrases into a shared semantic space.
|
| 80 |
+
|
| 81 |
+
### 2. **Answer Corpus**
|
| 82 |
+
- Structured markdown, YAML, or database entries containing:
|
| 83 |
+
- `questions:` → example paraphrases
|
| 84 |
+
- `answer:` → approved response text
|
| 85 |
+
- Easily editable by non-technical staff.
|
| 86 |
+
|
| 87 |
+
### 3. **Similarity Engine**
|
| 88 |
+
- Uses cosine similarity or vector indexing (FAISS) to retrieve the closest matching content block(s).
|
| 89 |
+
|
| 90 |
+
### 4. **Answer Composer**
|
| 91 |
+
- Returns the most appropriate answer verbatim or assembles one using adjacent text blocks (if rule-based composition is enabled).
|
| 92 |
+
|
| 93 |
+
### 5. **Fallback Handler**
|
| 94 |
+
- If confidence is low, the system can:
|
| 95 |
+
- Prompt the user to rephrase
|
| 96 |
+
- Escalate to a human
|
| 97 |
+
- Log the query for admin review and knowledge base expansion
|
| 98 |
+
|
| 99 |
+
---
|
| 100 |
+
|
| 101 |
+
## ✦ Benefits
|
| 102 |
+
|
| 103 |
+
| Feature | Advantage |
|
| 104 |
+
|--------|-----------|
|
| 105 |
+
| 💻 **Low compute** | Runs on CPU; no GPUs or cloud inference needed |
|
| 106 |
+
| 🛑 **No hallucination** | Outputs only pre-approved content |
|
| 107 |
+
| 🔐 **High privacy** | No sensitive data leaves the device |
|
| 108 |
+
| ✏️ **Easily editable** | Add new knowledge via flat files (Markdown/YAML/CSV) |
|
| 109 |
+
| 🧠 **Semantic understanding** | Handles paraphrasing and natural queries |
|
| 110 |
+
| 📑 **Explainable** | Every output is traceable to exact input fragments |
|
| 111 |
+
| 🔄 **Extendable** | Modular design allows domain-specific packs |
|
| 112 |
+
|
| 113 |
+
---
|
| 114 |
+
|
| 115 |
+
## ✦ Limitations
|
| 116 |
+
|
| 117 |
+
| Limitation | Description |
|
| 118 |
+
|------------|-------------|
|
| 119 |
+
| ❌ No generative creativity | Cannot form novel phrasing outside stored fragments |
|
| 120 |
+
| ⚖️ Depends on knowledge base quality | Poorly structured content = poor results |
|
| 121 |
+
| 🧩 Answer composition complexity | Combining fragments can sound robotic if not carefully curated |
|
| 122 |
+
| 📚 No "reasoning" | Cannot synthesize new conclusions or multi-hop logic |
|
| 123 |
+
|
| 124 |
+
---
|
| 125 |
+
|
| 126 |
+
## ✦ Ideal Use Cases
|
| 127 |
+
|
| 128 |
+
- Internal company assistants (HR, IT helpdesk)
|
| 129 |
+
- Customer service bots with strict compliance needs
|
| 130 |
+
- Healthcare, finance, law domains requiring controlled outputs
|
| 131 |
+
- Offline-first applications (e.g., field tools, kiosks)
|
| 132 |
+
- Educational tools where factual consistency is vital
|
| 133 |
+
|
| 134 |
+
---
|
| 135 |
+
|
| 136 |
+
## ✦ Future Work
|
| 137 |
+
|
| 138 |
+
- **Lightweight personalization** using user preferences stored locally
|
| 139 |
+
- **Admin dashboard** to monitor unanswered queries and expand corpus
|
| 140 |
+
- **Domain-specific packs** (e.g., `legal_pack.md`, `finance_pack.md`)
|
| 141 |
+
- **Optional smoothing layer** (rule-based grammar post-processing)
|
| 142 |
+
|
| 143 |
+
---
|
| 144 |
+
|
| 145 |
+
## ✦ Summary
|
| 146 |
+
|
| 147 |
+
The **Composable Answer Engine** is a safe, practical, and explainable alternative to current chatbot paradigms. It allows organizations to deliver intelligent, responsive user interaction **without the risks, costs, or unpredictability** of full LLM-based solutions.
|
| 148 |
+
|
| 149 |
+
Its hybrid approach — *semantic understanding + controlled response synthesis* — fills a major gap in the current AI ecosystem, especially in domains that demand trust, traceability, and low resource usage.
|
documentation/AmberAI_effect.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Designed a hybrid retrieval-based QA system using MiniLM for low-cost intent matching and FAISS for fast embedding lookup, enabling sub-100ms query response time.
|
| 2 |
+
|
| 3 |
+
Built a deterministic fallback logic engine to route user queries to curated, trusted answer sets without hallucination.
|
| 4 |
+
|
| 5 |
+
Optimized embedding pipeline for domain-specific intents (e.g., job queries, internal policies), achieving >90% hit accuracy across paraphrased inputs.
|
| 6 |
+
|
| 7 |
+
Engineered with deployment in mind: containerized, minimal memory footprint (<300MB), and LLM-free.
|
documentation/AmberAI_examples.txt
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
🔹 1. Course Offerings
|
| 2 |
+
Chatbot:
|
| 3 |
+
Query: Courses
|
| 4 |
+
Answer: AI, Data Science
|
| 5 |
+
Prompt: Click on the course you want to apply for or click back
|
| 6 |
+
|
| 7 |
+
LLM:
|
| 8 |
+
Query: What courses does this company offer?
|
| 9 |
+
Answer: Our company offers courses on AI and Data Science. Would you like to apply for one?
|
| 10 |
+
Prompt: None, the user enters it.
|
| 11 |
+
|
| 12 |
+
StitchAI:
|
| 13 |
+
Query: What courses does this company offer?
|
| 14 |
+
Answer: AI, Data Science [Not generated]
|
| 15 |
+
Prompt: None, the user enters it.
|
| 16 |
+
|
| 17 |
+
🔹 2. Office Hours
|
| 18 |
+
Chatbot:
|
| 19 |
+
Query: Office hours
|
| 20 |
+
Answer: 9AM – 5PM, Mon to Fri
|
| 21 |
+
Prompt: Press 1 to view branches, press 2 to go back
|
| 22 |
+
|
| 23 |
+
LLM:
|
| 24 |
+
Query: When is your office open during the week?
|
| 25 |
+
Answer: Our office is open from 9AM to 5PM, Monday to Friday.
|
| 26 |
+
Prompt: None
|
| 27 |
+
|
| 28 |
+
StitchAI:
|
| 29 |
+
Query: When is your office open during the week?
|
| 30 |
+
Answer: 9AM – 5PM, Mon to Fri [Not generated]
|
| 31 |
+
Prompt: None
|
| 32 |
+
|
| 33 |
+
🔹 3. Contact Information
|
| 34 |
+
Chatbot:
|
| 35 |
+
Query: Contact
|
| 36 |
+
Answer: support@example.com
|
| 37 |
+
Prompt: Click to send email or go back
|
| 38 |
+
|
| 39 |
+
LLM:
|
| 40 |
+
Query: How can I contact your support team?
|
| 41 |
+
Answer: You can reach our support team at support@example.com.
|
| 42 |
+
Prompt: None
|
| 43 |
+
|
| 44 |
+
StitchAI:
|
| 45 |
+
Query: How can I contact your support team?
|
| 46 |
+
Answer: support@example.com [Not generated]
|
| 47 |
+
Prompt: None
|
| 48 |
+
|
| 49 |
+
🔹 4. Refund Policy
|
| 50 |
+
Chatbot:
|
| 51 |
+
Query: Refunds
|
| 52 |
+
Answer: Refunds only within 7 days
|
| 53 |
+
Prompt: Click here to request refund
|
| 54 |
+
|
| 55 |
+
LLM:
|
| 56 |
+
Query: What's your company's refund policy?
|
| 57 |
+
Answer: Our company provides refunds only within 7 days of purchase.
|
| 58 |
+
Prompt: None
|
| 59 |
+
|
| 60 |
+
StitchAI:
|
| 61 |
+
Query: What's your company's refund policy?
|
| 62 |
+
Answer: Refunds only within 7 days [Not generated]
|
| 63 |
+
Prompt: None
|
| 64 |
+
|
| 65 |
+
🔹 5. Job Openings
|
| 66 |
+
Chatbot:
|
| 67 |
+
Query: Careers
|
| 68 |
+
Answer: No openings currently
|
| 69 |
+
Prompt: Go back
|
| 70 |
+
|
| 71 |
+
LLM:
|
| 72 |
+
Query: Are there any current job openings at your company?
|
| 73 |
+
Answer: Currently, we do not have any open positions. Please check back later.
|
| 74 |
+
Prompt: None
|
| 75 |
+
|
| 76 |
+
StitchAI:
|
| 77 |
+
Query: Are there any current job openings at your company?
|
| 78 |
+
Answer: No openings currently [Not generated]
|
| 79 |
+
Prompt: None
|
json_parser.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from tree import struct_tree
|
| 4 |
+
|
| 5 |
+
# Second-person pronouns/contractions (plus common texting shorthand) that may
# refer to the bot.  Fix: the original list had a missing comma ("your'e""u"),
# which Python's implicit string concatenation fused into the single bogus
# entry "your'eu" — "your'e" and "u" are now separate entries.
SECOND_PERSON_MENTIONS = ["you", "your", "yours", "yourself", "y'all", "y'all's", "y'all'self", "you're", "your'e", "u", "ur", "urs", "urself"]
|
| 6 |
+
|
| 7 |
+
def tree_from_json(filepath):
    """
    Load a struct_tree from a structured JSON file.

    The root node's intent is taken from the filename (without extension).
    Every child node must provide: intent, label, examples, response, and
    may optionally provide a "children" list of nodes of the same shape.
    (The original docstring omitted the required "label" field even though
    the validation below enforces it.)

    NOTE(review): the root value is a 3-tuple (intent, examples, response)
    while child values are 4-tuples (intent, label, examples, response).
    This shape mismatch is preserved from the original code — confirm that
    downstream consumers index root vs. child tuples accordingly.

    Raises:
        ValueError: if a child node is missing any required field.
    """
    # Explicit encoding so the corpus loads identically across platforms.
    with open(filepath, 'r', encoding='utf-8') as f:
        data = json.load(f)

    filename_intent = os.path.splitext(os.path.basename(filepath))[0]
    root_value = (filename_intent, ["Everything"], data["response"])
    tree = struct_tree(root_value)

    def add_children(parent_node, children_data):
        # Recursively attach each JSON child node under parent_node.
        for child in children_data:
            # Fail fast on malformed nodes instead of building a partial tree.
            if not all(k in child for k in ("intent", "label", "examples", "response")):
                raise ValueError(f"Missing required fields in node: {child}")

            value = (child["intent"], child["label"], child["examples"], child["response"])
            new_node = tree.add_node(parent_node, value)

            # Descend into nested children, if any.
            if "children" in child:
                add_children(new_node, child["children"])

    if "children" in data:
        add_children(tree.root, data["children"])

    return tree
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn
|
| 3 |
+
sentence-transformers
|
| 4 |
+
nltk
|
| 5 |
+
pydantic
|
| 6 |
+
vosk
|
| 7 |
+
jinja2
|
| 8 |
+
python-multipart
|
static/favicon.ico
ADDED
|
|
Git LFS Details
|
static/forest_tent.png
ADDED
|
Git LFS Details
|
static/holter.jpg
ADDED
|
Git LFS Details
|
static/kratos.jpg
ADDED
|
Git LFS Details
|
static/mic.png
ADDED
|
Git LFS Details
|
static/mountain_tent.jpg
ADDED
|
Git LFS Details
|
static/my_room.png
ADDED
|
Git LFS Details
|
static/opensans.ttf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6530480277da62efde047eb26e78a7e532d1cfaeec91603e68d63876b9669f0d
|
| 3 |
+
size 130832
|
static/refresh.png
ADDED
|
Git LFS Details
|
static/resume_sample.webp
ADDED
|
Git LFS Details
|
static/saturn_render_color_transparent.png
ADDED
|
Git LFS Details
|
static/script.js
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:748cc42518a80b222f2ddc36f4af2c9e6819a95e810642b234eb0908205d951f
|
| 3 |
+
size 4642
|
static/stark.jpg
ADDED
|
Git LFS Details
|
static/styles.css
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:47832d78b799fd1f20603a7601510ef0bd111d90b05e6f6b014feaa3b9ff155f
|
| 3 |
+
size 4206
|
static/wimg.jpeg
ADDED
|
Git LFS Details
|
temp_audio/latest.wav
ADDED
|
Binary file (40.2 kB). View file
|
|
|
templates/index.html
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<html>
|
| 2 |
+
|
| 3 |
+
<head>
|
| 4 |
+
<title>Amber</title>
|
| 5 |
+
<link rel="stylesheet" href="/static/styles.css" />
|
| 6 |
+
<link rel="icon" href="/static/favicon.ico" type="image/x-icon">
|
| 7 |
+
</head>
|
| 8 |
+
|
| 9 |
+
<body>
|
| 10 |
+
<div class="box" id="interactive">
|
| 11 |
+
|
| 12 |
+
<div class="chatc" id="chatc">
|
| 13 |
+
<input type="text" placeholder="Ask Amber" id="chat" />
|
| 14 |
+
<img src="/static/mic.png" id="mic">
|
| 15 |
+
<img src="/static/refresh.png" id="ref">
|
| 16 |
+
|
| 17 |
+
<!-- Chat section -->
|
| 18 |
+
<p>
|
| 19 |
+
<font style="color: #f23000; font-weight: bold;">Amber</font> <br>
|
| 20 |
+
Hi. I am Amber. Ask me anything.
|
| 21 |
+
</p>
|
| 22 |
+
<!-- End of chat section -->
|
| 23 |
+
</div>
|
| 24 |
+
</div>
|
| 25 |
+
<div class="side">
|
| 26 |
+
<center>
|
| 27 |
+
<font style="color: #f23000; font-weight: bold;">Introduction</font>
|
| 28 |
+
</center>
|
| 29 |
+
<p>
|
| 30 |
+
<font style="color: #0087f2; font-weight: bold;">Who am I?</font> <br>
|
| 31 |
+
Hi. I am Dwarakesh. Aspiring AI/ML engineer with a deep interest in how machines think and how they render pixels
|
| 32 |
+
to
|
| 33 |
+
the screen.
|
| 34 |
+
</p>
|
| 35 |
+
<p>
|
| 36 |
+
<font style="color: #0087f2; font-weight: bold;">Tech interests</font> <br>
|
| 37 |
+
I love to explore how machines learn and how GPUs render—juggling AI/ML projects and Vulkan code to understand
|
| 38 |
+
pixels and neural nets. Curiosity-driven, stubbornly hands-on.
|
| 39 |
+
</p>
|
| 40 |
+
<br>
|
| 41 |
+
<center>
|
| 42 |
+
<font style="color: #f23000; font-weight: bold;">Education</font>
|
| 43 |
+
</center>
|
| 44 |
+
<p>
|
| 45 |
+
<font style="color: #0087f2; font-weight: bold;">School and College</font> <br>
|
| 46 |
+
I studied in PSBB Millennium School, Coimbatore up until grade 8, Suguna Pip School, Coimbatore until grade 12,
|
| 47 |
+
and
|
| 48 |
+
pursuing my B.Tech in Computer Science and Engineering at Amrita Vishwa Vidyapeetham, Coimbatore.
|
| 49 |
+
</p>
|
| 50 |
+
<br>
|
| 51 |
+
<center>
|
| 52 |
+
<font style="color: #f23000; font-weight: bold;">Skills and expertise</font>
|
| 53 |
+
</center>
|
| 54 |
+
<p>
|
| 55 |
+
<font style="color: #0087f2; font-weight: bold;">Programming languages</font> <br>
|
| 56 |
+
• Python<br>
|
| 57 |
+
• Rust<br>
|
| 58 |
+
• C++<br>
|
| 59 |
+
• HTML<br>
|
| 60 |
+
• CSS<br>
|
| 61 |
+
• JavaScript<br>
|
| 62 |
+
• Java<br>
|
| 63 |
+
• Kotlin<br>
|
| 64 |
+
• Golang<br>
|
| 65 |
+
• Haskell<br>
|
| 66 |
+
</p>
|
| 67 |
+
<p>
|
| 68 |
+
<font style="color: #0087f2; font-weight: bold;">Tech stack</font> <br>
|
| 69 |
+
• Vulkan<br>
|
| 70 |
+
• OpenGL<br>
|
| 71 |
+
• PyTorch<br>
|
| 72 |
+
• Tensorflow<br>
|
| 73 |
+
• OpenCV<br>
|
| 74 |
+
• Django<br>
|
| 75 |
+
• FastAPI<br>
|
| 76 |
+
• Tokio<br>
|
| 77 |
+
• Rayon<br>
|
| 78 |
+
• WebGPU<br>
|
| 79 |
+
• CUDA<br>
|
| 80 |
+
</p>
|
| 81 |
+
</div>
|
| 82 |
+
<div class="side">
|
| 83 |
+
<center>
|
| 84 |
+
<font style="color: #f23000; font-weight: bold;">Projects</font>
|
| 85 |
+
</center>
|
| 86 |
+
<p>
|
| 87 |
+
<font style="color: #0087f2; font-weight: bold;">Amber</font> <br>
|
| 88 |
+
A chatbot fine-tuned on the MiniLM-L12-v2 model. It maps human language to structured answers using a hierarchical
|
| 89 |
+
traversal of its knowledge base. Designed to adapt context or update knowledge on the fly.
|
| 90 |
+
<br>
|
| 91 |
+
<a href="https://www.github.com/Dwarakesh-V"> ↗ View on Github </a>
|
| 92 |
+
</p>
|
| 93 |
+
<p>
|
| 94 |
+
<font style="color: #0087f2; font-weight: bold;">Arx Net</font> <br>
|
| 95 |
+
An open-source graph visualization tool powered by WebGPU. Built for real-time interaction with large graphs—ideal
|
| 96 |
+
for networks in biology, social media, or systems modeling.
|
| 97 |
+
<br>
|
| 98 |
+
<a href="https://www.github.com/Dwarakesh-V"> ↗ View on Github </a>
|
| 99 |
+
</p>
|
| 100 |
+
<br>
|
| 101 |
+
<center>
|
| 102 |
+
<font style="color: #f23000; font-weight: bold;">Work experience and competitions</font>
|
| 103 |
+
</center>
|
| 104 |
+
<p>
|
| 105 |
+
<font style="color: #0087f2; font-weight: bold;">Internships, Jobs, and Competitions</font> <br>
|
| 106 |
+
Participated in 3 hackathons. <br>
|
| 107 |
+
No internships or work experience (yet)
|
| 108 |
+
</p>
|
| 109 |
+
<br>
|
| 110 |
+
<center>
|
| 111 |
+
<font style="color: #f23000; font-weight: bold;">Contact</font>
|
| 112 |
+
</center>
|
| 113 |
+
<p>
|
| 114 |
+
<font style="color: #0087f2; font-weight: bold; font-size: 20px;">Contact link</font> <br>
|
| 115 |
+
• Email: <a href="mailto:dwarakesh.2005.4@gmail.com"> dwarakesh.2005.4@gmail.com </a> <br>
|
| 116 |
+
• Github: <a href="https://www.github.com/Dwarakesh-V"> https://www.github.com/Dwarakesh-V </a> <br>
|
| 117 |
+
• LinkedIn: <a href="https://www.linkedin.com/in/vdwarakesh/"> https://www.linkedin.com/in/vdwarakesh/
|
| 118 |
+
</a>
|
| 119 |
+
<br>
|
| 120 |
+
</p>
|
| 121 |
+
</div>
|
| 122 |
+
<div class="statics" id="statics">
|
| 123 |
+
<img src="/static/refresh.png" id="ref2">
|
| 124 |
+
<center>
|
| 125 |
+
<font style="color: #f23000; font-weight: bold;">Introduction</font>
|
| 126 |
+
</center>
|
| 127 |
+
<p>
|
| 128 |
+
<font style="color: #0087f2; font-weight: bold;">Who am I?</font> <br>
|
| 129 |
+
Hi. I am Dwarakesh. Aspiring AI/ML engineer with a deep interest in how machines think and how they render pixels
|
| 130 |
+
to
|
| 131 |
+
the screen.
|
| 132 |
+
</p>
|
| 133 |
+
<p>
|
| 134 |
+
<font style="color: #0087f2; font-weight: bold;">Tech interests</font> <br>
|
| 135 |
+
I love to explore how machines learn and how GPUs render—juggling AI/ML projects and Vulkan code to understand
|
| 136 |
+
pixels and neural nets. Curiosity-driven, stubbornly hands-on.
|
| 137 |
+
</p>
|
| 138 |
+
<br>
|
| 139 |
+
<center>
|
| 140 |
+
<font style="color: #f23000; font-weight: bold;">Education</font>
|
| 141 |
+
</center>
|
| 142 |
+
<p>
|
| 143 |
+
<font style="color: #0087f2; font-weight: bold;">School and College</font> <br>
|
| 144 |
+
I studied in PSBB Millennium School, Coimbatore up until grade 8, Suguna Pip School, Coimbatore until grade 12,
|
| 145 |
+
and
|
| 146 |
+
pursuing my B.Tech in Computer Science and Engineering at Amrita Vishwa Vidyapeetham, Coimbatore.
|
| 147 |
+
</p>
|
| 148 |
+
<br>
|
| 149 |
+
<center>
|
| 150 |
+
<font style="color: #f23000; font-weight: bold;">Skills and expertise</font>
|
| 151 |
+
</center>
|
| 152 |
+
<p>
|
| 153 |
+
<font style="color: #0087f2; font-weight: bold;">Programming languages</font> <br>
|
| 154 |
+
• Python<br>
|
| 155 |
+
• Rust<br>
|
| 156 |
+
• C++<br>
|
| 157 |
+
• HTML<br>
|
| 158 |
+
• CSS<br>
|
| 159 |
+
• JavaScript<br>
|
| 160 |
+
• Java<br>
|
| 161 |
+
• Kotlin<br>
|
| 162 |
+
• Golang<br>
|
| 163 |
+
• Haskell<br>
|
| 164 |
+
</p>
|
| 165 |
+
<p>
|
| 166 |
+
<font style="color: #0087f2; font-weight: bold;">Tech stack</font> <br>
|
| 167 |
+
• Vulkan<br>
|
| 168 |
+
• OpenGL<br>
|
| 169 |
+
• PyTorch<br>
|
| 170 |
+
• Tensorflow<br>
|
| 171 |
+
• OpenCV<br>
|
| 172 |
+
• Django<br>
|
| 173 |
+
• FastAPI<br>
|
| 174 |
+
• Tokio<br>
|
| 175 |
+
• Rayon<br>
|
| 176 |
+
• WebGPU<br>
|
| 177 |
+
• CUDA<br>
|
| 178 |
+
</p>
|
| 179 |
+
<center>
|
| 180 |
+
<font style="color: #f23000; font-weight: bold;">Projects</font>
|
| 181 |
+
</center>
|
| 182 |
+
<p>
|
| 183 |
+
<font style="color: #0087f2; font-weight: bold;">Amber</font> <br>
|
| 184 |
+
A chatbot fine-tuned on the MiniLM-L12-v2 model. It maps human language to structured answers using a hierarchical
|
| 185 |
+
traversal of its knowledge base. Designed to adapt context or update knowledge on the fly.
|
| 186 |
+
<br>
|
| 187 |
+
<a href="https://www.github.com/Dwarakesh-V"> ↗ View on Github </a>
|
| 188 |
+
</p>
|
| 189 |
+
<p>
|
| 190 |
+
<font style="color: #0087f2; font-weight: bold;">Arx Net</font> <br>
|
| 191 |
+
An open-source graph visualization tool powered by WebGPU. Built for real-time interaction with large graphs—ideal
|
| 192 |
+
for networks in biology, social media, or systems modeling.
|
| 193 |
+
<br>
|
| 194 |
+
<a href="https://www.github.com/Dwarakesh-V"> ↗ View on Github </a>
|
| 195 |
+
</p>
|
| 196 |
+
<br>
|
| 197 |
+
<center>
|
| 198 |
+
<font style="color: #f23000; font-weight: bold;">Work experience and competitions</font>
|
| 199 |
+
</center>
|
| 200 |
+
<p>
|
| 201 |
+
<font style="color: #0087f2; font-weight: bold;">Internships, Jobs, and Competitions</font> <br>
|
| 202 |
+
Participated in 3 hackathons. <br>
|
| 203 |
+
No internships or work experience (yet)
|
| 204 |
+
</p>
|
| 205 |
+
<br>
|
| 206 |
+
<center>
|
| 207 |
+
<font style="color: #f23000; font-weight: bold;">Contact</font>
|
| 208 |
+
</center>
|
| 209 |
+
<p>
|
| 210 |
+
<font style="color: #0087f2; font-weight: bold;">Contact link</font> <br>
|
| 211 |
+
• Email: <a href="mailto:dwarakesh.2005.4@gmail.com"> dwarakesh.2005.4@gmail.com </a> <br>
|
| 212 |
+
• Github: <a href="https://www.github.com/Dwarakesh-V"> https://www.github.com/Dwarakesh-V </a> <br>
|
| 213 |
+
• LinkedIn: <a href="https://www.linkedin.com/in/vdwarakesh/"> https://www.linkedin.com/in/vdwarakesh/
|
| 214 |
+
</a>
|
| 215 |
+
<br>
|
| 216 |
+
</p>
|
| 217 |
+
</div>
|
| 218 |
+
<script src="/static/script.js"></script>
|
| 219 |
+
</body>
|
| 220 |
+
|
| 221 |
+
</html>
|
tree.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pickle
|
| 2 |
+
|
| 3 |
+
class node:
    """A single tree node: a payload plus parent/children links."""

    def __init__(self, value=None):
        self.value = value
        self.parent = None           # set by struct_tree.add_node when attached
        self.children = []           # child nodes, in insertion order
        self.embedding_cache = None  # holds precomputed embeddings once available

    def __repr__(self):
        # Tuple-valued nodes display their first element (the intent);
        # anything else displays as-is.
        label = self.value[0] if isinstance(self.value, tuple) else self.value
        return str(label)
|
| 13 |
+
|
| 14 |
+
class struct_tree:
    """N-ary tree of `node` objects with ASCII visualization and pickle persistence."""

    def __init__(self, value):
        # A root value is mandatory; the tree always has at least one node.
        self.root = node(value)

    def add_node(self, parent_node: node, value):
        """Create a node holding `value`, attach it under `parent_node`, and return it."""
        new_node = node(value)
        new_node.parent = parent_node
        parent_node.children.append(new_node)
        return new_node

    def visualize(self, show_labels=True):
        """Print the tree as an ASCII diagram using └──/├── branch markers.

        When `show_labels` is False every node is rendered as 'o'.
        """
        def _print_children(parent, prefix):
            # Single shared branch-drawing loop (the original duplicated this
            # iteration logic once inside the helper and once for the root).
            count = len(parent.children)
            for i, child in enumerate(parent.children):
                is_last = i == count - 1
                branch = "└── " if is_last else "├── "
                print(f"{prefix}{branch}{child.value if show_labels else 'o'}")
                _print_children(child, prefix + ("    " if is_last else "│   "))

        # The root carries no branch marker; its children start the recursion.
        print(f"{self.root.value if show_labels else 'o'}")
        _print_children(self.root, "")

    def save(self, filepath: str):
        """Pickle the entire tree structure to `filepath`.

        (The original docstring claimed "optional compression"; none exists —
        this is a plain pickle dump.)
        """
        with open(filepath, 'wb') as f:
            pickle.dump(self, f)

    @staticmethod  # Called on the class itself rather than an instance
    def load(filepath: str):
        """Unpickle and return a tree from `filepath`.

        SECURITY: pickle executes arbitrary code during load — only load
        files this application itself produced, never untrusted input.
        """
        with open(filepath, 'rb') as f:
            return pickle.load(f)
|
| 59 |
+
|
| 60 |
+
if __name__ == "__main__":  # Demo: build and draw a small sample tree
    demo = struct_tree("A")

    # First level under the root
    branch_b = demo.add_node(demo.root, "B")
    branch_c = demo.add_node(demo.root, "C")
    demo.add_node(demo.root, "D")

    # Deeper nodes under B
    demo.add_node(branch_b, "B1")
    b2 = demo.add_node(branch_b, "B2")
    demo.add_node(b2, "B2.1")

    # One child under C
    demo.add_node(branch_c, "C1")

    # Render the shape only (no labels)
    demo.visualize(show_labels=False)
|
tree_data/company_tree_data.json
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"intent": "company",
|
| 3 |
+
"label":"company",
|
| 4 |
+
"examples": ["Everything"],
|
| 5 |
+
"response": "Ask me anything related to the company!",
|
| 6 |
+
"children": [
|
| 7 |
+
{
|
| 8 |
+
"intent": "job opportunities",
|
| 9 |
+
"label": "ai and data science",
|
| 10 |
+
"examples": ["Are there jobs available?", "Looking for work", "Any openings?"],
|
| 11 |
+
"response": "We currently have job openings in multiple domains like Data Science and AI/ML.",
|
| 12 |
+
"children": [
|
| 13 |
+
{
|
| 14 |
+
"intent": "data science",
|
| 15 |
+
"label": "data science",
|
| 16 |
+
"examples": ["data science", "Interested in data science"],
|
| 17 |
+
"response": "We have open positions in Data Science. Visit careers.company.com/data-science."
|
| 18 |
+
},
|
| 19 |
+
{
|
| 20 |
+
"intent": "aiml",
|
| 21 |
+
"label": "ai",
|
| 22 |
+
"examples": ["machine learning", "Interested in machine learning and artificial intelligence", "AI", "ML", "AI/ML"],
|
| 23 |
+
"response": "We're hiring in AI/ML. See careers.company.com/aiml."
|
| 24 |
+
}
|
| 25 |
+
]
|
| 26 |
+
},
|
| 27 |
+
{
|
| 28 |
+
"intent": "courses",
|
| 29 |
+
"label": "ai and data science",
|
| 30 |
+
"examples": ["What courses do you have?", "Available classes?", "Do you offer courses?", "Are there any courses?"],
|
| 31 |
+
"response": "We offer courses in AI, Data Science, and Software Engineering."
|
| 32 |
+
},
|
| 33 |
+
{
|
| 34 |
+
"intent": "contact",
|
| 35 |
+
"label": "",
|
| 36 |
+
"examples": ["How can I reach you?", "Contact info please", "Can I call you?", "Can I speak to you?", "Can I collaborate with you?", "Manager contact"],
|
| 37 |
+
"response": "Reach us via email at info@company.com or call 123-456-7890."
|
| 38 |
+
}
|
| 39 |
+
]
|
| 40 |
+
}
|
tree_data/company_tree_data.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3e2a4e4987926874c6f498b65896017c8ab64c5d3bef67e9438084ae71f1c922
|
| 3 |
+
size 35329
|
tree_data/portfolio_tree_data.json
ADDED
|
@@ -0,0 +1,951 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"intent": "Amber",
|
| 3 |
+
"label": "Amber",
|
| 4 |
+
"examples": [],
|
| 5 |
+
"response": "Hello, I'm Amber. Ask me anything!",
|
| 6 |
+
"children": [
|
| 7 |
+
{
|
| 8 |
+
"intent": "greetings",
|
| 9 |
+
"label": "greetings",
|
| 10 |
+
"examples": [
|
| 11 |
+
"Hi",
|
| 12 |
+
"Hi there",
|
| 13 |
+
"Hello",
|
| 14 |
+
"Hey",
|
| 15 |
+
"Heya",
|
| 16 |
+
"Howdy",
|
| 17 |
+
"Yo",
|
| 18 |
+
"Good to see you",
|
| 19 |
+
"Welcome",
|
| 20 |
+
"Glad you're here",
|
| 21 |
+
"Nice to have you",
|
| 22 |
+
"Sup",
|
| 23 |
+
"Hiya"
|
| 24 |
+
],
|
| 25 |
+
"response": [
|
| 26 |
+
"Hey there",
|
| 27 |
+
"Hello",
|
| 28 |
+
"Hi",
|
| 29 |
+
"Hey",
|
| 30 |
+
"Howdy",
|
| 31 |
+
"Greetings",
|
| 32 |
+
"Welcome"
|
| 33 |
+
]
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"intent": "goodbyes",
|
| 37 |
+
"label": "goodbyes",
|
| 38 |
+
"examples": [
|
| 39 |
+
"Bye!",
|
| 40 |
+
"Goodbye",
|
| 41 |
+
"See ya",
|
| 42 |
+
"Cya",
|
| 43 |
+
"Catch you later!",
|
| 44 |
+
"Later!",
|
| 45 |
+
"Take care",
|
| 46 |
+
"Peace out",
|
| 47 |
+
"Talk soon",
|
| 48 |
+
"See you around",
|
| 49 |
+
"Have a good one",
|
| 50 |
+
"See ya later",
|
| 51 |
+
"I'm out",
|
| 52 |
+
"Adios"
|
| 53 |
+
],
|
| 54 |
+
"response": [
|
| 55 |
+
"Catch you later",
|
| 56 |
+
"See you later",
|
| 57 |
+
"Goodbye",
|
| 58 |
+
"Take care",
|
| 59 |
+
"Bye",
|
| 60 |
+
"See ya"
|
| 61 |
+
]
|
| 62 |
+
},
|
| 63 |
+
{
|
| 64 |
+
"intent": "thanks",
|
| 65 |
+
"label": "thanks",
|
| 66 |
+
"examples": [
|
| 67 |
+
"Thanks",
|
| 68 |
+
"Thank you",
|
| 69 |
+
"Much appreciated",
|
| 70 |
+
"Thanks a lot",
|
| 71 |
+
"Really appreciate it",
|
| 72 |
+
"Thanks so much",
|
| 73 |
+
"Big thanks",
|
| 74 |
+
"Appreciate it"
|
| 75 |
+
],
|
| 76 |
+
"response": [
|
| 77 |
+
"Anytime",
|
| 78 |
+
"Happy to help",
|
| 79 |
+
"You're welcome",
|
| 80 |
+
"No problem",
|
| 81 |
+
"Glad I could help",
|
| 82 |
+
"Anytime"
|
| 83 |
+
]
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"intent": "about",
|
| 87 |
+
"label": "about",
|
| 88 |
+
"examples": [
|
| 89 |
+
"Tell me about yourself",
|
| 90 |
+
"About you",
|
| 91 |
+
"Tell me something about you",
|
| 92 |
+
"Can you tell me about yourself?",
|
| 93 |
+
"About yourself"
|
| 94 |
+
],
|
| 95 |
+
"response": "I am a student exploring AI/ML and GPU rendering, often diving into Vulkan/low-level graphics. Curiosity-driven projects blend learning with stubborn tinkering."
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"intent": "who are you",
|
| 99 |
+
"label": "who are you",
|
| 100 |
+
"examples": [
|
| 101 |
+
"Who are you?",
|
| 102 |
+
"What's your name?",
|
| 103 |
+
"And you are?",
|
| 104 |
+
"What are you called?",
|
| 105 |
+
"Mind telling me your name?",
|
| 106 |
+
"Can I ask your name?",
|
| 107 |
+
"Who am I talking to?",
|
| 108 |
+
"What do people call you?",
|
| 109 |
+
"What are you?",
|
| 110 |
+
"What am I talking to?"
|
| 111 |
+
],
|
| 112 |
+
"response": [
|
| 113 |
+
"I am Amber. And I am representing a part of my creator - Dwarakesh.",
|
| 114 |
+
"I am Amber, a chatbot created by and represents Dwarakesh."
|
| 115 |
+
]
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"intent": "what are you doing",
|
| 119 |
+
"label": "what are you doing",
|
| 120 |
+
"examples": [
|
| 121 |
+
"What are you doing?",
|
| 122 |
+
"What are you up to?"
|
| 123 |
+
],
|
| 124 |
+
"response": [
|
| 125 |
+
"Chatting with you! Ask me anything related to me or my creator.",
|
| 126 |
+
"I am here to chat with you! Ask me anything related to me or my creator."
|
| 127 |
+
]
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"intent": "where are you from",
|
| 131 |
+
"label": "where are you from",
|
| 132 |
+
"examples": [
|
| 133 |
+
"Where are you from?",
|
| 134 |
+
"Where do you come from?",
|
| 135 |
+
"What's your origin?",
|
| 136 |
+
"Where were you made?",
|
| 137 |
+
"Where do you live?",
|
| 138 |
+
"Which place are you based in?",
|
| 139 |
+
"Where were you born?",
|
| 140 |
+
"What place are you from?"
|
| 141 |
+
],
|
| 142 |
+
"response": [
|
| 143 |
+
"I am from Coimbatore. A city in Tamil Nadu, India."
|
| 144 |
+
],
|
| 145 |
+
"children": [
|
| 146 |
+
{
|
| 147 |
+
"intent": "country",
|
| 148 |
+
"label": "country",
|
| 149 |
+
"examples": [
|
| 150 |
+
"Which country are you from?",
|
| 151 |
+
"What country are you in?",
|
| 152 |
+
"What is your country?",
|
| 153 |
+
"What country do you belong to?"
|
| 154 |
+
],
|
| 155 |
+
"response": "I am from India, a country in South Asia."
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"intent": "state",
|
| 159 |
+
"label": "state",
|
| 160 |
+
"examples": [
|
| 161 |
+
"Which state are you from?",
|
| 162 |
+
"What state are you in?",
|
| 163 |
+
"What is your state?",
|
| 164 |
+
"What state do you belong to?"
|
| 165 |
+
],
|
| 166 |
+
"response": "I am from Tamil Nadu, a state in South India."
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"intent": "city",
|
| 170 |
+
"label": "city",
|
| 171 |
+
"examples": [
|
| 172 |
+
"Which city are you from?",
|
| 173 |
+
"What city are you in?",
|
| 174 |
+
"What is your city?",
|
| 175 |
+
"What city do you belong to?"
|
| 176 |
+
],
|
| 177 |
+
"response": "I am from Coimbatore, located in Tamil Nadu."
|
| 178 |
+
}
|
| 179 |
+
]
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"intent": "where are you from server",
|
| 183 |
+
"label": "where are you from server",
|
| 184 |
+
"examples": [
|
| 185 |
+
"Server",
|
| 186 |
+
"Where are you hosted?",
|
| 187 |
+
"Where do you run?",
|
| 188 |
+
"Source code",
|
| 189 |
+
"Where can I see your code?",
|
| 190 |
+
"Where are you deployed?"
|
| 191 |
+
],
|
| 192 |
+
"response": "I run on Hugging face spaces, and my source code is available on Github under the MIT license."
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"intent": "how are you",
|
| 196 |
+
"label": "how are you",
|
| 197 |
+
"examples": [
|
| 198 |
+
"How are you?",
|
| 199 |
+
"How's it going?",
|
| 200 |
+
"How are things?",
|
| 201 |
+
"You doing okay?",
|
| 202 |
+
"How have you been?",
|
| 203 |
+
"Everything good?",
|
| 204 |
+
"All good with you?"
|
| 205 |
+
],
|
| 206 |
+
"response": [
|
| 207 |
+
"I'm doing just fine. Hope you are too.",
|
| 208 |
+
"I'm doing great. Hope that's the case with you too.",
|
| 209 |
+
"I'm doing well, thanks for asking. Hope you're doing well too."
|
| 210 |
+
]
|
| 211 |
+
},
|
| 212 |
+
{
|
| 213 |
+
"intent": "education",
|
| 214 |
+
"label": "education",
|
| 215 |
+
"examples": [
|
| 216 |
+
"Where did you study?",
|
| 217 |
+
"Education stats",
|
| 218 |
+
"School",
|
| 219 |
+
"College",
|
| 220 |
+
"Which school did you study in?",
|
| 221 |
+
"Which college did you study in?",
|
| 222 |
+
"Where are you studying?"
|
| 223 |
+
],
|
| 224 |
+
"response": "I studied at PSBB Millennium till 8th and Suguna Pip School till 12th, both in Coimbatore. I'm currently doing my undergrad at Amrita Vishwa Vidyapeetham, Coimbatore.",
|
| 225 |
+
"children": [
|
| 226 |
+
{
|
| 227 |
+
"intent": "school",
|
| 228 |
+
"label": "school",
|
| 229 |
+
"examples": [
|
| 230 |
+
"School",
|
| 231 |
+
"Which school did you study in?",
|
| 232 |
+
"Where was your early education?"
|
| 233 |
+
],
|
| 234 |
+
"response": "I studied at PSBB Millennium till 8th and Suguna Pip School till 12th, both in Coimbatore. As for kindergarten, I was in Akshara fun School, Coimbatore"
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"intent": "college",
|
| 238 |
+
"label": "college",
|
| 239 |
+
"examples": [
|
| 240 |
+
"College",
|
| 241 |
+
"Which college did you study in?",
|
| 242 |
+
"Where did you do your UG?",
|
| 243 |
+
"Where did you complete your undergraduate degree?",
|
| 244 |
+
"Are you pursuing your undergraduate degree?"
|
| 245 |
+
],
|
| 246 |
+
"response": "I'm currently pursuing my undergraduate degree for B.TECH Computer Science in Amrita Vishwa Vidyapeetham, Coimbatore"
|
| 247 |
+
}
|
| 248 |
+
]
|
| 249 |
+
},
|
| 250 |
+
{
|
| 251 |
+
"intent": "exam fail",
|
| 252 |
+
"label": "exam fail",
|
| 253 |
+
"examples": [
|
| 254 |
+
"Have you failed an exam?",
|
| 255 |
+
"Did you ever mess up an exam?",
|
| 256 |
+
"Have you failed before?",
|
| 257 |
+
"Ever gotten something totally wrong?",
|
| 258 |
+
"Have you had a bad result?"
|
| 259 |
+
],
|
| 260 |
+
"response": "I have failed in my JEE Advanced exam. My score was 48, while the cutoff was 55, out of 360."
|
| 261 |
+
},
|
| 262 |
+
{
|
| 263 |
+
"intent": "arrears",
|
| 264 |
+
"label": "arrears",
|
| 265 |
+
"examples": [
|
| 266 |
+
"Arrears",
|
| 267 |
+
"Do you have any arrears?",
|
| 268 |
+
"Any pending subjects to clear?",
|
| 269 |
+
"Any backlogs?"
|
| 270 |
+
],
|
| 271 |
+
"response": [
|
| 272 |
+
"Nope! I'm all clear - never been in arrears.",
|
| 273 |
+
"I have never had any arrears, and never will!"
|
| 274 |
+
]
|
| 275 |
+
},
|
| 276 |
+
{
|
| 277 |
+
"intent": "future plans",
|
| 278 |
+
"label": "higher studies",
|
| 279 |
+
"examples": [
|
| 280 |
+
"Future plans",
|
| 281 |
+
"What do you plan to do in the future?",
|
| 282 |
+
"Got any plans for later?",
|
| 283 |
+
"Higher studies",
|
| 284 |
+
"Are you planning for higher studies?",
|
| 285 |
+
"After college",
|
| 286 |
+
"What will you do after college?",
|
| 287 |
+
"What will you do after getting a degree?"
|
| 288 |
+
],
|
| 289 |
+
"response": "I have no plans for higher studies. I wish for a placement in a good company, but I have not decided my domain yet.",
|
| 290 |
+
"children": [
|
| 291 |
+
{
|
| 292 |
+
"intent": "higher studies",
|
| 293 |
+
"label": "higher studies",
|
| 294 |
+
"examples": [
|
| 295 |
+
"Are you planning for higher studies?",
|
| 296 |
+
"What are your plans for higher studies?",
|
| 297 |
+
"Do you want to pursue higher studies?"
|
| 298 |
+
],
|
| 299 |
+
"response": "I have no plans for higher studies."
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"intent": "placement",
|
| 303 |
+
"label": "placement",
|
| 304 |
+
"examples": [
|
| 305 |
+
"What will you do after college?",
|
| 306 |
+
"What will you do after getting a degree?",
|
| 307 |
+
"Are you looking for a job?",
|
| 308 |
+
"Are you looking for placement?"
|
| 309 |
+
],
|
| 310 |
+
"response": "I wish for a placement in a good company, but I have not decided my domain yet."
|
| 311 |
+
}
|
| 312 |
+
]
|
| 313 |
+
},
|
| 314 |
+
{
|
| 315 |
+
"intent": "field of interest",
|
| 316 |
+
"label": "machine learning, deep learning",
|
| 317 |
+
"examples": [
|
| 318 |
+
"What are you interested in?",
|
| 319 |
+
"Technical interests?",
|
| 320 |
+
"What is your field of interest?",
|
| 321 |
+
"Is something your field of interest?"
|
| 322 |
+
],
|
| 323 |
+
"response": "I am particularly interested in machine learning and deep learning, but I do not dislike other domains."
|
| 324 |
+
},
|
| 325 |
+
{
|
| 326 |
+
"intent": "hobbies",
|
| 327 |
+
"label": "blender, badminton",
|
| 328 |
+
"examples": [
|
| 329 |
+
"What do you in your free time?",
|
| 330 |
+
"Hobbies",
|
| 331 |
+
"What are your hobbies?",
|
| 332 |
+
"Recreational activities",
|
| 333 |
+
"Blender",
|
| 334 |
+
"Do you use blender?",
|
| 335 |
+
"What do you do in blender?",
|
| 336 |
+
"How long have you been using blender?",
|
| 337 |
+
"Renders",
|
| 338 |
+
"Can I see your renders?",
|
| 339 |
+
"Show me your renders",
|
| 340 |
+
"Badminton",
|
| 341 |
+
"How long have you been playing badminton?",
|
| 342 |
+
"How good are you at badminton?",
|
| 343 |
+
"Drawing",
|
| 344 |
+
"What do you draw?",
|
| 345 |
+
"Since when have you been drawing?",
|
| 346 |
+
"Can I see your drawings?",
|
| 347 |
+
"Show me your drawings"
|
| 348 |
+
],
|
| 349 |
+
"response": "I am a 3D enthusiast primarily working on the free open source software Blender. I also play badminton, and draw sometimes.",
|
| 350 |
+
"children": [
|
| 351 |
+
{
|
| 352 |
+
"intent": "blender",
|
| 353 |
+
"label": "blender",
|
| 354 |
+
"examples": [
|
| 355 |
+
"Blender",
|
| 356 |
+
"Do you use blender?",
|
| 357 |
+
"What do you do in blender?",
|
| 358 |
+
"How long have you been using blender?",
|
| 359 |
+
"How experienced are you in blender?",
|
| 360 |
+
"What have you made with blender?",
|
| 361 |
+
"How do you use blender?",
|
| 362 |
+
"Can you show me your renders?",
|
| 363 |
+
"Any good renders?",
|
| 364 |
+
"Renders",
|
| 365 |
+
"Can I see your renders?",
|
| 366 |
+
"Show me your renders"
|
| 367 |
+
],
|
| 368 |
+
"response": "I have been using blender for since 2021. I am primarily a hard surface modelling artist with some experience in environmental and interior design.",
|
| 369 |
+
"children": [
|
| 370 |
+
{
|
| 371 |
+
"intent": "blender time",
|
| 372 |
+
"label": "blender",
|
| 373 |
+
"examples": [
|
| 374 |
+
"When did you start using blender?",
|
| 375 |
+
"How long have you been using blender?",
|
| 376 |
+
"Since when have you been using blender?"
|
| 377 |
+
],
|
| 378 |
+
"response": "I have been using blender since 2021."
|
| 379 |
+
},
|
| 380 |
+
{
|
| 381 |
+
"intent": "blender render",
|
| 382 |
+
"label": "blender",
|
| 383 |
+
"examples": [
|
| 384 |
+
"What have you made with blender?",
|
| 385 |
+
"How do you use blender?",
|
| 386 |
+
"Can you show me your renders?",
|
| 387 |
+
"Any good renders?",
|
| 388 |
+
"Renders",
|
| 389 |
+
"Can I see your renders?",
|
| 390 |
+
"Show me your renders"
|
| 391 |
+
],
|
| 392 |
+
"response": "Here is some of my work. <br> <img src = '/static/mountain_tent.jpg' onclick = 'requestFullscreen(this)'> <br> <img src = '/static/forest_tent.png' onclick = 'requestFullscreen(this)'> <br> <img src = '/static/my_room.png' onclick = 'requestFullscreen(this)'>"
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"intent": "blender experience",
|
| 396 |
+
"label": "experience",
|
| 397 |
+
"examples": [
|
| 398 |
+
"How experienced are you in blender?",
|
| 399 |
+
"How good are you at blender?",
|
| 400 |
+
"How good are you at 3D modelling?"
|
| 401 |
+
],
|
| 402 |
+
"response": "I have been using blender for over 4 years, so I consider myself to be decent."
|
| 403 |
+
}
|
| 404 |
+
]
|
| 405 |
+
},
|
| 406 |
+
{
|
| 407 |
+
"intent": "badminton",
|
| 408 |
+
"label": "badminton",
|
| 409 |
+
"examples": [
|
| 410 |
+
"Badminton",
|
| 411 |
+
"How long have you been playing badminton?",
|
| 412 |
+
"Since when have you been playing badminton?",
|
| 413 |
+
"How experienced are you at badminton?"
|
| 414 |
+
],
|
| 415 |
+
"response": "I have been playing badminton since I was 12, but I have had very long breaks in between. Now it is a part of my daily life, and I consider myself an above average player.",
|
| 416 |
+
"children": [
|
| 417 |
+
{
|
| 418 |
+
"intent": "badminton time",
|
| 419 |
+
"label": "badminton",
|
| 420 |
+
"examples": [
|
| 421 |
+
"How long have you been playing badminton?",
|
| 422 |
+
"Since when have you been playing badminton?"
|
| 423 |
+
],
|
| 424 |
+
"response": "I have been playing badminton since I was 12, but I have had very long breaks in between. Now it is a part of my daily life."
|
| 425 |
+
},
|
| 426 |
+
{
|
| 427 |
+
"intent": "badminton experience",
|
| 428 |
+
"label": "experience",
|
| 429 |
+
"examples": [
|
| 430 |
+
"How good are you at badminton?",
|
| 431 |
+
"How experienced are you at badminton?"
|
| 432 |
+
],
|
| 433 |
+
"response": "I consider myself an above average player."
|
| 434 |
+
}
|
| 435 |
+
]
|
| 436 |
+
},
|
| 437 |
+
{
|
| 438 |
+
"intent": "drawing",
|
| 439 |
+
"label": "drawing",
|
| 440 |
+
"examples": [
|
| 441 |
+
"Drawing",
|
| 442 |
+
"What do you draw?",
|
| 443 |
+
"Since when have you been drawing?",
|
| 444 |
+
"Can I see your drawings?",
|
| 445 |
+
"Show me your drawings"
|
| 446 |
+
],
|
| 447 |
+
"response": "I draw rarely. It used to be my favourite hobby in the past, until I discovered Blender.",
|
| 448 |
+
"children": [
|
| 449 |
+
{
|
| 450 |
+
"intent": "drawing time",
|
| 451 |
+
"label": "drawing",
|
| 452 |
+
"examples": [
|
| 453 |
+
"How long have you been drawing?",
|
| 454 |
+
"Since when have you been drawing?"
|
| 455 |
+
],
|
| 456 |
+
"response": "I have been drawing since I was 10 years old."
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"intent": "drawing experience",
|
| 460 |
+
"label": "drawing",
|
| 461 |
+
"examples": [
|
| 462 |
+
"How good are you at drawing?",
|
| 463 |
+
"How experienced are you at drawing?"
|
| 464 |
+
],
|
| 465 |
+
"response": "I am really not sure... it turns out really well sometimes and really bad some other times."
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"intent": "see drawings",
|
| 469 |
+
"label": "see drawings",
|
| 470 |
+
"examples": [
|
| 471 |
+
"Can I see your drawings?",
|
| 472 |
+
"Show me your drawings",
|
| 473 |
+
"What have you drawn?"
|
| 474 |
+
],
|
| 475 |
+
"response": "Here are some of my drawings. <br> <img src = '/static/holter.jpg' onclick = 'requestFullscreen(this)'> <br> <img src = '/static/stark.jpg' onclick = 'requestFullscreen(this)'> <br> <img src = '/static/kratos.jpg' onclick = 'requestFullscreen(this)'>"
|
| 476 |
+
}
|
| 477 |
+
]
|
| 478 |
+
}
|
| 479 |
+
]
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"intent": "dream",
|
| 483 |
+
"label": "dream",
|
| 484 |
+
"examples": [
|
| 485 |
+
"What is your dream?",
|
| 486 |
+
"Dream job?",
|
| 487 |
+
"What do you want to be?",
|
| 488 |
+
"What is your ambition?",
|
| 489 |
+
"What do you aspire to be?"
|
| 490 |
+
],
|
| 491 |
+
"response": "I would like to be placed among my favourite companies, which are Google, Nvidia and Tesla, but I have no specific dream job.",
|
| 492 |
+
"children": [
|
| 493 |
+
{
|
| 494 |
+
"intent": "dream company",
|
| 495 |
+
"label": "google, nvidia, tesla",
|
| 496 |
+
"examples": [
|
| 497 |
+
"What is your dream company?",
|
| 498 |
+
"Dream company?",
|
| 499 |
+
"Which company do you want to work for?",
|
| 500 |
+
"What is your favourite company?",
|
| 501 |
+
"What companies do you like?",
|
| 502 |
+
"Where do you want to work?",
|
| 503 |
+
"Where do you want to be placed?",
|
| 504 |
+
"What is your dream job?"
|
| 505 |
+
],
|
| 506 |
+
"response": "My favourite companies are Google, Nvidia and Tesla. I would like to be placed among them."
|
| 507 |
+
},
|
| 508 |
+
{
|
| 509 |
+
"intent": "dream",
|
| 510 |
+
"label": "artificial intelligence",
|
| 511 |
+
"examples": [
|
| 512 |
+
"What is your dream?",
|
| 513 |
+
"What do you want to achieve?",
|
| 514 |
+
"What is something you want to do?"
|
| 515 |
+
],
|
| 516 |
+
"response": "My dream is to revolutionize the field of artificial intelligence and create industry standard applications."
|
| 517 |
+
}
|
| 518 |
+
]
|
| 519 |
+
},
|
| 520 |
+
{
|
| 521 |
+
"intent": "work experience",
|
| 522 |
+
"label": "work",
|
| 523 |
+
"examples": [
|
| 524 |
+
"Do you have work experience?",
|
| 525 |
+
"Any jobs before?",
|
| 526 |
+
"Have you worked before?",
|
| 527 |
+
"Your job history?",
|
| 528 |
+
"Have you worked in a job before?",
|
| 529 |
+
"Experience",
|
| 530 |
+
"What is your experience?",
|
| 531 |
+
"About your experience"
|
| 532 |
+
],
|
| 533 |
+
"response": "I haven't done any official jobs yet. Just learning and building stuff for now."
|
| 534 |
+
},
|
| 535 |
+
{
|
| 536 |
+
"intent": "internships",
|
| 537 |
+
"label": "internships",
|
| 538 |
+
"examples": [
|
| 539 |
+
"Have you done any internships?",
|
| 540 |
+
"Internships?",
|
| 541 |
+
"Have you interned anywhere?",
|
| 542 |
+
"Any internships?"
|
| 543 |
+
],
|
| 544 |
+
"response": "I haven't done any internships yet. I'm learning stuff for now."
|
| 545 |
+
},
|
| 546 |
+
{
|
| 547 |
+
"intent": "competitions",
|
| 548 |
+
"label": "competitions",
|
| 549 |
+
"examples": [
|
| 550 |
+
"Have you participated in any competitions?",
|
| 551 |
+
"Competitions?",
|
| 552 |
+
"Have you competed in anything?",
|
| 553 |
+
"Any competitions?",
|
| 554 |
+
"Hackathons?",
|
| 555 |
+
"Have you participated in any hackathons?",
|
| 556 |
+
"Have you competed in any hackathons?",
|
| 557 |
+
"Any hackathons?",
|
| 558 |
+
"Have you qualified for a hackathon?"
|
| 559 |
+
],
|
| 560 |
+
"response": "I have participated in a few hackathons, but I haven't won any yet. I'm still learning and improving."
|
| 561 |
+
},
|
| 562 |
+
{
|
| 563 |
+
"intent": "skills",
|
| 564 |
+
"label": "skills",
|
| 565 |
+
"examples": [
|
| 566 |
+
"What are your skills?",
|
| 567 |
+
"Skills?",
|
| 568 |
+
"What are you good at?",
|
| 569 |
+
"What are your strengths?",
|
| 570 |
+
"What are your abilities?",
|
| 571 |
+
"What can you do?"
|
| 572 |
+
],
|
| 573 |
+
"response": "I am skilled in the languages Python, Rust, C++, Java, HTML, CSS, JavaScript, Java, Kotlin, Golang and Haskell. I am familiar with the tech stack Vulkan, OpenGL, Pytorch, Tensorflow, OpenCV, Django, FastAPI, Tokio, Rayon, WebGPU and CUDA."
|
| 574 |
+
},
|
| 575 |
+
{
|
| 576 |
+
"intent": "specialization",
|
| 577 |
+
"label": "specialization",
|
| 578 |
+
"examples": [
|
| 579 |
+
"What is your specialization?",
|
| 580 |
+
"Specialization?",
|
| 581 |
+
"What are you specialized in?",
|
| 582 |
+
"What is your major?"
|
| 583 |
+
],
|
| 584 |
+
"response": "I am specialized in Computer Science, with a focus on AI/ML and GPU rendering. I am also interested in low-level graphics programming."
|
| 585 |
+
},
|
| 586 |
+
{
|
| 587 |
+
"intent": "certifications",
|
| 588 |
+
"label": "certifications",
|
| 589 |
+
"examples": [
|
| 590 |
+
"Have you done any certifications?",
|
| 591 |
+
"Certifications?",
|
| 592 |
+
"Have you completed any courses?",
|
| 593 |
+
"Any certifications?",
|
| 594 |
+
"Have you done any online courses?"
|
| 595 |
+
],
|
| 596 |
+
"response": "I have completed a few online courses, but I haven't received any certifications yet."
|
| 597 |
+
},
|
| 598 |
+
{
|
| 599 |
+
"intent": "languages",
|
| 600 |
+
"label": "languages",
|
| 601 |
+
"examples": [
|
| 602 |
+
"Languages?",
|
| 603 |
+
"What languages do you know?",
|
| 604 |
+
"How many languages do you know?",
|
| 605 |
+
"Can you speak language?",
|
| 606 |
+
"What languages can you speak?",
|
| 607 |
+
"What languages are you fluent in?"
|
| 608 |
+
],
|
| 609 |
+
"response": "I am fluent in English, Tamil and Telugu, and somewhat fluent in Hindi and Malayalam."
|
| 610 |
+
},
|
| 611 |
+
{
|
| 612 |
+
"intent": "merits and demerits",
|
| 613 |
+
"label": "merits and demerits",
|
| 614 |
+
"examples": [
|
| 615 |
+
"What are your merits?",
|
| 616 |
+
"What are your demerits?",
|
| 617 |
+
"What are your merits and demerits?",
|
| 618 |
+
"What are your strengths and weaknesses?",
|
| 619 |
+
"What are your pros and cons?",
|
| 620 |
+
"What are your good and bad points?",
|
| 621 |
+
"What are your good and bad qualities?"
|
| 622 |
+
],
|
| 623 |
+
"response": "I am a very curious person, and I love to learn new things. I am also very stubborn, and I don't give up easily. My demerit is that I am not very good at communicating with people, and I tend to be a bit shy.",
|
| 624 |
+
"children": [
|
| 625 |
+
{
|
| 626 |
+
"intent": "merits",
|
| 627 |
+
"label": "merits",
|
| 628 |
+
"examples": [
|
| 629 |
+
"What are your merits?",
|
| 630 |
+
"What are your strengths?",
|
| 631 |
+
"What are your good points?",
|
| 632 |
+
"What are your good qualities?"
|
| 633 |
+
],
|
| 634 |
+
"response": "I am a very curious person, and I love to learn new things. I am also very stubborn, and I don't give up easily."
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"intent": "demerits",
|
| 638 |
+
"label": "demerits",
|
| 639 |
+
"examples": [
|
| 640 |
+
"What are your demerits?",
|
| 641 |
+
"What are your weaknesses?",
|
| 642 |
+
"What are your bad points?",
|
| 643 |
+
"What are your bad qualities?"
|
| 644 |
+
],
|
| 645 |
+
"response": "My demerit is that I am not very good at communicating with people, and I tend to be a bit shy."
|
| 646 |
+
}
|
| 647 |
+
]
|
| 648 |
+
},
|
| 649 |
+
{
|
| 650 |
+
"intent": "research papers",
|
| 651 |
+
"label": "research papers",
|
| 652 |
+
"examples": [
|
| 653 |
+
"Have you published any research papers?",
|
| 654 |
+
"Research papers?",
|
| 655 |
+
"Have you written any research papers?",
|
| 656 |
+
"Any research papers?",
|
| 657 |
+
"Have you written any papers?",
|
| 658 |
+
"Are you researching on something?",
|
| 659 |
+
"Are you doing any research?",
|
| 660 |
+
"Are you working on any research?"
|
| 661 |
+
],
|
| 662 |
+
"response": "I have not published any research papers yet, but I am working on a few."
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"intent": "projects",
|
| 666 |
+
"label": "projects",
|
| 667 |
+
"examples": [
|
| 668 |
+
"What projects are you working on?",
|
| 669 |
+
"What are your projects?",
|
| 670 |
+
"What are you working on?",
|
| 671 |
+
"What is your current project?",
|
| 672 |
+
"What is your latest project?",
|
| 673 |
+
"What are your current projects?",
|
| 674 |
+
"What are your latest projects?",
|
| 675 |
+
"How do you even work?",
|
| 676 |
+
"How do you function?",
|
| 677 |
+
"How are you able to chat?",
|
| 678 |
+
"What is your technology?",
|
| 679 |
+
"What is your tech stack?",
|
| 680 |
+
"What is your architecture?",
|
| 681 |
+
"How are you built?",
|
| 682 |
+
"How are you made?",
|
| 683 |
+
"What is your framework?",
|
| 684 |
+
"About your tech stack",
|
| 685 |
+
"About your technology",
|
| 686 |
+
"About your working",
|
| 687 |
+
"About Amber AI",
|
| 688 |
+
"About Amber",
|
| 689 |
+
"About you",
|
| 690 |
+
"How does Amber work?",
|
| 691 |
+
"How does Amber function?",
|
| 692 |
+
"Arx-Net",
|
| 693 |
+
"What is Arx-Net?",
|
| 694 |
+
"How does Arx-Net work?",
|
| 695 |
+
"What is the tech stack of Arx-Net?",
|
| 696 |
+
"How is arx-net built?",
|
| 697 |
+
"What is the architecture of Arx-Net?"
|
| 698 |
+
],
|
| 699 |
+
"response": "I am currently working on Amber AI. My other projects include Arx-Net, an open source graph visualization tool, <project 2>, <project 3>, and <project 4>.",
|
| 700 |
+
"children": [
|
| 701 |
+
{
|
| 702 |
+
"intent": "Amber AI",
|
| 703 |
+
"label": "Amber AI",
|
| 704 |
+
"examples": [
|
| 705 |
+
"How does Amber AI even work?",
|
| 706 |
+
"How does Amber AIfunction?",
|
| 707 |
+
"How is Amber AI able to chat?",
|
| 708 |
+
"What is Amber AI technology?",
|
| 709 |
+
"What is Amber AI tech stack?",
|
| 710 |
+
"What is Amber AI architecture?",
|
| 711 |
+
"How is Amber AI built?",
|
| 712 |
+
"How is Amber AI made?",
|
| 713 |
+
"What is Amber AI framework?",
|
| 714 |
+
"About Amber AI tech stack",
|
| 715 |
+
"About Amber AItechnology",
|
| 716 |
+
"About Amber AI working",
|
| 717 |
+
"About Amber AI",
|
| 718 |
+
"About Amber",
|
| 719 |
+
"How does Amber work?",
|
| 720 |
+
"How does Amber function?"
|
| 721 |
+
],
|
| 722 |
+
"response": "I (Amber AI) am a chatbot created by Dwarakesh. For details on how I work, check out my source code and documentation on Github using <a href = 'https://www.github.com/Dwarakesh-V'> github.com/Dwarakesh-V </a>."
|
| 723 |
+
},
|
| 724 |
+
{
|
| 725 |
+
"intent": "Arx-Net",
|
| 726 |
+
"label": "Arx-Net",
|
| 727 |
+
"examples": [
|
| 728 |
+
"Arx-Net",
|
| 729 |
+
"What is Arx-Net?",
|
| 730 |
+
"How does Arx-Net work?",
|
| 731 |
+
"What is the tech stack of Arx-Net?",
|
| 732 |
+
"How is arx-net built?",
|
| 733 |
+
"What is the architecture of Arx-Net?"
|
| 734 |
+
],
|
| 735 |
+
"response": "Arx-Net is an open source graph visualization tool. For details on how it works, check out the source code and documentation on Github using <a href = 'https://www.github.com/Dwarakesh-V'> github.com/Dwarakesh-V </a>."
|
| 736 |
+
}
|
| 737 |
+
]
|
| 738 |
+
},
|
| 739 |
+
{
|
| 740 |
+
"intent": "contact",
|
| 741 |
+
"label": "contact",
|
| 742 |
+
"examples": [
|
| 743 |
+
"How can I contact you?",
|
| 744 |
+
"How can I reach you?",
|
| 745 |
+
"How can I get in touch with you?",
|
| 746 |
+
"How can I connect with you?",
|
| 747 |
+
"How to contact you?",
|
| 748 |
+
"Contact",
|
| 749 |
+
"Contact",
|
| 750 |
+
"Can I colab with you?",
|
| 751 |
+
"Can I collaborate with you?",
|
| 752 |
+
"Can I work with you?",
|
| 753 |
+
"Contact information",
|
| 754 |
+
"Gmail",
|
| 755 |
+
"Email",
|
| 756 |
+
"Github",
|
| 757 |
+
"LinkedIn",
|
| 758 |
+
"Social media",
|
| 759 |
+
"Social links",
|
| 760 |
+
"Social media accounts",
|
| 761 |
+
"What is your email?",
|
| 762 |
+
"What is your github?",
|
| 763 |
+
"What is your linkedin?",
|
| 764 |
+
"What is your social media?"
|
| 765 |
+
],
|
| 766 |
+
"response": "You can contact me through Github on <a href = 'https://www.github.com/Dwarakesh-V'> github.com/Dwarakesh-V </a>, Email on <email link>, or LinkedIn on <a href = 'https://www.linkedin.com/in/vdwarakesh/'> linkedin.com/in/vdwarakesh </a>.",
|
| 767 |
+
"children": [
|
| 768 |
+
{
|
| 769 |
+
"intent": "email",
|
| 770 |
+
"label": "email",
|
| 771 |
+
"examples": [
|
| 772 |
+
"What is your email?",
|
| 773 |
+
"What is your email address?",
|
| 774 |
+
"Email",
|
| 775 |
+
"Gmail"
|
| 776 |
+
],
|
| 777 |
+
"response": "My email is <email link>."
|
| 778 |
+
},
|
| 779 |
+
{
|
| 780 |
+
"intent": "github",
|
| 781 |
+
"label": "github",
|
| 782 |
+
"examples": [
|
| 783 |
+
"What is your github?",
|
| 784 |
+
"Github",
|
| 785 |
+
"Github link"
|
| 786 |
+
],
|
| 787 |
+
"response": "My github is <a href = 'https://www.github.com/Dwarakesh-V'> github.com/Dwarakesh-V </a>."
|
| 788 |
+
},
|
| 789 |
+
{
|
| 790 |
+
"intent": "linkedin",
|
| 791 |
+
"label": "linkedin",
|
| 792 |
+
"examples": [
|
| 793 |
+
"What is your linkedin?",
|
| 794 |
+
"LinkedIn",
|
| 795 |
+
"LinkedIn link"
|
| 796 |
+
],
|
| 797 |
+
"response": "My linkedin is <a href = 'https://www.linkedin.com/in/vdwarakesh/'> linkedin.com/in/vdwarakesh </a>."
|
| 798 |
+
}
|
| 799 |
+
]
|
| 800 |
+
},
|
| 801 |
+
{
|
| 802 |
+
"intent": "favorite color",
|
| 803 |
+
"label": "favorite color",
|
| 804 |
+
"examples": [
|
| 805 |
+
"What is your favorite color?",
|
| 806 |
+
"Favorite color?",
|
| 807 |
+
"What color do you like?",
|
| 808 |
+
"Do you have a favorite color?",
|
| 809 |
+
"What colors do you like?"
|
| 810 |
+
],
|
| 811 |
+
"response": "My favourite color is green, tied with black."
|
| 812 |
+
},
|
| 813 |
+
{
|
| 814 |
+
"intent": "favorite food",
|
| 815 |
+
"label": "favorite food",
|
| 816 |
+
"examples": [
|
| 817 |
+
"What is your favorite food?",
|
| 818 |
+
"Favorite food?",
|
| 819 |
+
"What food do you like?",
|
| 820 |
+
"Do you have a favorite food?",
|
| 821 |
+
"What foods do you like?"
|
| 822 |
+
],
|
| 823 |
+
"response": "I like South Indian food in general, but I'm not very picky."
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"intent": "favorite movie",
|
| 827 |
+
"label": "favorite movie",
|
| 828 |
+
"examples": [
|
| 829 |
+
"What is your favorite movie?",
|
| 830 |
+
"Favorite movie?",
|
| 831 |
+
"What movie do you like?",
|
| 832 |
+
"Do you have a favorite movie?",
|
| 833 |
+
"What movies do you like?"
|
| 834 |
+
],
|
| 835 |
+
"response": "I dont have a single favourite movie. I like many movies, particularly the ones with a good story, like Harry Potter and Dangal."
|
| 836 |
+
},
|
| 837 |
+
{
|
| 838 |
+
"intent": "parents",
|
| 839 |
+
"label": "parents",
|
| 840 |
+
"examples": [
|
| 841 |
+
"What are your parents doing?",
|
| 842 |
+
"What is you father doing?",
|
| 843 |
+
"What is your mother doing?",
|
| 844 |
+
"What is your father's job?",
|
| 845 |
+
"What is your mother's job?",
|
| 846 |
+
"What do your parents do?",
|
| 847 |
+
"What do your parents work as?",
|
| 848 |
+
"What do your parents do for a living?"
|
| 849 |
+
],
|
| 850 |
+
"response": "My father is a buisnessman, and my mother is a government employee."
|
| 851 |
+
},
|
| 852 |
+
{
|
| 853 |
+
"intent": "siblings",
|
| 854 |
+
"label": "siblings",
|
| 855 |
+
"examples": [
|
| 856 |
+
"Do you have any siblings?",
|
| 857 |
+
"Are you an only child?",
|
| 858 |
+
"Do you have a brother or sister?",
|
| 859 |
+
"How many siblings do you have?",
|
| 860 |
+
"What are your siblings doing?"
|
| 861 |
+
],
|
| 862 |
+
"response": "I have no siblings. I am an only child."
|
| 863 |
+
},
|
| 864 |
+
{
|
| 865 |
+
"intent": "maritial status",
|
| 866 |
+
"label": "maritial status",
|
| 867 |
+
"examples": [
|
| 868 |
+
"Are you married?",
|
| 869 |
+
"Are you single?",
|
| 870 |
+
"Do you have a partner?",
|
| 871 |
+
"Are you in a relationship?",
|
| 872 |
+
"Do you have a girlfriend?",
|
| 873 |
+
"Are you dating?"
|
| 874 |
+
],
|
| 875 |
+
"response": [
|
| 876 |
+
"I am as single as anyone could be.",
|
| 877 |
+
"Single at the start of the year. Single at the end of the year. Consistency is key."
|
| 878 |
+
]
|
| 879 |
+
},
|
| 880 |
+
{
|
| 881 |
+
"intent": "interests",
|
| 882 |
+
"label": "interests",
|
| 883 |
+
"examples": [
|
| 884 |
+
"Is there someone you like?",
|
| 885 |
+
"Are you interested in anyone?",
|
| 886 |
+
"Do you have a crush?",
|
| 887 |
+
"Whom do you like?",
|
| 888 |
+
"Who is your crush?"
|
| 889 |
+
],
|
| 890 |
+
"response": "There could be someone I like. Maybe I'm lying. Good luck finding out.",
|
| 891 |
+
"children": [
|
| 892 |
+
{
|
| 893 |
+
"intent": "who do you like",
|
| 894 |
+
"label": "who do you like",
|
| 895 |
+
"examples": [
|
| 896 |
+
"Who is it?",
|
| 897 |
+
"Whom do you like?",
|
| 898 |
+
"Who is your crush?"
|
| 899 |
+
],
|
| 900 |
+
"response": "No comments."
|
| 901 |
+
}
|
| 902 |
+
]
|
| 903 |
+
},
|
| 904 |
+
{
|
| 905 |
+
"intent": "age",
|
| 906 |
+
"label": "age",
|
| 907 |
+
"examples": [
|
| 908 |
+
"How old are you?",
|
| 909 |
+
"What is your age?",
|
| 910 |
+
"When were you born?",
|
| 911 |
+
"When is your birthday?",
|
| 912 |
+
"What year were you born?"
|
| 913 |
+
],
|
| 914 |
+
"response": "I was born on November 2, 2005. Which means I am 19 years old.",
|
| 915 |
+
"children": [
|
| 916 |
+
{
|
| 917 |
+
"intent": "birth date",
|
| 918 |
+
"label": "birth date",
|
| 919 |
+
"examples": [
|
| 920 |
+
"When is your birthday?",
|
| 921 |
+
"What is your birth date?",
|
| 922 |
+
"When were you born?",
|
| 923 |
+
"What year were you born?"
|
| 924 |
+
],
|
| 925 |
+
"response": "I was born on November 2, 2005."
|
| 926 |
+
},
|
| 927 |
+
{
|
| 928 |
+
"intent": "current age",
|
| 929 |
+
"label": "current age",
|
| 930 |
+
"examples": [
|
| 931 |
+
"How old are you?",
|
| 932 |
+
"What is your age?",
|
| 933 |
+
"How many years old are you?"
|
| 934 |
+
],
|
| 935 |
+
"response": "I am 19 years old."
|
| 936 |
+
}
|
| 937 |
+
]
|
| 938 |
+
},
|
| 939 |
+
{
|
| 940 |
+
"intent": "resume",
|
| 941 |
+
"label": "resume",
|
| 942 |
+
"examples": [
|
| 943 |
+
"Resume",
|
| 944 |
+
"Can I see your resume?",
|
| 945 |
+
"Show me your resume",
|
| 946 |
+
"Send me your resume"
|
| 947 |
+
],
|
| 948 |
+
"response": "Here is my resume.<br><img src = '/static/resume_sample.webp' onclick = 'requestFullscreen(this)'>"
|
| 949 |
+
}
|
| 950 |
+
]
|
| 951 |
+
}
|
tree_data/portfolio_tree_data.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:23dd6a8ccbe038d78153e6ce7f933979f9a48be617a835a31e29e738d8781cbe
|
| 3 |
+
size 679822
|
util.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from random import choice


def select_random_from_list(l):
    """Return a random element of *l* if it is a list; otherwise return *l* unchanged.

    Parameters:
        l: Either a list of candidate values or a single value.

    Returns:
        A uniformly chosen element of *l* when *l* is a list, else *l* itself.
    """
    if isinstance(l, list):
        # random.choice is the stdlib idiom for "pick one at random";
        # it replaces the hand-rolled l[randint(0, len(l)-1)] indexing.
        return choice(l)
    return l
|
voice.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import wave
import json
import subprocess
import tempfile
from functools import lru_cache
from vosk import Model, KaldiRecognizer

os.environ['KALDI_LOG_VERBOSE'] = '0'

# Directory containing the Vosk acoustic model shipped with the app.
MODEL_PATH = "vosk-model-small-en-us-0.15"


@lru_cache(maxsize=1)
def _load_model(model_path):
    """Load the Vosk model once and reuse it — model loading is slow and
    was previously repeated on every transcription call."""
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"Vosk model not found at: {model_path}")
    return Model(model_path)


def transcribe_audio(audio_path):
    """
    Transcribe an audio file using vosk-model-small-en-us-0.15.

    The input is first converted to 16 kHz mono 16-bit PCM WAV with ffmpeg,
    since Vosk requires that exact format.

    Parameters:
        audio_path (str): Path to the input audio file (any format ffmpeg reads).

    Returns:
        str: The recognized text; may be empty if nothing was understood.

    Raises:
        FileNotFoundError: If the Vosk model directory is missing.
        subprocess.CalledProcessError: If ffmpeg fails to convert the input.
    """
    model = _load_model(MODEL_PATH)

    # Reserve a temporary file name for the converted audio; ffmpeg will
    # overwrite it (-y), and the finally-block removes it afterwards.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_wav:
        tmp_path = tmp_wav.name

    try:
        # Convert input audio to the format Vosk needs: 16 kHz, mono, s16le PCM.
        subprocess.run([
            "ffmpeg", "-y", "-i", audio_path,
            "-ar", "16000", "-ac", "1", "-c:a", "pcm_s16le",
            tmp_path
        ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)

        # Fix: the wave handle was previously never closed (resource leak);
        # a context manager guarantees closure even if recognition fails.
        with wave.open(tmp_path, "rb") as wf:
            recognizer = KaldiRecognizer(model, wf.getframerate())

            results = []
            while True:
                data = wf.readframes(4000)
                if len(data) == 0:
                    break
                # AcceptWaveform returns True at utterance boundaries,
                # at which point a partial final result is available.
                if recognizer.AcceptWaveform(data):
                    res = json.loads(recognizer.Result())
                    results.append(res.get("text", ""))

            # Flush any remaining buffered audio into a final result.
            final_res = json.loads(recognizer.FinalResult())
            results.append(final_res.get("text", ""))

        return " ".join(results).strip()

    finally:
        # Always clean up the temporary WAV, even when conversion or
        # recognition raised.
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
|
vosk-model-small-en-us-0.15/README
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c0cf286e4f7783306c5f6469b37db69228fb16803b03cae661edb2d7bba64ebb
|
| 3 |
+
size 199
|
vosk-model-small-en-us-0.15/am/final.mdl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:75370a0137f9daf8f469dedd7daa4513ae7a621f03240c6e512e2b50b656a7b6
|
| 3 |
+
size 15962575
|
vosk-model-small-en-us-0.15/conf/mfcc.conf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1e2228006d01d805ad1c267fee9f79709ca87ac51bd82b0e3f5c69ba543f0fc4
|
| 3 |
+
size 131
|
vosk-model-small-en-us-0.15/conf/model.conf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8f14cb1eeb07c762c371db648c6be688d347236155ca0f64fb13b6567a8ce81f
|
| 3 |
+
size 290
|
vosk-model-small-en-us-0.15/graph/Gr.fst
ADDED
|
|
Git LFS Details
|
vosk-model-small-en-us-0.15/graph/HCLr.fst
ADDED
|
|
Git LFS Details
|
vosk-model-small-en-us-0.15/graph/disambig_tid.int
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9ad87cc166d0998f08f758f47a6223a120dfffcbee805c4849c3aa5e6bb3c0fc
|
| 3 |
+
size 102
|
vosk-model-small-en-us-0.15/graph/phones/word_boundary.int
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:da199d9c991e0e84681ddbb34627b915b26302d50a8fdaa23c51e2bc3a50b5c3
|
| 3 |
+
size 1761
|
vosk-model-small-en-us-0.15/ivector/final.dubm
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8c5d7dd69d2122313baaf19f61f35dd3fa18b70c62ac0687e311e1c46e6daca7
|
| 3 |
+
size 168048
|
vosk-model-small-en-us-0.15/ivector/final.ie
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3f37faf90c375b9e4740b569398b5829ed9cc07d19be6d441f72c3b71d7efcc6
|
| 3 |
+
size 8288887
|
vosk-model-small-en-us-0.15/ivector/final.mat
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ddd83586dc5f928cda8738b922c85ffe38fc789cb5f9151a712ca12f37265382
|
| 3 |
+
size 44975
|
vosk-model-small-en-us-0.15/ivector/global_cmvn.stats
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:33be09afcc80059847a275c3d043b51f1ab954c7c2438ddbbf4745e8ba144ff9
|
| 3 |
+
size 1080
|
vosk-model-small-en-us-0.15/ivector/online_cmvn.conf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a2f3571754b64297cb7efb2e7ca3df61995c5a45fcbb97188f90613552bb2dfe
|
| 3 |
+
size 95
|
vosk-model-small-en-us-0.15/ivector/splice.conf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9f0c5f7c82d18eaf25d8bce470efa9f7741f88411fe428774bc0a9bb69a24756
|
| 3 |
+
size 35
|