Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- .env +15 -0
- CosmosDBHandlers/__pycache__/cosmosChatHistoryHandler.cpython-311.pyc +0 -0
- CosmosDBHandlers/__pycache__/cosmosConnector.cpython-311.pyc +0 -0
- CosmosDBHandlers/cosmosChatHistoryHandler.py +208 -0
- CosmosDBHandlers/cosmosConnector.py +343 -0
- README.md +2 -8
- __pycache__/cosmosConnector.cpython-311.pyc +0 -0
- chatbot-gradio.py +482 -0
- improvements.txt +24 -0
- models/__pycache__/converterModels.cpython-311.pyc +0 -0
- models/__pycache__/converterVectorStoreModels.cpython-311.pyc +0 -0
- models/converterModels.py +56 -0
- plugins/__pycache__/chatMemoryPlugin.cpython-311.pyc +0 -0
- plugins/__pycache__/converterPlugin.cpython-311.pyc +0 -0
- plugins/__pycache__/sqlGenerationPlugin.cpython-311.pyc +0 -0
- plugins/chatMemoryPlugin.py +42 -0
- plugins/converterPlugin.py +143 -0
- requirements.txt +3 -0
.env
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
AZURE_OPENAI_KEY = <REDACTED>  # SECURITY: this key was committed to a public repository — rotate it immediately and load it from a secret store, not from a checked-in .env
|
| 2 |
+
|
| 3 |
+
OPENAI_API_TYPE = azure
|
| 4 |
+
|
| 5 |
+
AZURE_OPENAI_API_VERSION = 2024-12-01-preview
|
| 6 |
+
AZURE_OPENAI_DEPLOYMENT_NAME = gpt-4o-mini
|
| 7 |
+
AZURE_OPENAI_ENDPOINT = https://tal-chatbot-resource2.cognitiveservices.azure.com/openai/deployments/gpt-4o-mini/chat/completions?api-version=2025-01-01-preview
|
| 8 |
+
|
| 9 |
+
OPENAI_EMBEDDINGS_MODEL_NAME = text-embedding-ada-002
|
| 10 |
+
OPENAI_EMBEDDINGS_MODEL_DEPLOYMENT = text-embedding-ada-002
|
| 11 |
+
OPENAI_API_ENDPOINT = https://tal-chatbot-resource2.cognitiveservices.azure.com/
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
AZURE_COSMOS_DB_ENDPOINT = https://tal-chatbot.documents.azure.com:443/
|
| 15 |
+
AZURE_COSMOS_DB_KEY = <REDACTED>  # SECURITY: this key was committed to a public repository — rotate it immediately and load it from a secret store, not from a checked-in .env
|
CosmosDBHandlers/__pycache__/cosmosChatHistoryHandler.cpython-311.pyc
ADDED
|
Binary file (10.9 kB). View file
|
|
|
CosmosDBHandlers/__pycache__/cosmosConnector.cpython-311.pyc
ADDED
|
Binary file (18.7 kB). View file
|
|
|
CosmosDBHandlers/cosmosChatHistoryHandler.py
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# cosmosConnector.py
|
| 3 |
+
from azure.cosmos import exceptions
|
| 4 |
+
from datetime import datetime, timedelta, timezone
|
| 5 |
+
import uuid
|
| 6 |
+
from langchain_openai import AzureOpenAIEmbeddings
|
| 7 |
+
import os
|
| 8 |
+
from azure.cosmos import CosmosClient, PartitionKey
|
| 9 |
+
from typing import List, Optional, Dict
|
| 10 |
+
import logging
|
| 11 |
+
import os
|
| 12 |
+
from dotenv import load_dotenv
|
| 13 |
+
load_dotenv()
|
| 14 |
+
# Initialize Cosmos DB containers
|
| 15 |
+
|
| 16 |
+
class ChatMemoryHandler():
    """Persist chat interactions and generated SQL queries to Cosmos DB, and
    derive FAQ clusters from the stored question embeddings.

    Containers created on construction:
        ChatHistory      - one item per Q/A turn, partitioned by /sessionId,
                           carrying a 1536-dimension embedding of the question.
        GeneratedQueries - one item per generated SQL query, partitioned by
                           /queryType.
    """

    def __init__(self, logger: Optional[logging.Logger] = None):
        self.cosmos_client = CosmosClient(
            os.getenv("AZURE_COSMOS_DB_ENDPOINT"),
            os.getenv("AZURE_COSMOS_DB_KEY")
        )
        # Fall back to a module logger: the original stored a possibly-None
        # logger and later called self.logger.error(...) unconditionally,
        # which raised AttributeError on every failure path.
        self.logger = logger or logging.getLogger(__name__)

        self.indexing_policy = {
            "indexingMode": "consistent",
            "includedPaths": [{"path": "/*"}],  # index all properties, including nested
            "excludedPaths": [
                {"path": '/"_etag"/?'},
                # Embeddings are large float arrays; exclude them from the
                # range index (they are served by the vector index below).
                {"path": "/embedding/*"}
            ],
        }

        self.vector_embedding_policy = {
            "vectorEmbeddings": [
                {
                    "path": "/embedding",
                    "dataType": "float32",
                    "distanceFunction": "cosine",
                    "dimensions": 1536,  # matches text-embedding-ada-002 output
                }
            ]
        }

        self.embedding_model = AzureOpenAIEmbeddings(
            azure_endpoint=os.environ["OPENAI_API_ENDPOINT"],
            azure_deployment=os.environ["OPENAI_EMBEDDINGS_MODEL_DEPLOYMENT"],
            api_key=os.environ["AZURE_OPENAI_KEY"]
        )

        self.database = self.cosmos_client.create_database_if_not_exists("TAL_ChatDB")

        # Container for chat history
        self.chat_container = self.database.create_container_if_not_exists(
            id="ChatHistory",
            partition_key=PartitionKey(path="/sessionId"),
            indexing_policy=self.indexing_policy,
            vector_embedding_policy=self.vector_embedding_policy
        )

        # Container for SQL queries
        self.sql_container = self.database.create_container_if_not_exists(
            id="GeneratedQueries",
            partition_key=PartitionKey(path="/queryType")
        )

    async def _generate_embedding(self, query: str) -> List[float]:
        """Generate an embedding for the given query using Azure OpenAI.

        Raises:
            Exception: re-raised after logging when the embedding call fails.
        """
        try:
            return self.embedding_model.embed_query(query)
        except Exception as e:
            self.logger.error(f"Embedding generation failed: {str(e)}")
            raise

    async def log_interaction(self, session_id: str, question: str, function_used: str, answer: str) -> None:
        """Store one Q/A turn (with a question embedding) in ChatHistory.

        Failures are logged and swallowed so that telemetry errors never
        interrupt the chat flow.
        """
        try:
            chat_item = {
                "id": str(uuid.uuid4()),
                "sessionId": session_id,
                "question": question,
                "functionUsed": function_used,
                "answer": answer,
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "embedding": await self._generate_embedding(question)
            }
            self.chat_container.create_item(body=chat_item)
        except Exception as e:
            self.logger.error(f"Failed to log chat interaction: {str(e)}")

    async def log_sql_query(self, original_question: str, generated_sql: str) -> None:
        """Store a generated SQL query in GeneratedQueries (best effort)."""
        try:
            sql_item = {
                "id": str(uuid.uuid4()),
                "queryType": "generated",
                "originalQuestion": original_question,
                "generatedSql": generated_sql,
                "executionStatus": "generated",
                "timestamp": datetime.now(timezone.utc).isoformat()
            }
            self.sql_container.create_item(body=sql_item)
        except Exception as e:
            self.logger.error(f"Failed to log SQL query: {str(e)}")

    async def get_semantic_faqs(self, limit: int = 5, threshold: float = 0.1) -> List[Dict]:
        """Retrieve FAQs using vector embeddings for semantic similarity.

        Takes the `limit` most frequent stored questions, then clusters each
        with its vector-search neighbours whose similarity (1 - VectorDistance)
        meets `threshold`.  Returns at most `limit` clusters, most frequent
        first; each cluster is a dict with representative_question,
        similar_questions, total_occurrences and similarity_scores.

        Fixes over the original:
        - the neighbour filter used `similarity <= threshold` (keeping the
          DISsimilar items) while the scores map used `>=`; both now use `>=`;
        - each cluster was appended twice (an unconditional singleton append
          followed the clustered append);
        - the result was sliced before sorting, which could drop the most
          frequent clusters.
        """
        try:
            raw_results = list(self.chat_container.query_items(
                query="SELECT c.question FROM c",
                enable_cross_partition_query=True,
                max_item_count=-1
            ))

            # Group by question in Python (cross-partition GROUP BY avoided).
            from collections import Counter
            question_counts = Counter(item['question'] for item in raw_results)
            top_questions = question_counts.most_common(limit)

            clustered_faqs = []
            processed = set()

            for text, count in top_questions:
                if text in processed:
                    continue

                embedding = await self._generate_embedding(text)

                query = """
                SELECT TOP 50 c.question, VectorDistance(c.embedding, @embedding) as distance
                FROM c
                ORDER BY VectorDistance(c.embedding, @embedding)
                """
                similar_results = list(self.chat_container.query_items(
                    query=query,
                    parameters=[{"name": "@embedding", "value": embedding}],
                    enable_cross_partition_query=True
                ))

                # NOTE(review): the code treats VectorDistance as a distance
                # and converts it via 1 - distance — confirm against the
                # container's cosine configuration.
                similar_questions = [
                    item['question'] for item in similar_results
                    if 1 - item['distance'] >= threshold
                ]

                if similar_questions:
                    similar_question_counts = Counter(similar_questions)
                    clustered_faqs.append({
                        "representative_question": text,
                        "similar_questions": list(similar_question_counts.keys()),
                        "total_occurrences": sum(similar_question_counts.values()),
                        "similarity_scores": {
                            item['question']: 1 - item['distance']
                            for item in similar_results
                            if 1 - item['distance'] >= threshold
                        }
                    })
                    # Mark all similar questions as processed so later top
                    # questions do not spawn overlapping clusters.
                    processed.update(similar_questions)
                else:
                    # No neighbour met the threshold: emit a singleton cluster.
                    clustered_faqs.append({
                        "representative_question": text,
                        "similar_questions": [text],
                        "total_occurrences": count,
                        "similarity_scores": {text: 1.0}
                    })
                processed.add(text)

            # Sort by frequency first, then truncate.
            return sorted(clustered_faqs, key=lambda x: x['total_occurrences'], reverse=True)[:limit]

        except exceptions.CosmosHttpResponseError as ex:
            # The original logged an undefined name `e` here (NameError).
            self.logger.error(f"Semantic FAQ retrieval failed: {str(ex)}")
            return []
        except Exception as e:
            self.logger.error(f"Semantic FAQ retrieval failed: {str(e)}")
            return []
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
# Ad-hoc smoke test for FAQ clustering.
import asyncio


# NOTE(review): constructing the handler at module import time opens Cosmos DB
# connections (and creates containers) as an import side effect; consider
# moving this inside the __main__ guard if this module is ever imported.
handler = ChatMemoryHandler()

async def main():
    # Print each FAQ cluster: the representative question followed by the
    # questions grouped under it.
    faqs = await handler.get_semantic_faqs()
    for faq in faqs:

        print("\n",faq["representative_question"],faq["similar_questions"],"\n")

if __name__ == "__main__":
    asyncio.run(main())
|
CosmosDBHandlers/cosmosConnector.py
ADDED
|
@@ -0,0 +1,343 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cosmosConnector.py
|
| 2 |
+
from jsonschema import ValidationError
|
| 3 |
+
from langchain_openai import AzureOpenAIEmbeddings
|
| 4 |
+
from models.converterModels import PowerConverter
|
| 5 |
+
import os
|
| 6 |
+
from azure.cosmos import CosmosClient
|
| 7 |
+
from typing import List, Optional, Dict
|
| 8 |
+
import logging
|
| 9 |
+
import os
|
| 10 |
+
from dotenv import load_dotenv
|
| 11 |
+
from semantic_kernel.functions import kernel_function
|
| 12 |
+
from rapidfuzz import process, fuzz
|
| 13 |
+
|
| 14 |
+
load_dotenv()
|
| 15 |
+
# Initialize logging
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
class CosmosLampHandler:
    """Read-side access to the Converters_with_embeddings container in the
    TAL_DB Cosmos database, with fuzzy matching for lamp and dimming names."""

    def __init__(self, logger: Optional[logging.Logger] = None):
        self.client = CosmosClient(
            os.getenv("AZURE_COSMOS_DB_ENDPOINT"),
            os.getenv("AZURE_COSMOS_DB_KEY")
        )
        self.database = self.client.get_database_client("TAL_DB")
        self.container = self.database.get_container_client("Converters_with_embeddings")
        # The original ignored the `logger` parameter and hardcoded
        # logging.Logger("test"); honour the parameter with a sane default.
        self.logger = logger or logging.getLogger(__name__)
        self.embedding_model = AzureOpenAIEmbeddings(
            azure_endpoint=os.environ["OPENAI_API_ENDPOINT"],
            azure_deployment=os.environ["OPENAI_EMBEDDINGS_MODEL_DEPLOYMENT"],
            api_key=os.environ["AZURE_OPENAI_KEY"]
        )

    def _fuzzy_match_lamp(self, query: str, targets: list, threshold=60) -> list:
        """Advanced partial matching: return rapidfuzz (match, score, index)
        tuples for normalized `targets` scoring at least `threshold`."""
        normalized_query = self._normalize_lamp_name(query)
        normalized_targets = [self._normalize_lamp_name(t) for t in targets]

        return process.extract(
            normalized_query,
            normalized_targets,
            scorer=fuzz.token_set_ratio,
            score_cutoff=threshold
        )

    def _normalize_lamp_name(self, name: str) -> str:
        """Standardize lamp names for matching: lowercase, decimal commas to
        dots, dashes/slashes to spaces, parentheses stripped."""
        return (
            name.lower()
            .replace(",", ".")
            .replace("-", " ")
            .replace("/", " ")
            .translate(str.maketrans("", "", "()"))
            .strip()
        )

    async def _generate_embedding(self, query: str) -> List[float]:
        """Generate an embedding for the given query using Azure OpenAI."""
        try:
            return self.embedding_model.embed_query(query)
        except Exception as e:
            self.logger.error(f"Embedding generation failed: {str(e)}")
            raise

    async def get_converter_info(self, artnr: int) -> Optional["PowerConverter"]:
        """Return the converter with article number `artnr`, or None.

        The query result is a lazy iterable: the original truthiness test on
        it was always True and left `converter` unbound when no row matched.
        Return the first match explicitly instead.
        """
        try:
            rows = self.container.query_items(
                query="SELECT * FROM c WHERE c.artnr = @artnr",
                parameters=[{"name": "@artnr", "value": artnr}],
                enable_cross_partition_query=True
            )
            for row in rows:
                return PowerConverter(**row)
            return None
        except Exception as e:
            self.logger.error(f"Failed to retrieve converter {artnr} - {e}")
            return None

    async def get_compatible_lamps(self, artnr: int) -> List[str]:
        """List the lamp-type keys registered for converter `artnr`
        (empty list when unknown or on error)."""
        try:
            results = list(self.container.query_items(
                query="SELECT * FROM c WHERE c.artnr = @artnr",
                parameters=[{"name": "@artnr", "value": artnr}]
            ))

            if not results:
                return []

            # .get guards converters that have no "lamps" property.
            return list(results[0].get("lamps", {}).keys())

        except Exception as e:
            self.logger.error(f"Failed to get compatible lamps: {str(e)}")
            return []

    async def get_converters_by_lamp_type(self, lamp_type: str, threshold: int = 75) -> List["PowerConverter"]:
        """Get converters whose lamp keys fuzzy-match `lamp_type`.

        `threshold` is now forwarded to the matcher — the original declared
        but never used it.
        """
        try:
            results = list(self.container.query_items(
                query="SELECT * FROM c WHERE IS_DEFINED(c.lamps)",
                enable_cross_partition_query=True
            ))

            # Normalize once instead of re-normalizing on every iteration.
            normalized = self._normalize_lamp_name(lamp_type)

            converters = []
            for item in results:
                lamp_keys = list(item.get("lamps", {}).keys())
                if self._fuzzy_match_lamp(normalized, lamp_keys, threshold=threshold):
                    converters.append(PowerConverter(**item))

            return converters

        except Exception as e:
            self.logger.error(f"Lamp type search failed: {str(e)}")
            return []

    async def get_lamp_limits(self, artnr: int, lamp_type: str) -> Dict[str, int]:
        """Get the min/max lamp counts for `lamp_type` on converter `artnr`,
        with typo tolerance.

        Raises:
            ValueError: when no lamp key fuzzy-matches `lamp_type`.
        """
        try:
            results = list(self.container.query_items(
                query="SELECT c.lamps FROM c WHERE c.artnr = @artnr",
                parameters=[{"name": "@artnr", "value": artnr}]
            ))

            if not results:
                return {}

            lamps = results[0]["lamps"]
            lamp_keys = list(lamps.keys())

            matches = self._fuzzy_match_lamp(self._normalize_lamp_name(lamp_type), lamp_keys, threshold=60)
            if not matches:
                raise ValueError(f"No matching lamp type found for '{lamp_type}'")

            # rapidfuzz returns (match, score, index); map the index back to
            # the ORIGINAL (un-normalized) key.
            best_match = lamp_keys[matches[0][2]]

            return {
                "min": int(lamps[best_match]["min"]),
                "max": int(lamps[best_match]["max"])
            }

        except Exception as e:
            self.logger.error(f"Failed to get lamp limits: {str(e)}")
            raise

    async def get_converters_by_dimming(
        self,
        dimming_type: str,
        voltage_current: Optional[str] = None,
        lamp_type: Optional[str] = None,
        threshold: int = 75
    ) -> List["PowerConverter"]:
        """Search converters by dimming type and optional voltage/current and
        lamp-type filters, all with fuzzy matching.

        Fixes over the original: the voltage/current `continue` only skipped
        the inner token loop (the filter silently did nothing), and a
        `for/else` appended converters when NO dimming option matched and then
        `break`-ed out of the whole item loop.
        """
        try:
            results = list(self.container.query_items(
                query="SELECT * FROM c WHERE IS_DEFINED(c.dimmability)",
                enable_cross_partition_query=True
            ))

            normalized_lamp = self._normalize_lamp_name(lamp_type) if lamp_type else None

            converters = []
            for item in results:
                # The record's type can hold several tokens (e.g. "24V DC");
                # require at least one fuzzy match against the filter.
                if voltage_current:
                    tokens = item.get("type", "").split(" ")
                    if not any(
                        fuzz.ratio(tok.lower(), voltage_current.lower()) >= threshold
                        for tok in tokens
                    ):
                        continue

                if normalized_lamp:
                    lamp_keys = list(item.get("lamps", {}).keys())
                    if not self._fuzzy_match_lamp(normalized_lamp, lamp_keys):
                        continue

                if dimming_type is None:
                    converters.append(PowerConverter(**item))
                    continue

                # Dimmability strings may list several options ("DALI/1-10V",
                # "mains dimmable"); split on both separators and accept any
                # option that fuzzy-matches.
                dimmability = item.get("dimmability", "")
                options = dimmability.split('/') + dimmability.split(' ')
                if any(
                    fuzz.ratio(opt.lower().strip(), dimming_type.lower()) >= threshold
                    for opt in options
                ):
                    converters.append(PowerConverter(**item))

            self.logger.info(f"Found {len(converters)} converters matching criteria")
            return converters

        except Exception as e:
            self.logger.error(f"Dimming query failed: {str(e)}")
            return []

    async def query_converters(self, query: str) -> str:
        """Run a raw SQL query and return up to 10 converters rendered as a
        string (or a "Query failed: ..." message).

        NOTE(review): callers receive `str(items)` — the original annotation
        (List[PowerConverter]) did not match the actual return value, so the
        annotation now reflects reality.
        """
        try:
            self.logger.debug(f"Executing query: {query}")
            items = list(self.container.query_items(
                query=query,
                enable_cross_partition_query=True
            ))
            # Cap the payload handed back to the LLM.
            items = [PowerConverter(**item) for item in items[:10]]

            self.logger.info(f"Query returned {len(items)} items after conversion")

            return str(items)

        except Exception as e:
            self.logger.info(f"Query failed: {str(e)}")
            return f"Query failed: {str(e)}"

    async def get_converters_by_voltage_current(
        self,
        artnr: Optional[int] = None,
        current: Optional[str] = None,
        input_voltage: Optional[str] = None,
        output_voltage: Optional[str] = None
    ) -> List["PowerConverter"]:
        """Query converters by article number, voltage ranges and/or current
        marking.  An `artnr` short-circuits every other filter."""
        try:
            # Handle ARTNR lookup
            if artnr:
                converter = await self.get_converter_info(artnr)
                self.logger.info(f"Used converter info returned {converter}")
                return [converter] if converter else []

            # Parse voltage ranges
            input_min, input_max = self._parse_voltage(input_voltage) if input_voltage else (None, None)
            output_min, output_max = self._parse_voltage(output_voltage) if output_voltage else (None, None)

            # Build query.  `is not None` rather than truthiness so a
            # legitimate 0V bound is not silently dropped.
            query_parts = []
            parameters = []
            if input_min is not None and input_max is not None:
                query_parts.append(f"c.nom_input_voltage_v.min <= {input_max} AND c.nom_input_voltage_v.max >= {input_min}")
                self.logger.info(query_parts[-1])
            if output_min is not None and output_max is not None:
                query_parts.append(f"c.output_voltage_v.min <= {output_max} AND c.output_voltage_v.max >= {output_min}")
                # The original logged the INPUT clause here (copy-paste).
                self.logger.info(query_parts[-1])
            if current:
                # Parameterised LIKE instead of f-string interpolation, which
                # allowed SQL injection through the `current` argument.
                query_parts.append("c.type LIKE @current_pattern")
                parameters.append({"name": "@current_pattern", "value": f"%{current}%"})

            query = "SELECT * FROM c" + (" WHERE " + " AND ".join(query_parts) if query_parts else "")

            results = list(self.container.query_items(
                query=query,
                parameters=parameters,
                enable_cross_partition_query=True
            ))

            return [PowerConverter(**item) for item in results]

        except Exception as e:
            self.logger.error(f"Voltage query failed: {str(e)}")
            return []

    def _parse_voltage(self, voltage_str: str) -> tuple:
        """Parse "220-240V", "24V", "24,5" etc. into a (min, max) float pair;
        single values yield min == max; unparseable input yields (None, None)."""
        import re
        voltage_str = voltage_str.strip().replace(',', '.')
        # Drop units and stray characters, keeping digits, dots and dashes.
        voltage_str = re.sub(r'[^0-9.\-]', '', voltage_str)
        match = re.match(r"^(\d+(?:\.\d+)?)(?:-+(\d+(?:\.\d+)?))?$", voltage_str)
        if match:
            min_v = float(match.group(1))
            max_v = float(match.group(2)) if match.group(2) else min_v
            return min_v, max_v
        else:
            return None, None
|
| 314 |
+
|
| 315 |
+
if __name__ == "__main__":
    # Manual smoke test: exercise the lamp-type search and lamp-limit lookup
    # against the live container.
    import asyncio

    connector = CosmosLampHandler()

    async def _demo():
        matched = await connector.get_converters_by_lamp_type("boa")
        print([conv.artnr for conv in matched])

        limits = await connector.get_lamp_limits(930544, "boa")
        print("Lamp limits:", limits)

    asyncio.run(_demo())
|
README.md
CHANGED
|
@@ -1,12 +1,6 @@
|
|
| 1 |
---
|
| 2 |
title: SemanticKernelFinal
|
| 3 |
-
|
| 4 |
-
colorFrom: indigo
|
| 5 |
-
colorTo: red
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 5.
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
title: SemanticKernelFinal
|
| 3 |
+
app_file: chatbot-gradio.py
|
|
|
|
|
|
|
| 4 |
sdk: gradio
|
| 5 |
+
sdk_version: 5.31.0
|
|
|
|
|
|
|
| 6 |
---
|
|
|
|
|
|
__pycache__/cosmosConnector.cpython-311.pyc
ADDED
|
Binary file (18.6 kB). View file
|
|
|
chatbot-gradio.py
ADDED
|
@@ -0,0 +1,482 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import uuid
|
| 3 |
+
from semantic_kernel import Kernel
|
| 4 |
+
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
|
| 5 |
+
from semantic_kernel.functions import kernel_function
|
| 6 |
+
from azure.cosmos import CosmosClient
|
| 7 |
+
from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import (
|
| 8 |
+
AzureChatPromptExecutionSettings,
|
| 9 |
+
)
|
| 10 |
+
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
|
| 11 |
+
from models.converterModels import PowerConverter
|
| 12 |
+
from plugins.converterPlugin import ConverterPlugin
|
| 13 |
+
from plugins.chatMemoryPlugin import ChatMemoryPlugin
|
| 14 |
+
import os
|
| 15 |
+
import gradio as gr
|
| 16 |
+
|
| 17 |
+
from dotenv import load_dotenv
|
| 18 |
+
load_dotenv()
|
| 19 |
+
|
| 20 |
+
# Configure a dedicated debug logger for kernel activity.
logger = logging.getLogger("kernel")
logger.setLevel(logging.DEBUG)

_stream_handler = logging.StreamHandler()
_stream_handler.setFormatter(
    logging.Formatter("[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s")
)
logger.addHandler(_stream_handler)


# Initialize Semantic Kernel and register the Azure OpenAI chat service.
# Credentials and deployment details come from the environment (.env).
kernel = Kernel()

kernel.add_service(
    AzureChatCompletion(
        service_id="chat",
        deployment_name=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
        endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
        api_key=os.getenv("AZURE_OPENAI_KEY"),
    )
)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class NL2SQLPlugin:
    """Semantic Kernel plugin that turns natural-language questions into
    read-only Cosmos DB SQL queries over the `converters` collection."""

    @kernel_function(name="generate_sql", description="Generate Cosmos DB SQL query")
    async def generate_sql(self, question: str) -> str:
        """Generate a Cosmos DB SQL query for *question*.

        Rejects generated statements containing DML keywords and normalises
        the FROM/SELECT clauses to the `FROM c` / `SELECT *` shape the rest
        of the pipeline expects.
        """
        import re

        sql = await self._generate_sql_helper(question)

        # Block DML commands first. BUG FIX: match whole words only, so a
        # legitimate keyword such as OFFSET no longer trips the SET check.
        if re.search(r"\b(DELETE|UPDATE|INSERT|SET)\b", sql.upper()):
            return "Invalid operation"

        # Normalise the collection alias and force a full projection.
        if "FROM converters c" in sql:
            sql = sql.replace("FROM converters c", "FROM c")
        if "SELECT *" not in sql and "FROM c" in sql:
            sql = sql.replace("SELECT c.*,", "SELECT *")
            sql = sql.replace("SELECT c.*", "SELECT *")
            sql = sql.replace("SELECT c", "SELECT *")

        return sql

    async def _generate_sql_helper(self, question: str) -> str:
        """Ask the chat model for the SQL, log the generated query, and
        return it as plain text."""
        from semantic_kernel.contents import ChatHistory

        chat_service = kernel.get_service("chat")
        chat_history = ChatHistory()
        chat_history.add_user_message(f"""Convert to Cosmos DB SQL: {question}
Collection: converters (alias 'c')
Fields:
- c.type (e.g., '350mA') - for queries related to current (mA) always refer to c.type
- c.artnr (numeric (int) article number e.g., 930546)
- c.output_voltage_v: dictionary with min/max values for output voltage
- c.output_voltage_v.min (e.g., 15)
- c.output_voltage_v.max (e.g., 40)
- c.nom_input_voltage_v: dictionary with min/max values for input voltage
- c.nom_input_voltage_v.min (e.g., 198)
- c.nom_input_voltage_v.max (e.g., 264)
- c.lamps: dictionary with min/max values for lamp types for this converter
- c.lamps["lamp_name"].min (e.g., 1)
- c.lamps["lamp_name"].max (e.g., 10)
- c.class (safety class)
- c.dimmability (e.g. if not dimmable 'NOT DIMMABLE'. if supports dimming, 'DALI/TOUCHDIM','MAINS DIM LC', '1-10V','CASAMBI' etc)
- c.listprice (e.g., 58)
- c.lifecycle (e.g., 'Active')
- c.size (e.g., '150x30x30')
- c.dimlist_type (e.g., 'DALI')
- c.pdf_link (link to product PDF)
- c.converter_description (e.g., 'POWERLED CONVERTER REMOTE 180mA 8W IP20 1-10V')
- c.ip (Ingress Protection, integer values e.g., 20,67)
- c.efficiency_full_load (e.g., 0.9)
- c.name (e.g., 'Power Converter 350mA')
- c.unit (e.g., 'PC')
- c.strain_relief (e.g., "NO", "YES")
Example document for reference:
c = {{
    "id": "8797fff0-e0a8-4e23-aad0-06209881b1d3",
    "type": "350mA",
    "artnr": 984500,
    "converter_description": "POWERLED CONVERTER REMOTE 350mA 18W IP20 DALI/TOUCHDIM",
    "strain_relief": "YES",
    "location": "INDOOR",
    "dimmability": "DALI/TOUCHDIM",
    "ccr_amplitude": "YES",
    "efficiency_full_load": 0.85,
    "ip": 20,
    "class": 2,
    "nom_input_voltage_v": {{"min": 220, "max": 240}},
    "output_voltage_v": {{"min": 9, "max": 52}},
    "barcode": "54 15233 15690 8",
    "name": "POWERLED REMOTE CONVERTER (18.2W) TOUCH DALI DIM 350mA",
    "listprice": 47,
    "unit": "PC",
    "lifecycle": "A",
    "pdf_link": "...",
    "lamps": {{
        "Single led XPE": {{"min": 3, "max": 15}},
        "Thinksmall/floorspot WC luxeon MX": {{"min": 1, "max": 4}},
        "*MIX 6 monocolor": {{"min": 1, "max": 2}},
        "Cedrus quantum": {{"min": 1, "max": 2}},
        "*MIX 6 halosphere": {{"min": 1, "max": 2}},
        "MIX 13 monocolor": {{"min": 1, "max": 1}},
        "MIX 13 halosphere": {{"min": 1, "max": 1}},
        "ORBITAL monocolor": {{"min": 1, "max": 1}},
        "ORBITAL halosphere": {{"min": 1, "max": 1}},
        "Beaufort²": {{"min": 1, "max": 1}},
        "Beaufort": {{"min": 1, "max": 1}},
        "Haloled": {{"min": 1, "max": 4}},
        "B4": {{"min": 1, "max": 4}},
        "MIX 26 monocolor": {{"min": 1, "max": 1}},
        "*BOA WC": {{"min": 1, "max": 5}}
    }},
    "size": "160*42*30"
}}
SQL Guidelines (if needed):
- Always use SELECT * and never individual fields
- When current like 350mA is detected, always query the c.type field
- Always refer to fields in SELECT or WHERE clause using c.<field_name>
- For exact matches use: WHERE c.[field] = value
- For ranges use: WHERE c.[field].min = X AND c.[field].max = Y
- Check for dimmability support by using either != "NOT DIMMABLE" or = "NOT DIMMABLE"
- Do not use AS and cast key names
- For lamp compatibility: Use WHERE IS_DEFINED(c.lamps["lamp_name"]) to check if a specific lamp is supported, or WHERE IS_DEFINED(c.lamps) for any lamp support.

Examples:
- What is the price of 40063 : SELECT * FROM c WHERE c.artnr=40063
- Give me converters with an output voltage range of exactly 2-25 : SELECT * FROM c WHERE c.output_voltage_v.min=2 AND c.output_voltage_v.max=25
- Find converters with an input voltage range of exactly 90-264 : SELECT * FROM c WHERE c.nom_input_voltage_v.min = 90 AND c.nom_input_voltage_v.max = 264
- Find converters that support a specific lamp type (e.g., "B4") : SELECT * FROM c WHERE IS_DEFINED(c.lamps["B4"])
- Find converters that support any lamp (check for lamp compatibility) : SELECT * FROM c WHERE IS_DEFINED(c.lamps)
- Find converters with a specific IP rating (e.g., 67): SELECT * FROM c WHERE c.ip = 67
- List of 350mA converters compatible with Haloled: SELECT * FROM c WHERE IS_DEFINED(c.lamps["Haloled"]) AND c.type="350mA"
- List 700mA drivers: SELECT * FROM c WHERE c.type="700mA"
Return ONLY SQL without explanations""")
        # NOTE: the garbled example operator "ANlist oD" in the original
        # prompt has been corrected to "AND" above.

        response = await chat_service.get_chat_message_content(
            chat_history=chat_history,
            settings=AzureChatPromptExecutionSettings()
        )

        log_sql_func = kernel.get_function("ChatMemoryPlugin", "log_sql_query")
        await kernel.invoke(
            function=log_sql_func,
            original_question=question,
            # BUG FIX: log the query text, not the ChatMessageContent object.
            generated_sql=str(response)
        )

        return str(response)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
# Register plugins
|
| 169 |
+
kernel.add_plugin(ConverterPlugin(logger=logger), "CosmosDBPlugin")
|
| 170 |
+
kernel.add_plugin(ChatMemoryPlugin(logger=logger), "ChatMemoryPlugin")
|
| 171 |
+
kernel.add_plugin(NL2SQLPlugin(), "NL2SQLPlugin")
|
| 172 |
+
|
| 173 |
+
# Updated query handler using function calling
|
| 174 |
+
async def handle_query(user_input: str):
    """Answer one user query via Semantic Kernel auto function-calling.

    Builds a routing prompt around *user_input*, lets the model pick and
    auto-invoke the registered plugin functions, logs the interaction, and
    returns the final answer text. Re-raises on failure after logging.
    """
    # NOTE(review): a fresh session_id is generated per call, so every
    # interaction is logged as its own session — confirm this is intended.
    session_id = str(uuid.uuid4())

    settings = AzureChatPromptExecutionSettings(
        function_choice_behavior=FunctionChoiceBehavior.Auto(auto_invoke=True)
    )

    # Six-digit article numbers identify a specific converter.
    ARTNR_PATTERN = r'\b\d{6}\b'

    # Routing prompt: duplicate step numbering (two "6." items) and the
    # "recieved" typo from the original have been fixed.
    prompt = f"""
You are a product catalog customer service chatbot for TAL BV. Answer questions about converters, their specifications and lamps. Process this user query:
{user_input}

artnr Pattern: {ARTNR_PATTERN}

Available functions:
- generate_sql: Creates SQL queries (use only for complex queries or schema keywords)
- query_converters: Executes SQL queries
- get_compatible_lamps: Simple artnr-based lamp queries
- get_converters_by_lamp_type: Simple lamp type searches
- get_lamp_limits: Simple artnr+lamp combinations
- get_converters_by_dimming: use when question contains dimming types WITHOUT artnr (if query contains mains c, dali, 1-10v, mains)
- get_converters_by_voltage_current: use for questions about input or output voltage

Decision Flow:
1. Identify synonyms :
output voltage = voltage forward = forward voltage = Vf
Driver = ledconverter = converter = power supply = gear
lamps = luminares
ip = Ingress Protection and NOT INPUT VOLTAGE

2. Check for explicit dimming types (dali/1-10V/mains/casambi):
- If found → use get_converters_by_dimming
- Include lamp_type parameter if lamp is mentioned

3. Use simple functions if query matches these patterns:
- "lamps for [artnr]" → get_compatible_lamps
- "converters for [lamp type]" → get_converters_by_lamp_type
- "min/max [lamp] for [artnr]" → get_lamp_limits
- "drivers on 24V output" → get_converters_by_voltage_current
- "drivers on 350ma" → get_converters_by_voltage_current

4. Use SQL generation ONLY when:
- Query contains schema keywords: price, type, ip, efficiency, size, class, strain relief, lifecycle,
- Combining multiple conditions (AND/OR/NOT)
- Needs complex filtering/sorting
- Requesting technical specifications for a specific converter like "dimming type of converter [artnr]", "size of [artnr]"

5. NEVER
- use get_converters_by_dimming when artnr Pattern is detected
- use get_converters_by_lamp_type when dimming type like dali, mains is mentioned
- use "ipXX" as input_voltage parameter in get_converters_by_voltage_current
- interpret "ip" as input voltage (IP = Ingress Protection)

6. For IP ratings:
- Extract using regex: r'ip[\s]?(\d+)'
- Use SQL: SELECT * FROM c WHERE c.ip = X
- NEVER route to voltage-related functions

7. If you cannot identify any relevant keywords, respond with a friendly message clarifying what you are and what they can ask for."
8. If no results are received, give an apologetic reason. Never respond with SQL query suggestions.

Examples:
User: "Show IP67 converters under €100" → generate_sql
User: "What lamps are compatible with 930560?" → get_compatible_lamps
User: "List of 1p20 drivers for haloled single on track" → get_converters_by_lamp_type(lamp_type="haloled single on track") → inspect returned converters
User: "List 700mA drivers with ip20 rating" → get_converters_by_voltage_current(current = "700mA") → inspect returned converters
User: "List of 350mA drivers" → get_converters_by_voltage_current(current = "350mA")
User: "What converters are compatible with haloled lamps?" → get_converters_by_lamp_type
User: "Voltage range for 930562" → generate_sql
User: "Dimming type of 930581" → generate_sql
User: "List of dali drivers on 24V output?" → get_converters_by_dimming"
User: 'List of 24V drivers for ledline medium power → get_converters_by_dimming(dimming_type=None, lamp_type="ledline medium power",voltage_current="24V")(or) get_converters_by_lamp_type(lamp_type="ledline medium power") → inspect returned converters '
"""
    try:
        result = await kernel.invoke_prompt(
            prompt=prompt,
            settings=settings
        )

        # Persist the successful interaction for analytics / FAQ mining.
        log_func = kernel.get_function("ChatMemoryPlugin", "log_interaction")
        await log_func.invoke(
            kernel=kernel,
            session_id=session_id,
            question=user_input,
            function_used="handle_query",
            answer=str(result)
        )

        return str(result)

    except Exception as e:
        # Log the failure before propagating so the UI error is traceable.
        error_func = kernel.get_function("ChatMemoryPlugin", "log_interaction")
        await error_func.invoke(
            kernel=kernel,
            session_id=session_id,
            question=user_input,
            function_used="error",
            answer=str(e)
        )
        raise
|
| 276 |
+
|
| 277 |
+
# # Example usage
|
| 278 |
+
# async def main():
|
| 279 |
+
# # cm = ChatMemoryPlugin(logger)
|
| 280 |
+
# # await cm.get_semantic_faqs()
|
| 281 |
+
|
| 282 |
+
# while True:
|
| 283 |
+
# try:
|
| 284 |
+
# query = input("User: ")
|
| 285 |
+
# if query.lower() in ["exit", "quit"]:
|
| 286 |
+
# break
|
| 287 |
+
|
| 288 |
+
# response = await handle_query(query)
|
| 289 |
+
# print(response)
|
| 290 |
+
|
| 291 |
+
# except KeyboardInterrupt:
|
| 292 |
+
# break
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
# if __name__ == "__main__":
|
| 296 |
+
# import asyncio
|
| 297 |
+
# asyncio.run(main())
|
| 298 |
+
|
| 299 |
+
# --- Gradio UI ---
|
| 300 |
+
|
| 301 |
+
custom_css = """
|
| 302 |
+
#chatbot-toggle-btn {
|
| 303 |
+
position: fixed;
|
| 304 |
+
bottom: 30px;
|
| 305 |
+
right: 30px;
|
| 306 |
+
z-index: 10001;
|
| 307 |
+
background-color: #ED1C24;
|
| 308 |
+
color: white;
|
| 309 |
+
border: none;
|
| 310 |
+
border-radius: 50%;
|
| 311 |
+
width: 56px;
|
| 312 |
+
height: 56px;
|
| 313 |
+
font-size: 28px;
|
| 314 |
+
font-weight: bold;
|
| 315 |
+
cursor: pointer;
|
| 316 |
+
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.4);
|
| 317 |
+
display: flex;
|
| 318 |
+
align-items: center;
|
| 319 |
+
justify-content: center;
|
| 320 |
+
transition: all 0.3s ease;
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
#chatbot-panel {
|
| 324 |
+
position: fixed;
|
| 325 |
+
bottom: 10vw;
|
| 326 |
+
right: 2vw;
|
| 327 |
+
z-index: 10000;
|
| 328 |
+
width: 95vw;
|
| 329 |
+
max-width: 600px;
|
| 330 |
+
height: 90vh;
|
| 331 |
+
max-height: 700px;
|
| 332 |
+
background-color: #ffffff;
|
| 333 |
+
border-radius: 20px;
|
| 334 |
+
box-shadow: 0 4px 24px rgba(0, 0, 0, 0.25);
|
| 335 |
+
display: flex;
|
| 336 |
+
flex-direction: column;
|
| 337 |
+
overflow: hidden;
|
| 338 |
+
font-family: 'Arial', sans-serif;
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
@media (max-width: 600px) {
|
| 342 |
+
#chatbot-panel {
|
| 343 |
+
width: 100vw;
|
| 344 |
+
height: 100vh;
|
| 345 |
+
right: 0;
|
| 346 |
+
bottom: 0;
|
| 347 |
+
border-radius: 0;
|
| 348 |
+
}
|
| 349 |
+
#chatbot-toggle-btn {
|
| 350 |
+
right: 10px;
|
| 351 |
+
bottom: 10px;
|
| 352 |
+
width: 48px;
|
| 353 |
+
height: 48px;
|
| 354 |
+
font-size: 24px;
|
| 355 |
+
}
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
#chatbot-panel.hide {
|
| 359 |
+
display: none !important;
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
#chat-header {
|
| 363 |
+
background-color: #ED1C24;
|
| 364 |
+
color: white;
|
| 365 |
+
padding: 20px;
|
| 366 |
+
font-weight: bold;
|
| 367 |
+
font-size: 22px;
|
| 368 |
+
display: flex;
|
| 369 |
+
align-items: center;
|
| 370 |
+
gap: 12px;
|
| 371 |
+
width: 100%;
|
| 372 |
+
box-sizing: border-box;
|
| 373 |
+
}
|
| 374 |
+
|
| 375 |
+
#chat-header img {
|
| 376 |
+
border-radius: 50%;
|
| 377 |
+
width: 40px;
|
| 378 |
+
height: 40px;
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
.gr-chatbot {
|
| 382 |
+
flex: 1;
|
| 383 |
+
overflow-y: auto;
|
| 384 |
+
padding: 20px;
|
| 385 |
+
background-color: #f9f9f9;
|
| 386 |
+
border-top: 1px solid #eee;
|
| 387 |
+
border-bottom: 1px solid #eee;
|
| 388 |
+
display: flex;
|
| 389 |
+
flex-direction: column;
|
| 390 |
+
gap: 12px;
|
| 391 |
+
box-sizing: border-box;
|
| 392 |
+
}
|
| 393 |
+
|
| 394 |
+
.gr-textbox {
|
| 395 |
+
padding: 16px 20px;
|
| 396 |
+
background-color: #fff;
|
| 397 |
+
display: flex;
|
| 398 |
+
align-items: center;
|
| 399 |
+
gap: 12px;
|
| 400 |
+
border-top: 1px solid #eee;
|
| 401 |
+
box-sizing: border-box;
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
.gr-textbox textarea {
|
| 405 |
+
flex: 1;
|
| 406 |
+
resize: none;
|
| 407 |
+
padding: 12px;
|
| 408 |
+
background-color: white;
|
| 409 |
+
border: 1px solid #ccc;
|
| 410 |
+
border-radius: 8px;
|
| 411 |
+
font-family: inherit;
|
| 412 |
+
font-size: 16px;
|
| 413 |
+
box-sizing: border-box;
|
| 414 |
+
height: 48px;
|
| 415 |
+
line-height: 1.5;
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
.gr-textbox button {
|
| 419 |
+
background-color: #ED1C24;
|
| 420 |
+
border: none;
|
| 421 |
+
color: white;
|
| 422 |
+
border-radius: 8px;
|
| 423 |
+
padding: 12px 20px;
|
| 424 |
+
cursor: pointer;
|
| 425 |
+
font-weight: bold;
|
| 426 |
+
transition: background-color 0.3s ease;
|
| 427 |
+
font-size: 16px;
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
.gr-textbox button:hover {
|
| 431 |
+
background-color: #c4161c;
|
| 432 |
+
}
|
| 433 |
+
|
| 434 |
+
footer {
|
| 435 |
+
display: none !important;
|
| 436 |
+
}
|
| 437 |
+
|
| 438 |
+
"""
|
| 439 |
+
|
| 440 |
+
# Whether the chat panel is currently shown.
# NOTE(review): this is module-level state shared by every Gradio session;
# concurrent users toggle the same flag — confirm single-user use.
panel_visible = False


def toggle_panel():
    """Flip the panel's visibility flag and return the updated Column state."""
    global panel_visible
    panel_visible = not panel_visible
    return gr.Column(visible=panel_visible)
|
| 446 |
+
|
| 447 |
+
with gr.Blocks(css=custom_css) as demo:
    # Floating action button that shows/hides the chat panel.
    toggle_btn = gr.Button("💬", elem_id="chatbot-toggle-btn")

    # The chat panel itself, hidden until toggled.
    chat_panel = gr.Column(visible=panel_visible, elem_id="chatbot-panel")
    with chat_panel:
        # Branded header bar.
        with gr.Row(elem_id="chat-header"):
            gr.HTML("""
            <div id='chat-header'>
                <img src="https://www.svgrepo.com/download/490283/pixar-lamp.svg" />
                Lofty the TAL Bot
            </div>
            """)
        # Conversation view plus input controls.
        chatbot = gr.Chatbot(elem_id="gr-chatbot", type="messages")
        msg = gr.Textbox(placeholder="Type your question here...", elem_id="gr-textbox")
        send = gr.Button("Send")

    async def respond(message, chat_history):
        """Route the user's message through handle_query and append both
        turns to the history in OpenAI message format."""
        response = await handle_query(message)
        chat_history.append({"role": "user", "content": message})
        chat_history.append({"role": "assistant", "content": response})
        # Clear the textbox and push the updated history back to the UI.
        return "", chat_history

    send.click(respond, [msg, chatbot], [msg, chatbot])
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    toggle_btn.click(toggle_panel, outputs=chat_panel)

demo.launch()
|
improvements.txt
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
remove sql query in chatbot reply upon query failure
|
| 2 |
+
size formatting in reply
|
| 3 |
+
new gradio frontend
|
| 4 |
+
remove unnecessary fields in return to save computing power
|
| 5 |
+
List of drivers compatible with only b4 on 350mA?
|
| 6 |
+
List of drivers compatible with only haloled on 350mA and add min and max. qty of luminaires?
|
| 7 |
+
commonly asked questions/queries?
|
| 8 |
+
improve error message
|
| 9 |
+
implement lamp name normalisation in generate query too (approach TBD)
|
| 10 |
+
lamp name recognition for names like haloled single on track
|
| 11 |
+
User: list of drivers on 350mA that support B4
|
| 12 |
+
[2025-06-13 11:56:59,151 - kernel:229 - INFO] Found 0 converters matching criteria
|
| 13 |
+
[2025-06-13 11:56:59,151 - kernel:89 - INFO] Used get_converters_by_dimming with dimming type: 350mA
|
| 14 |
+
voltage_current: None
|
| 15 |
+
lamp_type: B4
|
| 16 |
+
I'm just a technical assistant for TAL BV. Please let me know if you have any questio
|
| 17 |
+
caching
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
improved scoring based on partial matching for lamp search
|
| 21 |
+
|
| 22 |
+
chat memory/ caching for query generation
|
| 23 |
+
|
| 24 |
+
fix generate query logging for errors/badrequests
|
models/__pycache__/converterModels.cpython-311.pyc
ADDED
|
Binary file (4.08 kB). View file
|
|
|
models/__pycache__/converterVectorStoreModels.cpython-311.pyc
ADDED
|
Binary file (5.59 kB). View file
|
|
|
models/converterModels.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# models/converterModels.py
|
| 2 |
+
from pydantic import BaseModel, ConfigDict, Field, field_validator, validator
|
| 3 |
+
from typing import Dict, Optional
|
| 4 |
+
|
| 5 |
+
class LampConnections(BaseModel):
    """Inclusive min/max number of lamps of one type a converter can drive."""
    min: float
    max: float

    @field_validator('min', 'max', mode='before')
    def convert_values(cls, v):
        # Source documents may use a comma as the decimal separator
        # (e.g. "1,5"); normalise to a dot before parsing.
        return float(str(v).replace(',', '.'))
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class VoltageRange(BaseModel):
    """Inclusive min/max bounds of a voltage range, in volts."""
    min: float
    max: float

    @field_validator('min', 'max', mode='before')
    def convert_values(cls, v):
        # Source documents may use a comma as the decimal separator
        # (e.g. "2,5"); normalise to a dot before parsing.
        return float(str(v).replace(',', '.'))
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class PowerConverter(BaseModel):
    """Pydantic view of one converter document from Cosmos DB.

    Field aliases map attribute names onto the raw document keys; unknown
    document keys are dropped (``extra="ignore"``).
    """

    # Identity
    doc_id: Optional[str] = Field(None, alias="id")
    artnr: Optional[int] = Field(None, alias="artnr")

    # Technical characteristics
    ip_rating: Optional[int] = Field(None, alias="ip")
    lamps: Optional[Dict[str, LampConnections]] = Field(default_factory=dict, alias="lamps")
    type: Optional[str] = Field(None, alias="type")
    name: Optional[str] = Field(None, alias="name")
    efficiency: Optional[float] = Field(None, alias="efficiency_full_load")
    pdf_link: Optional[str] = Field(None, alias="pdf_link")
    converter_description: Optional[str] = Field(None, alias="converter_description")
    nom_input_voltage: Optional[VoltageRange] = Field(None, alias="nom_input_voltage_v")
    output_voltage: Optional[VoltageRange] = Field(None, alias="output_voltage_v")

    # Commercial / physical attributes
    unit: Optional[str] = Field(None, alias="unit")
    price: Optional[float] = Field(None, alias="listprice")
    life_cycle: Optional[str] = Field(None, alias="lifecycle")
    size: Optional[str] = Field(None, alias="size")
    ccr_amplitude: Optional[str] = Field(None, alias="ccr_amplitude")
    dimmability: Optional[str] = Field(None, alias="dimmability")
    strain_relief: Optional[str] = Field(None, alias="strain_relief")
    gross_weight: Optional[float] = Field(None, alias="gross_weight")

    # Allow construction by field name as well as by alias.
    model_config = ConfigDict(
        populate_by_name=True,
        extra="ignore"
    )
|
plugins/__pycache__/chatMemoryPlugin.cpython-311.pyc
ADDED
|
Binary file (4.12 kB). View file
|
|
|
plugins/__pycache__/converterPlugin.cpython-311.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
plugins/__pycache__/sqlGenerationPlugin.cpython-311.pyc
ADDED
|
Binary file (3.78 kB). View file
|
|
|
plugins/chatMemoryPlugin.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# chatMemoryPlugin.py
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
from typing import Annotated, Dict, List, Optional
|
| 4 |
+
import uuid
|
| 5 |
+
from CosmosDBHandlers.cosmosConnector import CosmosLampHandler
|
| 6 |
+
from semantic_kernel.functions import kernel_function
|
| 7 |
+
from CosmosDBHandlers.cosmosChatHistoryHandler import ChatMemoryHandler
|
| 8 |
+
class ChatMemoryPlugin:
    """Semantic Kernel plugin wrapping ChatMemoryHandler: logs interactions
    and generated SQL, and retrieves semantically-clustered FAQs."""

    def __init__(self, logger):
        self.logger = logger
        self.chat_memory_handler = ChatMemoryHandler(logger)

    @kernel_function(name="log_interaction", description="Logs chat interactions")
    async def log_interaction(self, session_id: str, question: str, function_used: str, answer: str):
        """Persist one Q/A interaction. Failures are logged, never raised,
        so logging problems cannot break the chat flow."""
        try:
            await self.chat_memory_handler.log_interaction(
                session_id=session_id,
                question=question,
                function_used=function_used,
                answer=answer,
            )
        except Exception as e:
            self.logger.error(f"Failed to log chat interaction: {str(e)}")

    @kernel_function(name="log_sql_query", description="Logs generated SQL queries")
    async def log_sql_query(self, original_question: str, generated_sql: str):
        """Persist a generated SQL query alongside the question that produced
        it. Failures are logged, never raised."""
        try:
            await self.chat_memory_handler.log_sql_query(
                original_question=original_question,
                generated_sql=generated_sql,
            )
        except Exception as e:
            self.logger.error(f"Failed to log SQL query: {str(e)}")

    async def get_semantic_faqs(self, limit: int = 5, threshold: float = 0.1) -> List[str]:
        """Retrieve FAQs using vector embeddings for semantic similarity.

        Returns the representative question of each FAQ cluster, or an
        empty list on failure.
        """
        try:
            faqs_dict = await self.chat_memory_handler.get_semantic_faqs(limit=limit, threshold=threshold)
            faqs = [faq["representative_question"] for faq in faqs_dict]
            self.logger.info(faqs)
            # BUG FIX: the original fell through after logging and always
            # returned None on success; return the computed list instead.
            return faqs
        except Exception as e:
            self.logger.error(f"Semantic FAQ retrieval failed: {str(e)}")
            return []
|
plugins/converterPlugin.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#converterPlugin.py
|
| 2 |
+
from typing import Annotated, Optional
|
| 3 |
+
from CosmosDBHandlers.cosmosConnector import CosmosLampHandler
|
| 4 |
+
from semantic_kernel.functions import kernel_function
|
| 5 |
+
|
| 6 |
+
class ConverterPlugin:
|
| 7 |
+
def __init__(self, logger):
|
| 8 |
+
self.logger = logger
|
| 9 |
+
self.db = CosmosLampHandler(logger=logger)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@kernel_function(
|
| 13 |
+
name="query_converters",
|
| 14 |
+
description="Execute SQL query against Cosmos DB converters collection"
|
| 15 |
+
)
|
| 16 |
+
async def query_converters(self, query: str) -> str:
|
| 17 |
+
try:
|
| 18 |
+
items = await self.db.query_converters(query)
|
| 19 |
+
self.logger.info(f"Executed query: {query}")
|
| 20 |
+
if not items:
|
| 21 |
+
return "No items found for the given query."
|
| 22 |
+
return str(items)
|
| 23 |
+
except Exception as e:
|
| 24 |
+
return f"Query failed: {str(e)}"
|
| 25 |
+
|
| 26 |
+
@kernel_function(
|
| 27 |
+
name = "get_converter_info",
|
| 28 |
+
description="Get information about a converter using its artnr (partition key)"
|
| 29 |
+
)
|
| 30 |
+
async def get_converter_info(self, artnr:int) -> str:
|
| 31 |
+
try:
|
| 32 |
+
converter = await self.db.get_converter_info(artnr)
|
| 33 |
+
self.logger.info(f"Used get_converter_info with artrn: {artnr}")
|
| 34 |
+
return f"{converter.model_dump()})"
|
| 35 |
+
except Exception as e:
|
| 36 |
+
f"Failed to retrieve converter {artnr} - {e}"
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@kernel_function
|
| 41 |
+
async def get_compatible_lamps(
|
| 42 |
+
self,
|
| 43 |
+
artnr: Annotated[int, "Converter artnr (partition key)"]
|
| 44 |
+
) -> str:
|
| 45 |
+
"""Get compatible lamps for a converter by artnr"""
|
| 46 |
+
try:
|
| 47 |
+
lamps = await self.db.get_compatible_lamps(artnr)
|
| 48 |
+
self.logger.info(f"Used get_compatible_lamps with artnr: {artnr}")
|
| 49 |
+
return f"Compatible lamps: {', '.join(lamps)}" if lamps else "No lamps found"
|
| 50 |
+
except Exception as e:
|
| 51 |
+
return f"Error retrieving compatible lamps: {str(e)}"
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@kernel_function(
|
| 55 |
+
name="get_converters_by_lamp_type",
|
| 56 |
+
description="Find converters compatible with a specific lamp type"
|
| 57 |
+
)
|
| 58 |
+
async def get_converters_by_lamp_type(
|
| 59 |
+
self,
|
| 60 |
+
lamp_type: Annotated[str, "Lamp model (e.g., Haloled, B4)"]
|
| 61 |
+
) -> str:
|
| 62 |
+
"""Find converters compatible with specific lamp type"""
|
| 63 |
+
try:
|
| 64 |
+
converters = await self.db.get_converters_by_lamp_type(lamp_type)
|
| 65 |
+
self.logger.info(f"Used get_converters_by_lamp_type with lamp_type: {lamp_type}")
|
| 66 |
+
if not converters:
|
| 67 |
+
return "No compatible converters found"
|
| 68 |
+
return "\n".join([f"{c.model_dump()})" for c in converters]) if converters else "No converters found"
|
| 69 |
+
except Exception as e:
|
| 70 |
+
return f"Error retrieving converters: {str(e)}"
|
| 71 |
+
|
| 72 |
+
@kernel_function(
|
| 73 |
+
name="get_converters_by_dimming",
|
| 74 |
+
description="Find converters of a specified dimming type"
|
| 75 |
+
)
|
| 76 |
+
async def get_converters_by_dimming(
|
| 77 |
+
self,
|
| 78 |
+
dimming_type: Annotated[str, "Dimming type mentioned like dali, mains, 1-10v"],
|
| 79 |
+
voltage_current: Annotated[str | None,"Voltage or current specification like 350mA, 24V DC"] = None,
|
| 80 |
+
lamp_type: Annotated[str | None, "Lamp model (e.g., Haloled, B4)"] = None,
|
| 81 |
+
threshold: int = 75) -> str:
|
| 82 |
+
"""Search converters by dimming type with technical specifications"""
|
| 83 |
+
try:
|
| 84 |
+
converters = await self.db.get_converters_by_dimming(
|
| 85 |
+
dimming_type=dimming_type,
|
| 86 |
+
voltage_current=voltage_current,
|
| 87 |
+
lamp_type=lamp_type,
|
| 88 |
+
threshold=threshold)
|
| 89 |
+
self.logger.info(f"""Used get_converters_by_dimming with dimming type: {dimming_type}
|
| 90 |
+
voltage_current: {voltage_current}
|
| 91 |
+
lamp_type: {lamp_type}""")
|
| 92 |
+
if not converters:
|
| 93 |
+
return "No relavent converters found"
|
| 94 |
+
return "\n".join([f"{c.model_dump()})" for c in converters]) if converters else "No converters found"
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
except Exception as e:
|
| 98 |
+
return f"Error returning converters: {str(e)}"
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
@kernel_function(
|
| 102 |
+
name="get_lamp_limits",
|
| 103 |
+
description="Get min/max lamps for a converter by artnr and lamp type"
|
| 104 |
+
)
|
| 105 |
+
async def get_lamp_limits(
|
| 106 |
+
self,
|
| 107 |
+
artnr: Annotated[int, "Converter artnr"],
|
| 108 |
+
lamp_type: Annotated[str, "Lamp model (e.g., Haloled)"]
|
| 109 |
+
) -> str:
|
| 110 |
+
"""Get min/max lamps for a converter"""
|
| 111 |
+
try:
|
| 112 |
+
limits = await self.db.get_lamp_limits(artnr, lamp_type)
|
| 113 |
+
self.logger.info(f"Used get_lamp_limits with ARTNR: {artnr} and lamp_type: {lamp_type}")
|
| 114 |
+
return f"{lamp_type}: Min {limits['min']} - Max {limits['max']} lamps"
|
| 115 |
+
except Exception as e:
|
| 116 |
+
return f"Error retrieving lamp limits: {str(e)}"
|
| 117 |
+
|
| 118 |
+
@kernel_function(
|
| 119 |
+
name="get_converters_by_voltage",
|
| 120 |
+
description="Get converters that have the mentioned input/output voltage range or current"
|
| 121 |
+
)
|
| 122 |
+
async def get_converters_by_voltage_current(
|
| 123 |
+
self,
|
| 124 |
+
artnr: Annotated[int, ""] = None,
|
| 125 |
+
current: Annotated[str, "Current like 350mA, 700mA"]=None,
|
| 126 |
+
input_voltage: Annotated[str, "Input voltage range like '198-464' NEVER ip"] = None,
|
| 127 |
+
output_voltage: Annotated[str, "Output voltage range like '24', '2-25'"] = None
|
| 128 |
+
) -> str:
|
| 129 |
+
try:
|
| 130 |
+
converters = await self.db.get_converters_by_voltage_current(artnr=artnr,
|
| 131 |
+
current=current,
|
| 132 |
+
input_voltage=input_voltage,
|
| 133 |
+
output_voltage=output_voltage)
|
| 134 |
+
self.logger.info(f"""Used get_converters_by_voltage_current with input_voltage: {input_voltage}
|
| 135 |
+
output_voltage: {output_voltage}
|
| 136 |
+
current: {current}
|
| 137 |
+
artnr: {artnr}""")
|
| 138 |
+
if not converters:
|
| 139 |
+
return "No relavent converters found"
|
| 140 |
+
return "\n".join([f"{c.model_dump()})" for c in converters]) if converters else "No converters found"
|
| 141 |
+
|
| 142 |
+
except Exception as e:
|
| 143 |
+
return f"Error retrieving converters"
|
requirements.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
semantic-kernel
|
| 2 |
+
azure-cosmos
|
| 3 |
+
rapidfuzz
|