subashpoudel commited on
Commit
3f5fe23
·
1 Parent(s): 93d50e5

Implemented caching

Browse files
.gitignore CHANGED
@@ -6,4 +6,5 @@ logs
6
  delete_pycache.py
7
  docker_file_for_actions.txt
8
  vercel.json
9
- .github/workflows/actions_cicd.txt
 
 
6
  delete_pycache.py
7
  docker_file_for_actions.txt
8
  vercel.json
9
+ .github/workflows/actions_cicd.txt
10
+ check.py
api/routers/analytics_chatbot.py CHANGED
@@ -1,7 +1,10 @@
1
  import ast
 
2
  import json
 
3
  import logging
4
  from fastapi import APIRouter
 
5
  from fastapi.responses import StreamingResponse
6
  from api.stored_data import stored_data
7
  from src.genai.analytics_chatbot.agent import ChatbotAgent
@@ -10,20 +13,39 @@ from api.schemas.analytics_chatbot import UserMessage
10
 
11
  app_logger = logging.getLogger("app_logger")
12
  error_logger = logging.getLogger("error_logger")
 
 
 
 
13
 
14
  router = APIRouter()
15
  agent=ChatbotAgent()
16
- graph = agent.chatbot_graph()
 
17
 
18
  @router.get("/analytics-chatbot")
19
  def get_analytics(msg:str):
20
  # user_query=process_query(msg)
 
 
 
 
 
 
 
21
  config={"configurable": {"thread_id": "analytics-chatbot-thread"},"run_name":"analytics-chatbot"}
22
  result=graph.invoke({'messages':msg},config=config)
 
 
23
  if result.get('backup_data') is not None:
24
- return {'backup_response': result['backup_data']}
25
  else:
26
- return {
27
  'response': result['response'],
28
  'endpoint': result['endpoint']
29
  }
 
 
 
 
 
 
1
  import ast
2
+ import os
3
  import json
4
+ import hashlib
5
  import logging
6
  from fastapi import APIRouter
7
+ from redis import Redis
8
  from fastapi.responses import StreamingResponse
9
  from api.stored_data import stored_data
10
  from src.genai.analytics_chatbot.agent import ChatbotAgent
 
13
 
14
  app_logger = logging.getLogger("app_logger")
15
  error_logger = logging.getLogger("error_logger")
16
# Redis connection settings — required at import time; a missing variable
# fails fast with KeyError rather than starting with a broken cache.
password = os.environ['REDIS_PASSWORD']
host = os.environ['REDIS_HOST']
port = os.environ['REDIS_PORT']
# SECURITY: never print/log the Redis password — the previous debug
# print leaked the secret into stdout and any captured logs.

router = APIRouter()
agent = ChatbotAgent()
graph = agent.chatbot_graph()
# decode_responses=True so cached values come back as str, ready for json.loads.
redis_client = Redis(host=host, port=port, db=0, decode_responses=True,
                     username="default", password=password)
25
 
26
@router.get("/analytics-chatbot")
def get_analytics(msg: str):
    """Answer an analytics query, serving repeated queries from Redis.

    The cache key is an MD5 digest of the raw query string (non-cryptographic
    use, so MD5 is acceptable here); entries expire after one hour. Cache
    failures are best-effort: if Redis is unreachable the request still
    falls through to the chatbot graph instead of erroring out.
    """
    cache_key = f"analytics:{hashlib.md5(msg.encode()).hexdigest()}"

    # Best-effort cache read — a Redis outage must not take down the endpoint.
    try:
        cached_response = redis_client.get(cache_key)
    except Exception:
        error_logger.exception("Redis GET failed for key %s", cache_key)
        cached_response = None
    if cached_response:
        app_logger.info("Cache hit for key %s", cache_key)
        return json.loads(cached_response)

    config = {"configurable": {"thread_id": "analytics-chatbot-thread"},
              "run_name": "analytics-chatbot"}
    result = graph.invoke({'messages': msg}, config=config)
    app_logger.info("Analytics chatbot result: %s", result)

    # backup_data takes precedence when the graph fell back to retrieval.
    if result.get('backup_data') is not None:
        response_to_cache = {'backup_response': result['backup_data']}
    else:
        response_to_cache = {
            'response': result['response'],
            'endpoint': result['endpoint']
        }

    # Save to Redis with a 1-hour TTL (ex=seconds); best-effort only.
    try:
        redis_client.set(cache_key, json.dumps(response_to_cache), ex=3600)
    except Exception:
        error_logger.exception("Redis SET failed for key %s", cache_key)

    return response_to_cache
app.py CHANGED
@@ -1,13 +1,12 @@
1
  from fastapi import FastAPI, Request
2
  from logger_config import setup_loggers
3
  import logging
4
- from api.routers import orchestration, context_analysis, ideation , human_idea_refining , brainstorm , generate_final_story , generate_image, show_analytics, analytics_chatbot
5
 
6
  setup_loggers()
7
  app = FastAPI()
8
 
9
 
10
- # Get loggers
11
  app_logger = logging.getLogger("app_logger")
12
  error_logger = logging.getLogger("error_logger")
13
  access_logger = logging.getLogger("access_logger")
 
1
  from fastapi import FastAPI, Request
2
  from logger_config import setup_loggers
3
  import logging
4
+ from api.routers import orchestration, context_analysis, ideation , brainstorm , generate_final_story , generate_image, show_analytics, analytics_chatbot
5
 
6
  setup_loggers()
7
  app = FastAPI()
8
 
9
 
 
10
  app_logger = logging.getLogger("app_logger")
11
  error_logger = logging.getLogger("error_logger")
12
  access_logger = logging.getLogger("access_logger")
logs/access.log CHANGED
@@ -1509,3 +1509,31 @@
1509
  2025-10-17 13:54:02,382 | INFO | access_logger | app.py:21 | Response status: 200
1510
  2025-10-17 13:54:26,472 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1511
  2025-10-17 13:54:36,464 | INFO | access_logger | app.py:21 | Response status: 200
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1509
  2025-10-17 13:54:02,382 | INFO | access_logger | app.py:21 | Response status: 200
1510
  2025-10-17 13:54:26,472 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1511
  2025-10-17 13:54:36,464 | INFO | access_logger | app.py:21 | Response status: 200
1512
+ 2025-10-17 15:46:54,304 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1513
+ 2025-10-17 15:47:04,686 | INFO | access_logger | app.py:21 | Response status: 200
1514
+ 2025-10-17 15:47:10,111 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1515
+ 2025-10-17 15:47:16,471 | INFO | access_logger | app.py:21 | Response status: 200
1516
+ 2025-10-17 15:49:07,624 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1517
+ 2025-10-17 15:49:12,250 | INFO | access_logger | app.py:21 | Response status: 200
1518
+ 2025-10-17 15:49:16,022 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1519
+ 2025-10-17 15:49:21,139 | INFO | access_logger | app.py:21 | Response status: 200
1520
+ 2025-10-17 15:51:43,342 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1521
+ 2025-10-17 15:51:47,715 | INFO | access_logger | app.py:21 | Response status: 200
1522
+ 2025-10-17 15:51:51,793 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1523
+ 2025-10-17 15:51:56,726 | INFO | access_logger | app.py:21 | Response status: 200
1524
+ 2025-10-17 16:00:44,222 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1525
+ 2025-10-17 16:00:55,100 | INFO | access_logger | app.py:21 | Response status: 200
1526
+ 2025-10-17 16:00:59,383 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1527
+ 2025-10-17 16:01:04,084 | INFO | access_logger | app.py:21 | Response status: 200
1528
+ 2025-10-17 16:03:18,574 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1529
+ 2025-10-17 16:05:48,001 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1530
+ 2025-10-17 16:05:52,416 | INFO | access_logger | app.py:21 | Response status: 200
1531
+ 2025-10-17 16:06:13,136 | INFO | access_logger | app.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=i%20want%20the%20analytics%20of%20last%2010%20days%20of%20muna%20chiya
1532
+ 2025-10-17 16:06:17,871 | INFO | access_logger | app.py:21 | Response status: 200
1533
+ 2025-10-26 15:27:54,555 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20sentiment%20distribution%20of%20divya%20dhakal
1534
+ 2025-10-26 15:34:02,449 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20sentiment%20distribution%20of%20divya%20dhakal
1535
+ 2025-10-26 15:34:21,305 | INFO | access_logger | app.py:20 | Response status: 200
1536
+ 2025-10-26 15:34:25,929 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20sentiment%20distribution%20of%20divya%20dhakal
1537
+ 2025-10-26 15:34:26,248 | INFO | access_logger | app.py:20 | Response status: 200
1538
+ 2025-10-26 15:41:29,547 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20sentiment%20distribution%20of%20divya%20dhakal
1539
+ 2025-10-26 15:41:32,818 | INFO | access_logger | app.py:20 | Response status: 200
requirements.txt CHANGED
@@ -1,26 +1,26 @@
1
  langgraph==0.6.5
2
- langsmith
3
- langchain_groq
4
  pydantic==2.11.7
5
- datasets
6
- faiss-cpu
7
- dotenv
8
- fastapi
9
- uvicorn
10
- numpy
11
  pandas
12
- langchain_huggingface
13
- python-multipart
14
- langmem
15
- streamlit
16
- requests
17
- langchain_openai
18
- nltk
19
- scikit-learn
20
- pandas
21
- langchain-community
22
- tiktoken
23
- langchain-anthropic
24
- pytest
25
- langchain_google_genai
26
  mangum
 
 
1
  langgraph==0.6.5
2
+ langsmith==0.4.16
3
+ langchain_groq==0.3.7
4
  pydantic==2.11.7
5
+ datasets==4.0.0
6
+ faiss-cpu==1.12.0
7
+ dotenv==0.9.9
8
+ fastapi==0.116.1
9
+ uvicorn==0.36.0
10
+ numpy==2.3.2
11
  pandas
12
+ langchain_huggingface==0.3.1
13
+ python-multipart==0.0.20
14
+ langmem==0.0.29
15
+ streamlit==1.48.1
16
+ requests==2.32.5
17
+ langchain_openai==0.3.31
18
+ nltk==3.9.1
19
+ scikit-learn==1.7.1
20
+ langchain-community==0.3.27
21
+ tiktoken==0.11.0
22
+ langchain-anthropic==0.3.19
23
+ pytest==8.4.1
24
+ langchain_google_genai==2.1.9
 
25
  mangum
26
+ redis==7.0.0
src/genai/analytics_chatbot/agent.py CHANGED
@@ -3,18 +3,25 @@ from langgraph.checkpoint.memory import MemorySaver
3
  from .utils.state import State
4
  from .utils.nodes import FetchDataNode , FetchLastMessage , RetrievePossibleEndpoints , FetchParametersNode , RetrieveExactEndpoint, BackupRetrievalNode, RoutingNode
5
 
 
 
 
 
 
 
 
6
  class ChatbotAgent:
7
  def __init__(self):
8
  self.memory = MemorySaver()
9
 
10
  def chatbot_graph(self):
11
  graph_builder= StateGraph(State)
12
- graph_builder.add_node("fetch_last_message", FetchLastMessage().run)
13
- graph_builder.add_node("retrieve_api_endpoints", RetrievePossibleEndpoints().run)
14
- graph_builder.add_node("retrieve_exact_endpoint", RetrieveExactEndpoint().run)
15
- graph_builder.add_node("fetch_parameters", FetchParametersNode().run)
16
- graph_builder.add_node("fetch_data", FetchDataNode().run)
17
- graph_builder.add_node("backup_response", BackupRetrievalNode().run)
18
 
19
  graph_builder.add_edge(START, "fetch_last_message")
20
  graph_builder.add_edge("fetch_last_message", 'retrieve_api_endpoints')
 
3
  from .utils.state import State
4
  from .utils.nodes import FetchDataNode , FetchLastMessage , RetrievePossibleEndpoints , FetchParametersNode , RetrieveExactEndpoint, BackupRetrievalNode, RoutingNode
5
 
6
+ fetch_last_message_node = FetchLastMessage()
7
+ retrieve_api_endpoints_node = RetrievePossibleEndpoints()
8
+ retrieve_exact_endpoint_node = RetrieveExactEndpoint()
9
+ fetch_parameters_node = FetchParametersNode()
10
+ fetch_data_node = FetchDataNode()
11
+ backup_response_node = BackupRetrievalNode()
12
+
13
  class ChatbotAgent:
14
  def __init__(self):
15
  self.memory = MemorySaver()
16
 
17
  def chatbot_graph(self):
18
  graph_builder= StateGraph(State)
19
+ graph_builder.add_node("fetch_last_message", fetch_last_message_node.run)
20
+ graph_builder.add_node("retrieve_api_endpoints", retrieve_api_endpoints_node.run)
21
+ graph_builder.add_node("retrieve_exact_endpoint", retrieve_exact_endpoint_node.run)
22
+ graph_builder.add_node("fetch_parameters", fetch_parameters_node.run)
23
+ graph_builder.add_node("fetch_data", fetch_data_node.run)
24
+ graph_builder.add_node("backup_response", backup_response_node.run)
25
 
26
  graph_builder.add_edge(START, "fetch_last_message")
27
  graph_builder.add_edge("fetch_last_message", 'retrieve_api_endpoints')
src/genai/analytics_chatbot/utils/nodes.py CHANGED
@@ -35,6 +35,7 @@ class RetrievePossibleEndpoints:
35
  self.results = []
36
 
37
  def run(self,state:State):
 
38
  query_embedding = np.array(embedding_model.embed_query(state['latest_message'])).reshape(1, -1).astype('float32')
39
  distances, indices = self.index.search(query_embedding, 3)
40
  for idx in indices[0]:
@@ -51,6 +52,7 @@ class RetrieveExactEndpoint:
51
  self.llm = llm_gpt
52
 
53
  def run(self,state:State):
 
54
  messages = [SystemMessage(content=fetch_endpoint_prompt),
55
  FunctionMessage(name='possible_endpoints',content=f'''The possible endpoints are: {state['possible_endpoints']}'''),
56
  HumanMessage(content=f'''The user query is: {state['latest_message']}''')]
 
35
  self.results = []
36
 
37
  def run(self,state:State):
38
+ print('Gone to retrieve possible endpoints')
39
  query_embedding = np.array(embedding_model.embed_query(state['latest_message'])).reshape(1, -1).astype('float32')
40
  distances, indices = self.index.search(query_embedding, 3)
41
  for idx in indices[0]:
 
52
  self.llm = llm_gpt
53
 
54
  def run(self,state:State):
55
+ print('Gone to retrieve exact endpoint')
56
  messages = [SystemMessage(content=fetch_endpoint_prompt),
57
  FunctionMessage(name='possible_endpoints',content=f'''The possible endpoints are: {state['possible_endpoints']}'''),
58
  HumanMessage(content=f'''The user query is: {state['latest_message']}''')]
src/genai/utils/models_loader.py CHANGED
@@ -23,7 +23,8 @@ llm_groq = ChatGroq(model="llama-3.3-70b-versatile",temperature=0)
23
  llm_gpt_small = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
24
  llm_gpt = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
25
  llm_gpt_high = ChatOpenAI(model="gpt-5-nano",temperature=0.5)
26
- encoding_model = tiktoken.encoding_for_model('gpt-4o-mini')
 
27
 
28
 
29
  captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct"
 
23
  llm_gpt_small = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
24
  llm_gpt = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
25
  llm_gpt_high = ChatOpenAI(model="gpt-5-nano",temperature=0.5)
26
+ # encoding_model = tiktoken.encoding_for_model('gpt-4o-mini')
27
+ encoding_model = 'demo'
28
 
29
 
30
  captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct"