subashpoudel committed on
Commit
ae75807
·
1 Parent(s): 1a6859b

next commit

Browse files
.gitignore CHANGED
@@ -2,7 +2,8 @@
2
  myenv
3
  *.pyc
4
  __pycache__/
5
- logs
 
6
  delete_pycache.py
7
  docker_file_for_actions.txt
8
  vercel.json
 
2
  myenv
3
  *.pyc
4
  __pycache__/
5
+ logs/
6
+ research/
7
  delete_pycache.py
8
  docker_file_for_actions.txt
9
  vercel.json
api/main.py CHANGED
@@ -1,5 +1,5 @@
1
  from fastapi import FastAPI, Request
2
- from logger_config import setup_loggers
3
  import logging
4
  from .routers import orchestration, context_analysis, ideation , human_idea_refining , brainstorm , generate_final_story , generate_image, show_analytics, analytics_chatbot
5
 
 
1
  from fastapi import FastAPI, Request
2
+ from config.logger_config import setup_loggers
3
  import logging
4
  from .routers import orchestration, context_analysis, ideation , human_idea_refining , brainstorm , generate_final_story , generate_image, show_analytics, analytics_chatbot
5
 
api/routers/analytics_chatbot.py CHANGED
@@ -26,38 +26,69 @@ agent=ChatbotAgent()
26
  graph = agent.chatbot_graph()
27
 
28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  @router.post("/analytics-chatbot")
30
  def get_analytics(request: AnalyticsChatRequest):
31
  user_query = process_query(request.msg)
32
- print('Processed user query:', user_query)
33
-
34
- cache_key = f"analytics:{hashlib.md5(user_query.encode()).hexdigest()}"
35
- cached_response = redis_client.get(cache_key)
36
- print('cached-response:', cached_response)
 
 
 
 
 
 
37
 
38
- if cached_response:
39
- response_to_cache = json.loads(cached_response)
 
40
  else:
41
- response_to_cache = {}
42
-
43
- if not response_to_cache.get('response') or not response_to_cache.get('endpoint'):
44
- config = {"configurable": {"thread_id": "analytics-chatbot-thread"},
45
- "run_name": "analytics-chatbot"}
46
- result = graph.invoke({'messages': user_query}, config=config)
47
-
48
- if result.get('backup_data') is not None:
49
- response_to_cache['backup_response'] = result['backup_data']
50
- else:
51
- response_to_cache['response'] = result['response']
52
- response_to_cache['endpoint'] = result['endpoint']
53
-
54
- if request.image_base64 and not response_to_cache.get('description'):
55
- description = generate_analytics_description(user_query, request.image_base64)
56
- if description is not None:
57
- response_to_cache['description'] = description
58
-
59
- redis_client.set(cache_key, json.dumps(response_to_cache), ex=3000)
60
- return response_to_cache
61
 
62
 
63
 
 
26
  graph = agent.chatbot_graph()
27
 
28
 
29
+ # @router.post("/analytics-chatbot")
30
+ # def get_analytics(request: AnalyticsChatRequest):
31
+ # user_query = process_query(request.msg)
32
+ # print('Processed user query:', user_query)
33
+
34
+ # cache_key = f"analytics:{hashlib.md5(user_query.encode()).hexdigest()}"
35
+ # cached_response = redis_client.get(cache_key)
36
+ # print('cached-response:', cached_response)
37
+
38
+ # if cached_response:
39
+ # response_to_cache = json.loads(cached_response)
40
+ # else:
41
+ # response_to_cache = {}
42
+
43
+ # if not response_to_cache.get('response') or not response_to_cache.get('endpoint'):
44
+ # config = {"configurable": {"thread_id": "analytics-chatbot-thread"},
45
+ # "run_name": "analytics-chatbot"}
46
+ # result = graph.invoke({'messages': user_query}, config=config)
47
+
48
+ # if result.get('backup_data') is not None:
49
+ # response_to_cache['backup_response'] = result['backup_data']
50
+ # else:
51
+ # response_to_cache['response'] = result['response']
52
+ # response_to_cache['endpoint'] = result['endpoint']
53
+
54
+ # if request.image_base64 and not response_to_cache.get('description'):
55
+ # description = generate_analytics_description(user_query, request.image_base64)
56
+ # if description is not None:
57
+ # response_to_cache['description'] = description
58
+
59
+ # redis_client.set(cache_key, json.dumps(response_to_cache), ex=3000)
60
+ # return response_to_cache
61
+
62
  @router.post("/analytics-chatbot")
63
  def get_analytics(request: AnalyticsChatRequest):
64
  user_query = process_query(request.msg)
65
+ print("Processed user query:", user_query)
66
+
67
+ response_to_return = {}
68
+
69
+ # Always invoke the graph (no caching)
70
+ config = {
71
+ "configurable": {"thread_id": "analytics-chatbot-thread"},
72
+ "run_name": "analytics-chatbot"
73
+ }
74
+
75
+ result = graph.invoke({'messages': user_query}, config=config)
76
 
77
+ # Handle primary vs backup response
78
+ if result.get('backup_data') is not None:
79
+ response_to_return['backup_response'] = result['backup_data']
80
  else:
81
+ response_to_return['response'] = result.get('response')
82
+ # response_to_return['endpoint'] = result.get('endpoint')
83
+
84
+ # # Handle image description if image is provided
85
+ # if request.image_base64:
86
+ # description = generate_analytics_description(user_query, request.image_base64)
87
+ # if description is not None:
88
+ # response_to_return['description'] = description
89
+
90
+ return response_to_return
91
+
 
 
 
 
 
 
 
 
 
92
 
93
 
94
 
api/schemas/analytics_chatbot.py CHANGED
@@ -2,5 +2,4 @@ from pydantic import BaseModel
2
  from typing import Optional
3
 
4
  class AnalyticsChatRequest(BaseModel):
5
- msg: str
6
- image_base64:Optional[str] = None
 
2
  from typing import Optional
3
 
4
  class AnalyticsChatRequest(BaseModel):
5
+ msg: str
 
logs/access.log CHANGED
@@ -1847,3 +1847,67 @@
1847
  2025-11-18 13:36:20,855 | INFO | access_logger | app.py:20 | Response status: 200
1848
  2025-11-18 13:36:37,986 | INFO | access_logger | app.py:18 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1849
  2025-11-18 13:36:43,906 | INFO | access_logger | app.py:20 | Response status: 200
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1847
  2025-11-18 13:36:20,855 | INFO | access_logger | app.py:20 | Response status: 200
1848
  2025-11-18 13:36:37,986 | INFO | access_logger | app.py:18 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1849
  2025-11-18 13:36:43,906 | INFO | access_logger | app.py:20 | Response status: 200
1850
+ 2026-01-27 11:48:27,640 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/
1851
+ 2026-01-27 11:48:27,641 | INFO | access_logger | api/main.py:21 | Response status: 200
1852
+ 2026-01-27 11:48:27,650 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/favicon.ico
1853
+ 2026-01-27 11:48:27,650 | INFO | access_logger | api/main.py:21 | Response status: 404
1854
+ 2026-01-27 11:48:31,040 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/api/docs/
1855
+ 2026-01-27 11:48:31,040 | INFO | access_logger | api/main.py:21 | Response status: 404
1856
+ 2026-01-27 11:48:31,638 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1857
+ 2026-01-27 11:48:31,638 | INFO | access_logger | api/main.py:21 | Response status: 200
1858
+ 2026-01-27 11:48:32,047 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1859
+ 2026-01-27 11:48:32,055 | INFO | access_logger | api/main.py:21 | Response status: 200
1860
+ 2026-01-27 11:51:41,870 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1861
+ 2026-01-27 11:51:41,871 | INFO | access_logger | api/main.py:21 | Response status: 200
1862
+ 2026-01-27 11:51:41,915 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1863
+ 2026-01-27 11:51:41,922 | INFO | access_logger | api/main.py:21 | Response status: 200
1864
+ 2026-01-27 11:52:25,526 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1865
+ 2026-01-27 11:55:51,806 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1866
+ 2026-01-27 11:57:12,540 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1867
+ 2026-01-27 11:57:18,353 | INFO | access_logger | api/main.py:21 | Response status: 200
1868
+ 2026-01-27 11:59:38,797 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1869
+ 2026-01-27 11:59:43,649 | INFO | access_logger | api/main.py:21 | Response status: 200
1870
+ 2026-01-28 12:36:17,287 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/
1871
+ 2026-01-28 12:36:17,288 | INFO | access_logger | api/main.py:21 | Response status: 200
1872
+ 2026-01-28 12:36:20,280 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/api/docs/
1873
+ 2026-01-28 12:36:20,280 | INFO | access_logger | api/main.py:21 | Response status: 404
1874
+ 2026-01-28 12:36:21,692 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/api/docs/
1875
+ 2026-01-28 12:36:21,693 | INFO | access_logger | api/main.py:21 | Response status: 404
1876
+ 2026-01-28 12:36:29,727 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs/
1877
+ 2026-01-28 12:36:29,728 | INFO | access_logger | api/main.py:21 | Response status: 307
1878
+ 2026-01-28 12:36:29,959 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1879
+ 2026-01-28 12:36:29,960 | INFO | access_logger | api/main.py:21 | Response status: 200
1880
+ 2026-01-28 12:36:30,354 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1881
+ 2026-01-28 12:36:30,363 | INFO | access_logger | api/main.py:21 | Response status: 200
1882
+ 2026-01-28 12:39:19,755 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1883
+ 2026-01-28 12:39:26,554 | INFO | access_logger | api/main.py:21 | Response status: 200
1884
+ 2026-01-28 12:42:13,063 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1885
+ 2026-01-28 12:42:18,846 | INFO | access_logger | api/main.py:21 | Response status: 200
1886
+ 2026-01-28 12:43:13,014 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1887
+ 2026-01-28 12:43:18,591 | INFO | access_logger | api/main.py:21 | Response status: 200
1888
+ 2026-01-28 12:44:46,720 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1889
+ 2026-01-28 12:44:51,546 | INFO | access_logger | api/main.py:21 | Response status: 200
1890
+ 2026-01-28 12:46:30,957 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1891
+ 2026-01-28 12:46:38,936 | INFO | access_logger | api/main.py:21 | Response status: 200
1892
+ 2026-01-28 12:47:27,890 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1893
+ 2026-01-28 12:47:32,087 | INFO | access_logger | api/main.py:21 | Response status: 200
1894
+ 2026-01-28 12:48:28,043 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/api/docs/
1895
+ 2026-01-28 12:48:28,044 | INFO | access_logger | api/main.py:21 | Response status: 404
1896
+ 2026-01-28 12:48:31,941 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1897
+ 2026-01-28 12:48:31,942 | INFO | access_logger | api/main.py:21 | Response status: 200
1898
+ 2026-01-28 12:48:32,004 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1899
+ 2026-01-28 12:48:32,011 | INFO | access_logger | api/main.py:21 | Response status: 200
1900
+ 2026-01-28 12:49:32,758 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1901
+ 2026-01-28 12:49:38,290 | INFO | access_logger | api/main.py:21 | Response status: 200
1902
+ 2026-01-28 12:50:13,517 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/api/docs/
1903
+ 2026-01-28 12:50:13,519 | INFO | access_logger | api/main.py:21 | Response status: 404
1904
+ 2026-01-28 12:50:16,334 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1905
+ 2026-01-28 12:50:16,334 | INFO | access_logger | api/main.py:21 | Response status: 200
1906
+ 2026-01-28 12:50:16,379 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1907
+ 2026-01-28 12:50:16,380 | INFO | access_logger | api/main.py:21 | Response status: 200
1908
+ 2026-01-28 12:50:46,245 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1909
+ 2026-01-28 12:50:49,854 | INFO | access_logger | api/main.py:21 | Response status: 200
1910
+ 2026-01-28 13:03:32,192 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/api/docs/
1911
+ 2026-01-28 13:03:32,194 | INFO | access_logger | api/main.py:21 | Response status: 404
1912
+ 2026-01-28 13:03:33,251 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/api/docs/
1913
+ 2026-01-28 13:03:33,252 | INFO | access_logger | api/main.py:21 | Response status: 404
requirements.txt CHANGED
@@ -1,6 +1,7 @@
1
  langgraph==0.6.5
2
  langsmith==0.4.16
3
- langchain_groq==0.3.7
 
4
  pydantic==2.11.7
5
  datasets==4.0.0
6
  faiss-cpu==1.12.0
@@ -22,6 +23,6 @@ tiktoken==0.11.0
22
  langchain-anthropic==0.3.19
23
  pytest==8.4.1
24
  langchain_google_genai==2.1.9
25
- mangum
26
  redis==7.0.0
27
- google-genai
 
1
  langgraph==0.6.5
2
  langsmith==0.4.16
3
+ langchain-openai
4
+ langchain-groq
5
  pydantic==2.11.7
6
  datasets==4.0.0
7
  faiss-cpu==1.12.0
 
23
  langchain-anthropic==0.3.19
24
  pytest==8.4.1
25
  langchain_google_genai==2.1.9
26
+ mangum==0.19.0
27
  redis==7.0.0
28
+ google-genai==1.50.1
src/genai/analytics_chatbot/utils/endpoint_graphs.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "endpoint": "/api/v1/overview/buzz_trend",
4
+ "graph": "vertical_bar_chart"
5
+ },
6
+ {
7
+ "endpoint": "/api/v1/overview/basic_info",
8
+ "graph": "kpi_cards"
9
+ },
10
+ {
11
+ "endpoint": "/api/v1/overview/sentiment_trend",
12
+ "graph": "line_chart"
13
+ }
14
+ ]
15
+
src/genai/analytics_chatbot/utils/nodes.py CHANGED
@@ -77,22 +77,7 @@ class RetrieveExactEndpoint:
77
  "needed_parameters": endpoint_info["parameters"]
78
  }
79
 
80
- class QueryCheckNode:
81
- def __init__(self):
82
- self.llm = llm_gpt
83
 
84
- def run(self, state:State):
85
- try:
86
- print('Entered to query checking')
87
- messages = [SystemMessage(content=query_check_prompt),
88
- HumanMessage(content=f'''The user query is: {state['latest_message']}''')]
89
- result = self.llm.invoke(messages)
90
- print(result.content)
91
- return{'query_type': result.content}
92
-
93
- except Exception as e:
94
- print('Error occoured:', e)
95
- return {'error_message': str(e)}
96
 
97
 
98
  class FetchParametersNode:
@@ -111,17 +96,6 @@ class FetchParametersNode:
111
  result = self.llm.with_structured_output(ParameterFormatter, method='function_calling').invoke(messages)
112
  parameters_values = {k: (process_query(v) if isinstance(v, str) else v) for k, v in result.parameters_values.items()}
113
 
114
- # if 'single_influencer_query' in state['query_type']:
115
- # print('The parameter values:', parameters_values)
116
- # return {
117
- # 'parameters_values':parameters_values
118
- # }
119
- # elif 'aggregate_query' in state['query_type']:
120
- # parameters_values['influencer_username'] = ['divyadhakal_','munachiya','mydarlingfood','_its.me.muskan_']
121
- # print('The parameter values:', parameters_values)
122
- # return{
123
- # 'parameters_values': parameters_values
124
- # }
125
  print('The parameter values:', parameters_values)
126
  return {'parameters_values': parameters_values}
127
  except Exception as e:
@@ -159,32 +133,17 @@ class FetchDataNode:
159
  print('Entered to handler.')
160
  handler = self.endpoint_handlers[state['endpoint']]
161
  response = handler(state, llm_gpt, url)
162
- print('Returned by handler.')
 
163
  return {'response':response.json()}
164
 
165
 
166
  elif 'single_influencer_query' in state['query_type']:
167
  response = requests.get(url, params=state['parameters_values'],headers=self.headers)
168
- print('Data from api:', response)
169
  return {'response':response.json()}
170
 
171
- # elif 'aggregate_query' in state['query_type']:
172
- # print('Entered to aggregrated query execution')
173
- # print(state['parameters_values'])
174
- # params = state["parameters_values"]
175
- # if "influencer_username" in params and isinstance(params["influencer_username"], list):
176
- # results = {}
177
-
178
- # # Iterate through each influencer username
179
- # for username in params["influencer_username"]:
180
- # current_params = params.copy()
181
- # current_params["influencer_username"] = username
182
-
183
- # response = requests.get(url, params=current_params, headers=self.headers)
184
- # results[username] = response.json() # Store influencer-wise response
185
- # print('Data from api:', response)
186
- # return {"response": results}
187
-
188
 
189
  except Exception as e:
190
  print('Error occoured:', e)
 
77
  "needed_parameters": endpoint_info["parameters"]
78
  }
79
 
 
 
 
80
 
 
 
 
 
 
 
 
 
 
 
 
 
81
 
82
 
83
  class FetchParametersNode:
 
96
  result = self.llm.with_structured_output(ParameterFormatter, method='function_calling').invoke(messages)
97
  parameters_values = {k: (process_query(v) if isinstance(v, str) else v) for k, v in result.parameters_values.items()}
98
 
 
 
 
 
 
 
 
 
 
 
 
99
  print('The parameter values:', parameters_values)
100
  return {'parameters_values': parameters_values}
101
  except Exception as e:
 
133
  print('Entered to handler.')
134
  handler = self.endpoint_handlers[state['endpoint']]
135
  response = handler(state, llm_gpt, url)
136
+ print('Returned by handler:', response.json())
137
+
138
  return {'response':response.json()}
139
 
140
 
141
  elif 'single_influencer_query' in state['query_type']:
142
  response = requests.get(url, params=state['parameters_values'],headers=self.headers)
143
+ print('Data from api:', response.json())
144
  return {'response':response.json()}
145
 
146
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
 
148
  except Exception as e:
149
  print('Error occoured:', e)