subashpoudel commited on
Commit
8ce97f0
·
1 Parent(s): 872d043

Implemented the analytics tool in orchestrator

Browse files
Files changed (39) hide show
  1. .DS_Store +0 -0
  2. api/routers/__pycache__/orchestration.cpython-313.pyc +0 -0
  3. api/routers/analytics_chatbot.py +3 -5
  4. api/routers/generate_final_story.py +1 -1
  5. api/routers/generate_image.py +1 -1
  6. api/routers/human_idea_refining.py +4 -4
  7. api/routers/orchestration.py +15 -9
  8. api/routers/show_analytics.py +1 -1
  9. check.py +6 -25
  10. logs/access.log +195 -0
  11. logs/app.log +9 -0
  12. logs/errors.log +34 -0
  13. src/.DS_Store +0 -0
  14. src/genai/analytics_chatbot/utils/name_variations.json +92 -28
  15. src/genai/analytics_chatbot/utils/nodes.py +25 -3
  16. src/genai/analytics_chatbot/utils/prompts.py +9 -1
  17. src/genai/analytics_chatbot/utils/schemas.py +5 -1
  18. src/genai/analytics_chatbot/utils/state.py +1 -0
  19. src/genai/analytics_chatbot/utils/streamlit_app.py +50 -0
  20. src/genai/analytics_chatbot/utils/utils.py +2 -1
  21. src/genai/orchestration_agent/__pycache__/agent.cpython-313.pyc +0 -0
  22. src/genai/orchestration_agent/agent.py +16 -41
  23. src/genai/orchestration_agent/utils/__pycache__/nodes.cpython-313.pyc +0 -0
  24. src/genai/orchestration_agent/utils/__pycache__/prompts.cpython-313.pyc +0 -0
  25. src/genai/orchestration_agent/utils/nodes.py +108 -9
  26. src/genai/orchestration_agent/utils/prompts.py +56 -13
  27. src/genai/orchestration_agent/utils/schemas.py +16 -0
  28. src/genai/orchestration_agent/utils/state.py +12 -17
  29. src/genai/orchestration_agent/utils/tools.py +4 -1
  30. src/genai/orchestration_agent/utils/utils.py +16 -0
  31. src/genai/utils/__pycache__/models_loader.cpython-313.pyc +0 -0
  32. src/genai/utils/base_endpoint.py +1 -0
  33. src/genai/utils/models_loader.py +12 -11
  34. streamlit_app.py +50 -0
  35. tests/test_analytics.py +1 -1
  36. tests/test_analytics_chatbot.py +10 -0
  37. tests/test_final_story.py +1 -1
  38. tests/test_image_generation.py +1 -1
  39. tests/test_orchestrator.py +1 -1
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
api/routers/__pycache__/orchestration.cpython-313.pyc CHANGED
Binary files a/api/routers/__pycache__/orchestration.cpython-313.pyc and b/api/routers/__pycache__/orchestration.cpython-313.pyc differ
 
api/routers/analytics_chatbot.py CHANGED
@@ -15,15 +15,13 @@ router = APIRouter()
15
  agent=ChatbotAgent()
16
  graph = agent.chatbot_graph()
17
 
18
- @router.post("/analytics-chatbot")
19
- def context_analysis(msg: UserMessage):
20
- user_query=process_query(msg.message)
21
  config={"configurable": {"thread_id": "analytics-chatbot-thread"}}
22
  try:
23
  result=graph.invoke({'messages':user_query},config=config)
24
  return {
25
- 'endpoint': result['endpoint'] ,
26
- 'parameters': result['parameters'],
27
  'response': result['response'],
28
  }
29
  except Exception as e:
 
15
  agent=ChatbotAgent()
16
  graph = agent.chatbot_graph()
17
 
18
+ @router.get("/analytics-chatbot")
19
+ def get_analytics(msg:str):
20
+ user_query=process_query(msg)
21
  config={"configurable": {"thread_id": "analytics-chatbot-thread"}}
22
  try:
23
  result=graph.invoke({'messages':user_query},config=config)
24
  return {
 
 
25
  'response': result['response'],
26
  }
27
  except Exception as e:
api/routers/generate_final_story.py CHANGED
@@ -8,7 +8,7 @@ router= APIRouter()
8
  app_logger = logging.getLogger("app_logger")
9
  error_logger = logging.getLogger("error_logger")
10
 
11
- @router.post("/generate-final-story")
12
  def generate_final_story_endpoint():
13
  try:
14
  def event_stream():
 
8
  app_logger = logging.getLogger("app_logger")
9
  error_logger = logging.getLogger("error_logger")
10
 
11
+ @router.get("/generate-final-story")
12
  def generate_final_story_endpoint():
13
  try:
14
  def event_stream():
api/routers/generate_image.py CHANGED
@@ -8,7 +8,7 @@ router = APIRouter()
8
  app_logger = logging.getLogger("app_logger")
9
  error_logger = logging.getLogger("error_logger")
10
 
11
- @router.post("/generate-image")
12
  def generate_image_endpoint():
13
  try:
14
  image = ImageGenerator().generate_image(str(stored_data.get('final_story',image_gen_backup_prompt))
 
8
  app_logger = logging.getLogger("app_logger")
9
  error_logger = logging.getLogger("error_logger")
10
 
11
+ @router.get("/generate-image")
12
  def generate_image_endpoint():
13
  try:
14
  image = ImageGenerator().generate_image(str(stored_data.get('final_story',image_gen_backup_prompt))
api/routers/human_idea_refining.py CHANGED
@@ -9,16 +9,16 @@ human_refine_graph = human_refined_idea()
9
  app_logger = logging.getLogger("app_logger")
10
  error_logger = logging.getLogger("error_logger")
11
 
12
- @router.post("/human-idea-refining")
13
- def human_idea_refine_endpoint(request:RefineIdeationRequest):
14
  try:
15
- stored_data['human_ideation_interactions'].append({"role": "user", "content": request.query})
16
  response = human_refine_graph.invoke(
17
  {
18
  'query': stored_data['human_ideation_interactions'],
19
  'business_details': stored_data["business_details"],
20
  'final_ideation': stored_data.get('final_ideation',["","","",""]),
21
- },config={"configurable": {"thread_id": request.thread_id}}
22
  )
23
  stored_data['human_ideation_interactions'].append({"role": "assistant", "content": response['result']})
24
  stored_data['refined_ideation'] = stored_data['human_ideation_interactions'][-1]['content']
 
9
  app_logger = logging.getLogger("app_logger")
10
  error_logger = logging.getLogger("error_logger")
11
 
12
+ @router.get("/human-idea-refining")
13
+ def human_idea_refine_endpoint(query:str):
14
  try:
15
+ stored_data['human_ideation_interactions'].append({"role": "user", "content": query})
16
  response = human_refine_graph.invoke(
17
  {
18
  'query': stored_data['human_ideation_interactions'],
19
  'business_details': stored_data["business_details"],
20
  'final_ideation': stored_data.get('final_ideation',["","","",""]),
21
+ },config={"configurable": {"thread_id": "human-idea-refine-thread"}}
22
  )
23
  stored_data['human_ideation_interactions'].append({"role": "assistant", "content": response['result']})
24
  stored_data['refined_ideation'] = stored_data['human_ideation_interactions'][-1]['content']
api/routers/orchestration.py CHANGED
@@ -2,28 +2,34 @@ import logging
2
  from fastapi import APIRouter
3
  from api.stored_data import stored_data
4
  from src.genai.orchestration_agent.agent import OrchestrationAgent
 
5
  from api.schemas.orchestration import OrchestrationRequest
 
6
 
7
  app_logger = logging.getLogger("app_logger")
8
  error_logger = logging.getLogger("error_logger")
9
 
10
  router= APIRouter()
11
  agent = OrchestrationAgent()
 
12
 
13
  @router.post("/orchestration", description="Gives the analytics of influencers.")
14
  def orchestration_endpoint(request:OrchestrationRequest):
15
  try:
16
- result = agent.chat(request.message , request.image_base64)
17
- if result.image_caption != '': stored_data['image_caption']=result.image_caption
18
- if result.video_idea !='' or result.video_idea != 'null': stored_data['refined_ideation']= result.video_idea
19
- if result.video_story!='' or result.video_story!='null': stored_data['final_story']= result.video_story
 
 
 
 
 
20
  app_logger.info('Orchestrator executed')
21
 
22
- return {'tool_response': result.tool ,
23
- 'message_response': result.query_response,
24
- 'image_caption':result.image_caption,
25
- 'video_idea': result.video_idea,
26
- 'video_story': result.video_story}
27
  except Exception as e:
28
  error_logger.error(f'Unable to run orchestration: {e}')
29
  raise
 
2
  from fastapi import APIRouter
3
  from api.stored_data import stored_data
4
  from src.genai.orchestration_agent.agent import OrchestrationAgent
5
+ # from src.genai.orchestration_agent.utils.utils import handle_tools
6
  from api.schemas.orchestration import OrchestrationRequest
7
+ from check import handle_tools
8
 
9
  app_logger = logging.getLogger("app_logger")
10
  error_logger = logging.getLogger("error_logger")
11
 
12
  router= APIRouter()
13
  agent = OrchestrationAgent()
14
+ graph = agent.orchestration_graph()
15
 
16
  @router.post("/orchestration", description="Gives the analytics of influencers.")
17
  def orchestration_endpoint(request:OrchestrationRequest):
18
  try:
19
+ config={"configurable": {"thread_id": "orchestration-thread"}}
20
+ result = graph.invoke({'messages':[request.message],
21
+ 'image_base64': request.image_base64}, config=config)
22
+ print('Result tools:', result['tools'])
23
+ tools=handle_tools(result['tools'], stored_data)
24
+ print('Tools:',tools)
25
+ if result['image_caption'] is not None: stored_data['image_caption']=result['image_caption']
26
+ # if result['video_idea'] !='' or result.video_idea != 'null': stored_data['refined_ideation']= result['video_idea']
27
+ # if result['video_story']!='' or result.video_story!='null': stored_data['final_story']= result['video_story']
28
  app_logger.info('Orchestrator executed')
29
 
30
+ return {'tool_response':tools,
31
+ 'message_response': result['query_response'],}
32
+
 
 
33
  except Exception as e:
34
  error_logger.error(f'Unable to run orchestration: {e}')
35
  raise
api/routers/show_analytics.py CHANGED
@@ -7,7 +7,7 @@ app_logger = logging.getLogger("app_logger")
7
  error_logger = logging.getLogger("error_logger")
8
 
9
  router=APIRouter()
10
- @router.post("/show-analytics")
11
  def show_analytics_endpoint():
12
  try:
13
  response = AnalyticsViewer(stored_data['business_details']).show_analytics()
 
7
  error_logger = logging.getLogger("error_logger")
8
 
9
  router=APIRouter()
10
+ @router.get("/show-analytics")
11
  def show_analytics_endpoint():
12
  try:
13
  response = AnalyticsViewer(stored_data['business_details']).show_analytics()
check.py CHANGED
@@ -1,28 +1,9 @@
1
- import requests
 
 
2
 
3
- # Define the endpoint URL
4
- url = "https://reveltrends.vercel.app/api/v1/overview/buzz_trend"
 
5
 
6
- # Set the parameters
7
- params = {
8
- "influencer_username": "divyadhakal_",
9
- "period": "weekly"
10
- }
11
-
12
- # Optional: headers if your API requires authentication
13
- headers = {
14
- "Authorization": "Bearer YOUR_API_KEY", # replace with your API key if needed
15
- "Content-Type": "application/json"
16
- }
17
-
18
- # Make the GET request
19
- response = requests.get(url, params=params, headers=headers)
20
-
21
- # Check if the request was successful
22
- if response.status_code == 200:
23
- data = response.json()
24
- print("Response data:", data)
25
- else:
26
- print("Request failed with status code:", response.status_code)
27
- print("Response:", response.text)
28
 
 
1
+ state = {}
2
+ state['tools']= ['ideation', 'generate-story']
3
+ base_url = 'http://127.0.0.1:8000/api/'
4
 
5
+ for tool in state['tools']:
6
+ url = f'''{base_url}{tool}'''
7
+ print('The url is:', url)
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
logs/access.log CHANGED
@@ -952,3 +952,198 @@
952
  2025-09-14 17:44:12,219 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
953
  2025-09-14 17:45:22,641 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
954
  2025-09-14 17:45:29,855 | INFO | access_logger | api/main.py:21 | Response status: 200
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
952
  2025-09-14 17:44:12,219 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
953
  2025-09-14 17:45:22,641 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
954
  2025-09-14 17:45:29,855 | INFO | access_logger | api/main.py:21 | Response status: 200
955
+ 2025-09-15 11:50:06,693 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/analytics-chatbot
956
+ 2025-09-15 11:50:06,694 | INFO | access_logger | api/main.py:21 | Response status: 404
957
+ 2025-09-15 11:50:56,104 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
958
+ 2025-09-15 11:50:57,898 | INFO | access_logger | api/main.py:21 | Response status: 200
959
+ 2025-09-15 11:52:47,352 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
960
+ 2025-09-15 11:52:48,299 | INFO | access_logger | api/main.py:21 | Response status: 200
961
+ 2025-09-15 11:53:07,927 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
962
+ 2025-09-15 11:53:09,685 | INFO | access_logger | api/main.py:21 | Response status: 200
963
+ 2025-09-15 11:53:47,286 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
964
+ 2025-09-15 11:53:49,010 | INFO | access_logger | api/main.py:21 | Response status: 200
965
+ 2025-09-15 11:54:59,228 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
966
+ 2025-09-15 11:55:01,098 | INFO | access_logger | api/main.py:21 | Response status: 200
967
+ 2025-09-15 11:55:11,270 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
968
+ 2025-09-15 11:55:12,882 | INFO | access_logger | api/main.py:21 | Response status: 200
969
+ 2025-09-15 11:58:40,712 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
970
+ 2025-09-15 11:58:42,390 | INFO | access_logger | api/main.py:21 | Response status: 200
971
+ 2025-09-15 11:59:07,842 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
972
+ 2025-09-15 11:59:09,628 | INFO | access_logger | api/main.py:21 | Response status: 200
973
+ 2025-09-15 12:07:35,403 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
974
+ 2025-09-15 12:07:42,354 | INFO | access_logger | api/main.py:21 | Response status: 200
975
+ 2025-09-15 12:08:33,997 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
976
+ 2025-09-15 12:08:35,706 | INFO | access_logger | api/main.py:21 | Response status: 200
977
+ 2025-09-15 12:08:51,470 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
978
+ 2025-09-15 12:08:53,422 | INFO | access_logger | api/main.py:21 | Response status: 200
979
+ 2025-09-15 12:09:35,825 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
980
+ 2025-09-15 12:09:36,827 | INFO | access_logger | api/main.py:21 | Response status: 200
981
+ 2025-09-15 12:09:59,297 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
982
+ 2025-09-15 12:10:01,009 | INFO | access_logger | api/main.py:21 | Response status: 200
983
+ 2025-09-15 12:11:02,343 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
984
+ 2025-09-15 12:11:03,949 | INFO | access_logger | api/main.py:21 | Response status: 200
985
+ 2025-09-15 12:21:45,185 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
986
+ 2025-09-15 12:21:51,741 | INFO | access_logger | api/main.py:21 | Response status: 200
987
+ 2025-09-15 12:22:08,309 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
988
+ 2025-09-15 12:22:10,521 | INFO | access_logger | api/main.py:21 | Response status: 200
989
+ 2025-09-15 12:23:05,352 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
990
+ 2025-09-15 12:23:07,148 | INFO | access_logger | api/main.py:21 | Response status: 200
991
+ 2025-09-15 12:34:44,250 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
992
+ 2025-09-15 12:34:53,895 | INFO | access_logger | api/main.py:21 | Response status: 200
993
+ 2025-09-15 12:35:07,580 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
994
+ 2025-09-15 12:35:08,516 | INFO | access_logger | api/main.py:21 | Response status: 200
995
+ 2025-09-15 12:35:17,618 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
996
+ 2025-09-15 12:35:19,328 | INFO | access_logger | api/main.py:21 | Response status: 200
997
+ 2025-09-15 12:35:34,930 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
998
+ 2025-09-15 12:35:36,582 | INFO | access_logger | api/main.py:21 | Response status: 200
999
+ 2025-09-15 13:58:13,346 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1000
+ 2025-09-15 13:58:15,706 | INFO | access_logger | api/main.py:21 | Response status: 200
1001
+ 2025-09-15 14:06:50,566 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1002
+ 2025-09-15 14:06:56,615 | INFO | access_logger | api/main.py:21 | Response status: 200
1003
+ 2025-09-15 14:09:49,888 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1004
+ 2025-09-15 14:09:56,096 | INFO | access_logger | api/main.py:21 | Response status: 200
1005
+ 2025-09-15 14:10:03,978 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1006
+ 2025-09-15 14:10:05,630 | INFO | access_logger | api/main.py:21 | Response status: 200
1007
+ 2025-09-16 11:33:01,536 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1008
+ 2025-09-16 11:33:03,977 | INFO | access_logger | api/main.py:21 | Response status: 200
1009
+ 2025-09-16 11:35:38,289 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1010
+ 2025-09-16 11:35:40,132 | INFO | access_logger | api/main.py:21 | Response status: 200
1011
+ 2025-09-16 11:43:04,556 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1012
+ 2025-09-16 11:43:05,586 | INFO | access_logger | api/main.py:21 | Response status: 200
1013
+ 2025-09-16 11:43:19,023 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1014
+ 2025-09-16 11:43:20,025 | INFO | access_logger | api/main.py:21 | Response status: 200
1015
+ 2025-09-16 11:43:23,137 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1016
+ 2025-09-16 11:43:24,147 | INFO | access_logger | api/main.py:21 | Response status: 200
1017
+ 2025-09-16 11:43:25,798 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1018
+ 2025-09-16 11:43:26,762 | INFO | access_logger | api/main.py:21 | Response status: 200
1019
+ 2025-09-16 11:43:44,959 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1020
+ 2025-09-16 11:43:46,013 | INFO | access_logger | api/main.py:21 | Response status: 200
1021
+ 2025-09-16 11:43:47,261 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1022
+ 2025-09-16 11:43:48,176 | INFO | access_logger | api/main.py:21 | Response status: 200
1023
+ 2025-09-16 11:43:49,295 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1024
+ 2025-09-16 11:43:50,327 | INFO | access_logger | api/main.py:21 | Response status: 200
1025
+ 2025-09-16 11:43:51,418 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1026
+ 2025-09-16 11:43:52,479 | INFO | access_logger | api/main.py:21 | Response status: 200
1027
+ 2025-09-16 11:50:26,191 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1028
+ 2025-09-16 11:50:27,459 | INFO | access_logger | api/main.py:21 | Response status: 200
1029
+ 2025-09-16 11:52:28,068 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1030
+ 2025-09-16 11:52:34,115 | INFO | access_logger | api/main.py:21 | Response status: 200
1031
+ 2025-09-16 11:53:45,854 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1032
+ 2025-09-16 11:53:48,263 | INFO | access_logger | api/main.py:21 | Response status: 200
1033
+ 2025-09-16 11:54:21,141 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1034
+ 2025-09-16 11:54:23,279 | INFO | access_logger | api/main.py:21 | Response status: 200
1035
+ 2025-09-16 11:54:55,691 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1036
+ 2025-09-16 11:54:57,176 | INFO | access_logger | api/main.py:21 | Response status: 200
1037
+ 2025-09-16 12:10:16,748 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1038
+ 2025-09-16 12:10:23,090 | INFO | access_logger | api/main.py:21 | Response status: 200
1039
+ 2025-09-16 13:22:28,412 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1040
+ 2025-09-16 13:22:28,415 | INFO | access_logger | api/main.py:21 | Response status: 200
1041
+ 2025-09-16 13:22:28,560 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1042
+ 2025-09-16 13:22:28,569 | INFO | access_logger | api/main.py:21 | Response status: 200
1043
+ 2025-09-16 13:22:51,871 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1044
+ 2025-09-16 13:22:58,600 | INFO | access_logger | api/main.py:21 | Response status: 200
1045
+ 2025-09-16 13:32:55,717 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1046
+ 2025-09-16 13:33:02,195 | INFO | access_logger | api/main.py:21 | Response status: 200
1047
+ 2025-09-17 11:30:00,428 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1048
+ 2025-09-17 11:30:16,056 | INFO | access_logger | api/main.py:21 | Response status: 200
1049
+ 2025-09-17 11:31:42,301 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1050
+ 2025-09-17 11:31:43,870 | INFO | access_logger | api/main.py:21 | Response status: 200
1051
+ 2025-09-17 11:33:56,543 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1052
+ 2025-09-17 11:33:57,964 | INFO | access_logger | api/main.py:21 | Response status: 200
1053
+ 2025-09-17 11:37:45,150 | INFO | access_logger | api/main.py:19 | Request: POST http://localhost:8000/api/analytics-chatbot
1054
+ 2025-09-17 11:37:55,572 | INFO | access_logger | api/main.py:21 | Response status: 200
1055
+ 2025-09-17 16:34:27,154 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1056
+ 2025-09-17 16:34:27,156 | INFO | access_logger | api/main.py:21 | Response status: 200
1057
+ 2025-09-17 16:34:27,201 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1058
+ 2025-09-17 16:34:27,208 | INFO | access_logger | api/main.py:21 | Response status: 200
1059
+ 2025-09-17 16:34:40,700 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1060
+ 2025-09-17 16:40:32,365 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1061
+ 2025-09-17 16:46:58,664 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1062
+ 2025-09-17 16:49:16,103 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1063
+ 2025-09-17 16:50:47,169 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1064
+ 2025-09-17 16:52:54,084 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1065
+ 2025-09-17 16:53:07,080 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1066
+ 2025-09-17 16:53:14,927 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1067
+ 2025-09-17 16:53:16,795 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1068
+ 2025-09-17 16:53:18,461 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1069
+ 2025-09-17 16:53:19,969 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1070
+ 2025-09-17 16:56:54,750 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1071
+ 2025-09-17 16:57:58,866 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1072
+ 2025-09-17 16:58:58,334 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1073
+ 2025-09-17 16:59:42,166 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1074
+ 2025-09-17 17:05:10,257 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1075
+ 2025-09-17 17:06:32,797 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1076
+ 2025-09-17 17:08:18,515 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1077
+ 2025-09-17 17:13:46,506 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1078
+ 2025-09-17 17:15:07,661 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1079
+ 2025-09-17 17:17:47,360 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1080
+ 2025-09-17 17:20:08,828 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1081
+ 2025-09-17 17:24:23,981 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1082
+ 2025-09-17 17:25:34,126 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1083
+ 2025-09-17 17:26:28,448 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1084
+ 2025-09-17 17:26:55,591 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1085
+ 2025-09-18 23:26:31,127 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/
1086
+ 2025-09-18 23:26:31,128 | INFO | access_logger | api/main.py:21 | Response status: 200
1087
+ 2025-09-18 23:26:31,129 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/
1088
+ 2025-09-18 23:26:31,130 | INFO | access_logger | api/main.py:21 | Response status: 200
1089
+ 2025-09-18 23:26:31,140 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/favicon.ico
1090
+ 2025-09-18 23:26:31,140 | INFO | access_logger | api/main.py:21 | Response status: 404
1091
+ 2025-09-18 23:26:34,806 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1092
+ 2025-09-18 23:26:34,807 | INFO | access_logger | api/main.py:21 | Response status: 200
1093
+ 2025-09-18 23:26:35,085 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1094
+ 2025-09-18 23:26:35,093 | INFO | access_logger | api/main.py:21 | Response status: 200
1095
+ 2025-09-18 23:26:46,994 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1096
+ 2025-09-18 23:28:05,982 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1097
+ 2025-09-18 23:34:11,812 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1098
+ 2025-09-18 23:38:47,616 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1099
+ 2025-09-18 23:38:48,527 | INFO | access_logger | api/main.py:21 | Response status: 200
1100
+ 2025-09-18 23:40:12,179 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1101
+ 2025-09-18 23:40:24,570 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1102
+ 2025-09-18 23:40:42,113 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1103
+ 2025-09-18 23:53:07,604 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1104
+ 2025-09-18 23:54:16,252 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1105
+ 2025-09-18 23:54:16,670 | INFO | access_logger | api/main.py:21 | Response status: 200
1106
+ 2025-09-18 23:56:47,087 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1107
+ 2025-09-18 23:59:35,816 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1108
+ 2025-09-18 23:59:40,126 | INFO | access_logger | api/main.py:21 | Response status: 200
1109
+ 2025-09-19 00:03:23,846 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1110
+ 2025-09-19 00:03:24,241 | INFO | access_logger | api/main.py:21 | Response status: 200
1111
+ 2025-09-19 00:04:47,367 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1112
+ 2025-09-19 00:04:47,830 | INFO | access_logger | api/main.py:21 | Response status: 200
1113
+ 2025-09-19 00:05:40,499 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1114
+ 2025-09-19 00:05:40,809 | INFO | access_logger | api/main.py:21 | Response status: 200
1115
+ 2025-09-19 00:06:25,132 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1116
+ 2025-09-19 00:06:25,785 | INFO | access_logger | api/main.py:21 | Response status: 200
1117
+ 2025-09-19 00:09:13,114 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/orchestration
1118
+ 2025-09-19 00:09:13,638 | INFO | access_logger | api/main.py:21 | Response status: 200
1119
+ 2025-09-19 12:50:09,389 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/
1120
+ 2025-09-19 12:50:09,389 | INFO | access_logger | api/main.py:21 | Response status: 200
1121
+ 2025-09-19 12:50:09,390 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/
1122
+ 2025-09-19 12:50:09,391 | INFO | access_logger | api/main.py:21 | Response status: 200
1123
+ 2025-09-19 12:50:09,435 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/favicon.ico
1124
+ 2025-09-19 12:50:09,436 | INFO | access_logger | api/main.py:21 | Response status: 404
1125
+ 2025-09-19 12:50:12,550 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1126
+ 2025-09-19 12:50:12,550 | INFO | access_logger | api/main.py:21 | Response status: 200
1127
+ 2025-09-19 12:50:12,626 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1128
+ 2025-09-19 12:50:12,634 | INFO | access_logger | api/main.py:21 | Response status: 200
1129
+ 2025-09-19 12:52:52,297 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1130
+ 2025-09-19 12:52:52,298 | INFO | access_logger | api/main.py:21 | Response status: 200
1131
+ 2025-09-19 12:52:52,338 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1132
+ 2025-09-19 12:52:52,345 | INFO | access_logger | api/main.py:21 | Response status: 200
1133
+ 2025-09-19 12:52:53,490 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/
1134
+ 2025-09-19 12:52:53,492 | INFO | access_logger | api/main.py:21 | Response status: 200
1135
+ 2025-09-19 12:52:56,202 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1136
+ 2025-09-19 12:52:56,203 | INFO | access_logger | api/main.py:21 | Response status: 200
1137
+ 2025-09-19 12:52:56,238 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1138
+ 2025-09-19 12:52:56,238 | INFO | access_logger | api/main.py:21 | Response status: 200
1139
+ 2025-09-19 12:53:25,025 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20the%20sentiment%20distribution%20of%20divya%20dhakal
1140
+ 2025-09-19 12:54:02,309 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20the%20sentiment%20distribution%20of%20divya%20dhakal
1141
+ 2025-09-19 12:54:09,594 | INFO | access_logger | api/main.py:21 | Response status: 200
1142
+ 2025-09-19 13:01:22,631 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1143
+ 2025-09-19 13:01:22,632 | INFO | access_logger | api/main.py:21 | Response status: 200
1144
+ 2025-09-19 13:01:22,672 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1145
+ 2025-09-19 13:01:22,678 | INFO | access_logger | api/main.py:21 | Response status: 200
1146
+ 2025-09-19 13:03:36,189 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/docs
1147
+ 2025-09-19 13:03:36,190 | INFO | access_logger | api/main.py:21 | Response status: 200
1148
+ 2025-09-19 13:03:36,231 | INFO | access_logger | api/main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json
1149
+ 2025-09-19 13:03:36,237 | INFO | access_logger | api/main.py:21 | Response status: 200
logs/app.log CHANGED
@@ -110,3 +110,12 @@
110
  2025-09-14 13:43:49,289 | INFO | app_logger | api/routers/ideation.py:33 | Executed the ideation pipeline.
111
  2025-09-14 13:46:03,905 | INFO | app_logger | api/routers/ideation.py:33 | Executed the ideation pipeline.
112
  2025-09-14 13:48:33,762 | INFO | app_logger | api/routers/ideation.py:33 | Executed the ideation pipeline.
 
 
 
 
 
 
 
 
 
 
110
  2025-09-14 13:43:49,289 | INFO | app_logger | api/routers/ideation.py:33 | Executed the ideation pipeline.
111
  2025-09-14 13:46:03,905 | INFO | app_logger | api/routers/ideation.py:33 | Executed the ideation pipeline.
112
  2025-09-14 13:48:33,762 | INFO | app_logger | api/routers/ideation.py:33 | Executed the ideation pipeline.
113
+ 2025-09-18 23:38:48,525 | INFO | app_logger | api/routers/orchestration.py:26 | Orchestrator executed
114
+ 2025-09-18 23:54:16,668 | INFO | app_logger | api/routers/orchestration.py:26 | Orchestrator executed
115
+ 2025-09-18 23:56:47,468 | INFO | app_logger | api/routers/orchestration.py:26 | Orchestrator executed
116
+ 2025-09-18 23:59:40,124 | INFO | app_logger | api/routers/orchestration.py:26 | Orchestrator executed
117
+ 2025-09-19 00:03:24,239 | INFO | app_logger | api/routers/orchestration.py:28 | Orchestrator executed
118
+ 2025-09-19 00:04:47,828 | INFO | app_logger | api/routers/orchestration.py:28 | Orchestrator executed
119
+ 2025-09-19 00:05:40,808 | INFO | app_logger | api/routers/orchestration.py:28 | Orchestrator executed
120
+ 2025-09-19 00:06:25,784 | INFO | app_logger | api/routers/orchestration.py:28 | Orchestrator executed
121
+ 2025-09-19 00:09:13,637 | INFO | app_logger | api/routers/orchestration.py:28 | Orchestrator executed
logs/errors.log CHANGED
@@ -40,3 +40,37 @@ unique_selected_ideas.0
40
  unique_selected_ideas.0
41
  Input should be a valid dictionary [type=dict_type, input_value=[{'hook': 'Can my gear su...with special effects.'}], input_type=list]
42
  For further information visit https://errors.pydantic.dev/2.11/v/dict_type
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  unique_selected_ideas.0
41
  Input should be a valid dictionary [type=dict_type, input_value=[{'hook': 'Can my gear su...with special effects.'}], input_type=list]
42
  For further information visit https://errors.pydantic.dev/2.11/v/dict_type
43
+ 2025-09-17 16:34:43,839 | ERROR | error_logger | api/routers/orchestration.py:28 | Unable to run orchestration: Error code: 400 - {'error': {'message': "tool call validation failed: parameters for tool ToolResponseFormatter did not match schema: errors: [missing properties: 'tool', 'query_response']", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=ToolResponseFormatter> {"name": "analytics", "tools": ["analytics"]} </function>'}}
44
+ 2025-09-17 16:40:32,718 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "tool call validation failed: parameters for tool ToolResponseFormatter did not match schema: errors: [missing properties: 'tool']", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=ToolResponseFormatter> {"name": "Influencer Divya dhakal has got 66 likes.", "tools": ["analytics"], "query_response": "Acknowledging your request to get information about influencer Divya dhakal."}</function>'}}
45
+ 2025-09-17 16:46:59,118 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "tool call validation failed: parameters for tool ToolResponseFormatter did not match schema: errors: [missing properties: 'tool']", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=ToolResponseFormatter> {"name": "tools", "tools": [], "query_response": "Hello! I\'m here to help you with generating content ideas, stories, and visuals. How can I assist you today?", "influencer_data": {"name": "Divya Dhakal", "likes": 66}} </function>'}}
46
+ 2025-09-17 16:49:17,668 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "tool call validation failed: parameters for tool ToolResponseFormatter did not match schema: errors: [missing properties: 'tool']", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=ToolResponseFormatter> {"name": "Divya Dhakal", "likes": 66, "query_response": "Acknowledging your request. No tools required for this query."}</function>'}}
47
+ 2025-09-17 16:50:47,677 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "tool call validation failed: parameters for tool ToolResponseFormatter did not match schema: errors: [missing properties: 'tool']", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=ToolResponseFormatter> {"name": "tools", "tools": [], "query_response": "Hello! I\'m here to help you with generating content ideas, stories, and visuals. How can I assist you today?", "influencer_data": {"name": "Divya Dhakal", "likes": 66}} </function>'}}
48
+ 2025-09-17 16:52:54,768 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Failed to call a function. Please adjust your prompt. See 'failed_generation' for more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{}<function>'}}
49
+ 2025-09-17 16:53:07,622 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Failed to call a function. Please adjust your prompt. See 'failed_generation' for more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{}<function>'}}
50
+ 2025-09-17 16:53:15,451 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Failed to call a function. Please adjust your prompt. See 'failed_generation' for more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{}<function>'}}
51
+ 2025-09-17 16:53:17,253 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Failed to call a function. Please adjust your prompt. See 'failed_generation' for more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{}<function>'}}
52
+ 2025-09-17 16:53:18,990 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Failed to call a function. Please adjust your prompt. See 'failed_generation' for more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{}<function>'}}
53
+ 2025-09-17 16:53:28,546 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Failed to call a function. Please adjust your prompt. See 'failed_generation' for more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{}<function>'}}
54
+ 2025-09-17 16:56:55,386 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Failed to call a function. Please adjust your prompt. See 'failed_generation' for more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{}<function>'}}
55
+ 2025-09-17 16:57:59,301 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': 'Tool choice is required, but model did not call a tool', 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{\n "tools": [],\n "query_response": "Hello! How can I assist you today? 😊"\n}'}}
56
+ 2025-09-17 16:58:58,690 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Tool call validation failed: tool call validation failed: attempted to call tool 'tools' which was not in request.tools", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{"name": "tools", "arguments": {"tools": []}}'}}
57
+ 2025-09-17 16:59:42,521 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Tool call validation failed: tool call validation failed: attempted to call tool 'commentary' which was not in request.tools", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{"name": "commentary", "arguments": {"tools": []}}'}}
58
+ 2025-09-17 17:05:10,589 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Tool call validation failed: tool call validation failed: attempted to call tool 'json' which was not in request.tools", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{"name": "json", "arguments": {\n "tools": []\n}}'}}
59
+ 2025-09-17 17:06:33,147 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Tool call validation failed: tool call validation failed: attempted to call tool 'json' which was not in request.tools", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{"name": "json", "arguments": {\n "tools": []\n}}'}}
60
+ 2025-09-17 17:08:19,208 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "'messages' : minimum number of items is 1", 'type': 'invalid_request_error'}}
61
+ 2025-09-17 17:13:47,203 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "'messages' : minimum number of items is 1", 'type': 'invalid_request_error'}}
62
+ 2025-09-17 17:15:08,322 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "'messages' : minimum number of items is 1", 'type': 'invalid_request_error'}}
63
+ 2025-09-17 17:17:51,001 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "'messages' : minimum number of items is 1", 'type': 'invalid_request_error'}}
64
+ 2025-09-17 17:20:09,690 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "'messages' : minimum number of items is 1", 'type': 'invalid_request_error'}}
65
+ 2025-09-17 17:24:27,652 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': 'tool call validation failed: parameters for tool UserReferenceResponseFormatter did not match schema: errors: [`/video_idea`: expected string, but got null, `/video_story`: expected string, but got null]', 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{"video_idea": null, "video_story": null}</function>'}}
66
+ 2025-09-17 17:25:35,033 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': 'Tool call validation failed: tool call validation failed: parameters for tool UserReferenceResponseFormatter did not match schema: errors: [`/video_idea`: expected string, but got null, `/video_story`: expected string, but got null]', 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{"name": "UserReferenceResponseFormatter", "arguments": {\n "video_idea": null,\n "video_story": null\n}}'}}
67
+ 2025-09-17 17:26:29,098 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': 'Tool choice is required, but model did not call a tool', 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{\n "tools": ["ideation"],\n "query_response": "Great! Let’s brainstorm some fresh video ideas tailored to your business. Let me know a bit about your industry or target audience, and I’ll start generating concepts for you."\n}'}}
68
+ 2025-09-17 17:26:56,020 | ERROR | error_logger | api/routers/orchestration.py:30 | Unable to run orchestration: Error code: 400 - {'error': {'message': "Tool call validation failed: tool call validation failed: attempted to call tool 'json' which was not in request.tools", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{"name": "json", "arguments": {\n "tools": ["ideation"],\n "query_response": "Great! Let’s brainstorm some fresh video ideas tailored to your business. Let me know a bit about your industry, target audience, and any themes you have in mind, and I’ll generate some creative concepts for you."\n}}'}}
69
+ 2025-09-18 23:26:46,999 | ERROR | error_logger | api/routers/orchestration.py:31 | Unable to run orchestration: Checkpointer requires one or more of the following 'configurable' keys: thread_id, checkpoint_ns, checkpoint_id
70
+ 2025-09-18 23:28:06,546 | ERROR | error_logger | api/routers/orchestration.py:32 | Unable to run orchestration: Error code: 401 - {'error': {'message': 'Invalid API Key', 'type': 'invalid_request_error', 'code': 'invalid_api_key'}}
71
+ 2025-09-18 23:34:12,324 | ERROR | error_logger | api/routers/orchestration.py:32 | Unable to run orchestration: Error code: 400 - {'error': {'message': 'Tool call validation failed: tool call validation failed: parameters for tool UserReferenceResponseFormatter did not match schema: errors: [`/video_idea`: expected string, but got null, `/video_story`: expected string, but got null]', 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{"name": "UserReferenceResponseFormatter", "arguments": {\n "video_idea": null,\n "video_story": null\n}}'}}
72
+ 2025-09-18 23:40:12,666 | ERROR | error_logger | api/routers/orchestration.py:32 | Unable to run orchestration: Error code: 400 - {'error': {'message': 'tool call validation failed: parameters for tool UserReferenceResponseFormatter did not match schema: errors: [`/video_story`: expected string, but got null, `/video_idea`: expected string, but got null]', 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{"video_idea": null, "video_story": null}</function>'}}
73
+ 2025-09-18 23:40:24,844 | ERROR | error_logger | api/routers/orchestration.py:32 | Unable to run orchestration: Error code: 400 - {'error': {'message': 'tool call validation failed: parameters for tool UserReferenceResponseFormatter did not match schema: errors: [`/video_idea`: expected string, but got null, `/video_story`: expected string, but got null]', 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{"video_idea": null, "video_story": null}</function>'}}
74
+ 2025-09-18 23:40:42,373 | ERROR | error_logger | api/routers/orchestration.py:32 | Unable to run orchestration: Error code: 400 - {'error': {'message': 'tool call validation failed: parameters for tool UserReferenceResponseFormatter did not match schema: errors: [`/video_idea`: expected string, but got null, `/video_story`: expected string, but got null]', 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '<function=UserReferenceResponseFormatter>{"video_idea": null, "video_story": null}</function>'}}
75
+ 2025-09-18 23:53:08,173 | ERROR | error_logger | api/routers/orchestration.py:32 | Unable to run orchestration: 'video_idea'
76
+ 2025-09-18 23:56:47,469 | ERROR | error_logger | api/routers/orchestration.py:32 | Unable to run orchestration: unhashable type: 'list'
src/.DS_Store CHANGED
Binary files a/src/.DS_Store and b/src/.DS_Store differ
 
src/genai/analytics_chatbot/utils/name_variations.json CHANGED
@@ -1,39 +1,44 @@
1
  {
 
 
 
 
 
2
  "divyadhakal_": [
3
  "divya dhakal",
4
  "divya",
5
  "dhakal divya",
6
  "dibya dhakal"
7
  ],
8
- "ishtukarkee": [
9
- "istu karki",
10
- "istu karkee",
11
- "ishtu karki",
12
- "ishtu karkee",
13
- "ishtoo karkee",
14
- "ishtu",
15
- "istu"
16
- ],
17
- "kano_mama": [
18
- "kano mama",
19
- "kano",
20
- "mama kano"
21
  ],
22
  "munachiya": [
23
  "muna chiya",
24
  "muna",
25
  "chiya muna"
26
  ],
27
- "nepal_food": [
28
- "nepal food",
29
- "nepalfood",
30
- "food nepal"
31
  ],
32
- "ggkaam610": [
33
- "ggkaam",
34
- "ggkaam 610",
35
- "gg kaam",
36
- "gg_kaam"
 
 
 
 
 
 
 
 
 
37
  ],
38
  "adars_fpv": [
39
  "adarsh",
@@ -50,6 +55,11 @@
50
  "anisha",
51
  "kafle anisha"
52
  ],
 
 
 
 
 
53
  "diwasg": [
54
  "diwash gurung",
55
  "diwash",
@@ -61,10 +71,24 @@
61
  "grubfood",
62
  "grub food"
63
  ],
64
- "imsurakshyakc": [
65
- "surakshya kc",
66
- "surakshya",
67
- "kc surakshya"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  ],
69
  "jholeyism": [
70
  "jholey",
@@ -72,9 +96,49 @@
72
  "jholay",
73
  "jhuley"
74
  ],
75
- "mrbvlog2": [
 
76
  "mrb vlogs",
77
- "mrb",
78
  "mr b vlog"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
  ]
80
  }
 
1
  {
2
+ "imsurakshyakc": [
3
+ "surakshya kc",
4
+ "surakshya",
5
+ "kc surakshya"
6
+ ],
7
  "divyadhakal_": [
8
  "divya dhakal",
9
  "divya",
10
  "dhakal divya",
11
  "dibya dhakal"
12
  ],
13
+ "mydarlingfood": [
14
+ "my darling food",
15
+ "darling food",
16
+ "food darling"
 
 
 
 
 
 
 
 
 
17
  ],
18
  "munachiya": [
19
  "muna chiya",
20
  "muna",
21
  "chiya muna"
22
  ],
23
+ "sanjogkoirala_": [
24
+ "sanjog koirala",
25
+ "koirala sanjog",
26
+ "sanjog"
27
  ],
28
+ "_anupchau": [
29
+ "anup chau",
30
+ "chau anup",
31
+ "anup"
32
+ ],
33
+ "_richa.s": [
34
+ "richa s",
35
+ "richa",
36
+ "s richa"
37
+ ],
38
+ "_its.me.muskan_": [
39
+ "muskan",
40
+ "its me muskan",
41
+ "muskan me"
42
  ],
43
  "adars_fpv": [
44
  "adarsh",
 
55
  "anisha",
56
  "kafle anisha"
57
  ],
58
+ "_prabha__gurung_": [
59
+ "prabha gurung",
60
+ "gurung prabha",
61
+ "prabha"
62
+ ],
63
  "diwasg": [
64
  "diwash gurung",
65
  "diwash",
 
71
  "grubfood",
72
  "grub food"
73
  ],
74
+ "ggkaam": [
75
+ "ggkaam",
76
+ "gg kaam",
77
+ "gg_kaam"
78
+ ],
79
+ "gracebhattarai": [
80
+ "grace bhattarai",
81
+ "bhattarai grace",
82
+ "grace"
83
+ ],
84
+ "ishtu_karkee": [
85
+ "istu karki",
86
+ "istu karkee",
87
+ "ishtu karki",
88
+ "ishtu karkee",
89
+ "ishtoo karkee",
90
+ "ishtu",
91
+ "istu"
92
  ],
93
  "jholeyism": [
94
  "jholey",
 
96
  "jholay",
97
  "jhuley"
98
  ],
99
+ "mrbvlog2.0": [
100
+ "mrb vlog",
101
  "mrb vlogs",
 
102
  "mr b vlog"
103
+ ],
104
+ "nepal.food": [
105
+ "nepal food",
106
+ "nepalfood",
107
+ "food nepal"
108
+ ],
109
+ "nnzeella_shrestha": [
110
+ "nizella shrestha",
111
+ "neezella shrestha",
112
+ "shrestha nizella"
113
+ ],
114
+ "pramuditaaudas": [
115
+ "pramudita audas",
116
+ "audash pramudita",
117
+ "pramudita"
118
+ ],
119
+ "rabindra_dhant1": [
120
+ "rabindra dhant",
121
+ "dhant rabindra",
122
+ "rabindra"
123
+ ],
124
+ "razee.maharjan": [
125
+ "razee maharjan",
126
+ "maharjan razee",
127
+ "razee"
128
+ ],
129
+ "siddinathapa": [
130
+ "siddina thapa",
131
+ "siddina",
132
+ "thapa siddina"
133
+ ],
134
+ "sjpoon_official": [
135
+ "sjpoon",
136
+ "sj poon",
137
+ "poon sj"
138
+ ],
139
+ "tashyilha_g": [
140
+ "tashyil ha g",
141
+ "tashyil",
142
+ "tashyil g"
143
  ]
144
  }
src/genai/analytics_chatbot/utils/nodes.py CHANGED
@@ -1,8 +1,8 @@
1
  import requests
2
  from langchain_core.messages import SystemMessage , HumanMessage , FunctionMessage
3
  from .state import State
4
- from .schemas import ResponseFormatter
5
- from .prompts import chatbot_prompt
6
  from .utils import generate_api_knowledge
7
  from src.genai.utils.models_loader import llm_groq
8
 
@@ -27,6 +27,7 @@ class ChatbotNode:
27
  return {
28
  "messages": [{"role": "assistant", "content": f'''The endpoint is: {result.endpoint}. The parameters are: {result.parameters}'''}],
29
  "endpoint": result.endpoint,
 
30
  "parameters": result.parameters,
31
  }
32
 
@@ -40,8 +41,29 @@ class FetchDataNode:
40
  }
41
 
42
  def run(self, state:State):
 
43
  url = f'''{self.base_url}{state['endpoint']}'''
44
- response = requests.get(url, params=state['parameters'],headers=self.headers)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  return {'response':response.json()}
46
 
47
 
 
 
1
  import requests
2
  from langchain_core.messages import SystemMessage , HumanMessage , FunctionMessage
3
  from .state import State
4
+ from .schemas import ResponseFormatter , InfluencerNames
5
+ from .prompts import chatbot_prompt , get_inf_name_prompt
6
  from .utils import generate_api_knowledge
7
  from src.genai.utils.models_loader import llm_groq
8
 
 
27
  return {
28
  "messages": [{"role": "assistant", "content": f'''The endpoint is: {result.endpoint}. The parameters are: {result.parameters}'''}],
29
  "endpoint": result.endpoint,
30
+ "method": result.method,
31
  "parameters": result.parameters,
32
  }
33
 
 
41
  }
42
 
43
  def run(self, state:State):
44
+ print('Entered to fetch data')
45
  url = f'''{self.base_url}{state['endpoint']}'''
46
+ if state['method'] == 'GET':
47
+ response = requests.get(url, params=state['parameters'],headers=self.headers)
48
+ elif state['endpoint'] == '/api/v1/compare/':
49
+ print('Condition satisfied')
50
+ messages = [SystemMessage(content=get_inf_name_prompt()),
51
+ HumanMessage(content=f'''The dictionary of parameters is: {state['parameters']}''')]
52
+ response=llm_groq.with_structured_output(InfluencerNames).invoke(messages)
53
+ payload = {
54
+ "usernames": response.names,
55
+ "freq": state['parameters']['frequency']
56
+ }
57
+
58
+ print('The payload is:',payload)
59
+
60
+ headers = {
61
+ "Content-Type": "application/json"
62
+ }
63
+
64
+ response = requests.post(url, json=payload, headers=headers)
65
+
66
  return {'response':response.json()}
67
 
68
 
69
+
src/genai/analytics_chatbot/utils/prompts.py CHANGED
@@ -3,6 +3,7 @@ def chatbot_prompt():
3
  You are an intelligent assistant whose task is to route user queries to the correct API endpoint.
4
  You have access to the API knowledge base, which contains information about each endpoint:
5
  - The endpoint path
 
6
  - Its required parameters
7
  - A description of what the endpoint does
8
 
@@ -14,6 +15,7 @@ Your job is to:
14
  5. Return the result in a **strict JSON format** exactly like this:
15
 
16
  "endpoint": "<chosen endpoint path>",
 
17
  "parameters":
18
  "<param1>": "<value1>",
19
  "<param2>": "<value2>"
@@ -21,7 +23,7 @@ Your job is to:
21
  Important instructions:
22
  - Only return endpoints that exist in the API knowledge base.
23
  - Include all required parameters for the endpoint.
24
- - If a parameter is not specified in the user's query, return it as null.
25
  - Do not add any extra explanation or text; return **only the JSON**.
26
  - The API knowledge base will be provided as a separate function message.
27
 
@@ -31,6 +33,7 @@ API knowledge: contains endpoint "/overview/buzz_trend" with parameters ["period
31
  Expected output:
32
 
33
  "endpoint": "/api/v1/overview/buzz_trend",
 
34
  "parameters":
35
  "period": "monthly",
36
  "influencer_username": "John"
@@ -38,3 +41,8 @@ Expected output:
38
  Your response must always follow this exact JSON format.
39
  """
40
 
 
 
 
 
 
 
3
  You are an intelligent assistant whose task is to route user queries to the correct API endpoint.
4
  You have access to the API knowledge base, which contains information about each endpoint:
5
  - The endpoint path
6
 - The method: 'GET' or 'POST'
7
  - Its required parameters
8
  - A description of what the endpoint does
9
 
 
15
  5. Return the result in a **strict JSON format** exactly like this:
16
 
17
  "endpoint": "<chosen endpoint path>",
18
+ "method": GET or POST
19
  "parameters":
20
  "<param1>": "<value1>",
21
  "<param2>": "<value2>"
 
23
  Important instructions:
24
  - Only return endpoints that exist in the API knowledge base.
25
  - Include all required parameters for the endpoint.
26
+ - If the parameter or method is not specified in the user's query, return it as null.
27
  - Do not add any extra explanation or text; return **only the JSON**.
28
  - The API knowledge base will be provided as a separate function message.
29
 
 
33
  Expected output:
34
 
35
  "endpoint": "/api/v1/overview/buzz_trend",
36
+ "method": GET
37
  "parameters":
38
  "period": "monthly",
39
  "influencer_username": "John"
 
41
  Your response must always follow this exact JSON format.
42
  """
43
 
44
+ def get_inf_name_prompt():
45
+ return '''You are given a dictionary called parameters.
46
+ Your task is to extract the names of influencers, exactly as they appear, from the values of that dictionary.
47
+ Extract the names in the form of a list.
48
+ '''
src/genai/analytics_chatbot/utils/schemas.py CHANGED
@@ -2,4 +2,8 @@ from pydantic import BaseModel, Field
2
 
3
  class ResponseFormatter(BaseModel):
4
  endpoint: str = Field(description='Return the exact endpoint from the knowledge base of endpoints.')
5
- parameters: dict = Field(description='Return the dictionary of parameters to pass to the endpoint.')
 
 
 
 
 
2
 
3
  class ResponseFormatter(BaseModel):
4
  endpoint: str = Field(description='Return the exact endpoint from the knowledge base of endpoints.')
5
+ method: str = Field(description='Return the exact request type from the knowledge base of endpoints. GET or POST ')
6
+ parameters: dict = Field(description='Return the dictionary of parameters to pass to the endpoint.')
7
+
8
+ class InfluencerNames(BaseModel):
9
+ names: list
src/genai/analytics_chatbot/utils/state.py CHANGED
@@ -5,6 +5,7 @@ from langgraph.graph.message import add_messages
5
  class State(TypedDict):
6
  messages: Annotated[list, add_messages]
7
  endpoint: str
 
8
  parameters: dict
9
  response:dict
10
  error_message:str
 
5
  class State(TypedDict):
6
  messages: Annotated[list, add_messages]
7
  endpoint: str
8
+ method: str
9
  parameters: dict
10
  response:dict
11
  error_message:str
src/genai/analytics_chatbot/utils/streamlit_app.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import requests
3
+
4
+ # Your FastAPI endpoint
5
+ API_URL = "http://localhost:8000/analytics-chatbot" # change if deployed
6
+
7
+ st.set_page_config(page_title="Analytics Chatbot", page_icon="πŸ€–", layout="centered")
8
+
9
+ st.title("πŸ“Š Analytics Chatbot")
10
+
11
+ # Initialize chat history
12
+ if "messages" not in st.session_state:
13
+ st.session_state["messages"] = []
14
+
15
+ # Display previous messages
16
+ for chat in st.session_state["messages"]:
17
+ role = "πŸ§‘ You" if chat["role"] == "user" else "πŸ€– Bot"
18
+ with st.chat_message(chat["role"]):
19
+ st.markdown(f"**{role}:** {chat['content']}")
20
+
21
+ # User input box
22
+ if prompt := st.chat_input("Ask something about analytics..."):
23
+ # Store user message
24
+ st.session_state["messages"].append({"role": "user", "content": prompt})
25
+
26
+ # Show user message
27
+ with st.chat_message("user"):
28
+ st.markdown(f"**πŸ§‘ You:** {prompt}")
29
+
30
+ try:
31
+ # Call FastAPI backend
32
+ response = requests.post(API_URL, json={"message": prompt})
33
+
34
+ if response.status_code == 200:
35
+ data = response.json()
36
+ bot_reply = f"**Endpoint:** {data['endpoint']}\n\n" \
37
+ f"**Parameters:** `{data['parameters']}`\n\n" \
38
+ f"**Response:** {data['response']}"
39
+ else:
40
+ bot_reply = f"⚠️ Error {response.status_code}: {response.text}"
41
+
42
+ except Exception as e:
43
+ bot_reply = f"❌ Failed to connect to API: {e}"
44
+
45
+ # Store bot message
46
+ st.session_state["messages"].append({"role": "assistant", "content": bot_reply})
47
+
48
+ # Show bot reply
49
+ with st.chat_message("assistant"):
50
+ st.markdown(f"**πŸ€– Bot:**\n\n{bot_reply}")
src/genai/analytics_chatbot/utils/utils.py CHANGED
@@ -4,7 +4,7 @@ import re
4
 
5
  def generate_api_knowledge(base_url: str):
6
  """
7
- Fetches FastAPI OpenAPI docs and summarizes endpoints, parameters, and descriptions.
8
  Returns a JSON-friendly Python list of dicts, excluding the root endpoint.
9
  """
10
  resp = requests.get(f"{base_url}/openapi.json")
@@ -39,6 +39,7 @@ def generate_api_knowledge(base_url: str):
39
 
40
  api_knowledge.append({
41
  "endpoint": endpoint,
 
42
  "parameters": params,
43
  "description": description
44
  })
 
4
 
5
  def generate_api_knowledge(base_url: str):
6
  """
7
+ Fetches FastAPI OpenAPI docs and summarizes endpoints, methods, parameters, and descriptions.
8
  Returns a JSON-friendly Python list of dicts, excluding the root endpoint.
9
  """
10
  resp = requests.get(f"{base_url}/openapi.json")
 
39
 
40
  api_knowledge.append({
41
  "endpoint": endpoint,
42
+ "method": method.upper(),
43
  "parameters": params,
44
  "description": description
45
  })
src/genai/orchestration_agent/__pycache__/agent.cpython-313.pyc CHANGED
Binary files a/src/genai/orchestration_agent/__pycache__/agent.cpython-313.pyc and b/src/genai/orchestration_agent/__pycache__/agent.cpython-313.pyc differ
 
src/genai/orchestration_agent/agent.py CHANGED
@@ -1,8 +1,8 @@
1
  from langgraph.graph import StateGraph, MessagesState, START, END
2
  from langgraph.checkpoint.memory import MemorySaver
3
- from .utils.nodes import ToolReturnNode, ExtractUserReferenceNode
4
  from src.genai.utils.models_loader import llm_gpt
5
- from .utils.state import ValidationFormatter
6
  from .utils.utils import ImageCaptioner, ResponseBlockExtractor
7
  from .utils.tools import InfluencerRetrievalTool
8
 
@@ -16,49 +16,24 @@ class OrchestrationAgent:
16
  self.user_input_history=[]
17
 
18
  def orchestration_graph(self):
19
- workflow = StateGraph(MessagesState)
20
- workflow.add_node("chatbot1", ToolReturnNode().run)
21
- workflow.add_node("chatbot2", ExtractUserReferenceNode().run)
 
 
 
 
 
 
 
 
 
 
 
22
 
23
- workflow.add_edge(START, "chatbot1")
24
- workflow.add_edge('chatbot1', "chatbot2")
25
- workflow.add_edge('chatbot2', END)
26
  return workflow.compile(checkpointer=self.memory)
27
 
28
- def trim_history(self):
29
- if len(self.user_input_history)>4:
30
- self.user_input_history=self.user_input_history[-2:]
31
- print('Length of history', len(self.user_input_history))
32
- query_for_retrieval = ' '.join(
33
- [msg['content'] for msg in self.user_input_history if msg['role'] in ('human', 'image_caption')])
34
- return query_for_retrieval
35
-
36
- def caption_image(self,image_base64,user_input):
37
- if len(image_base64)>0:
38
- caption_response = ImageCaptioner().caption_image(image_base64,user_input)
39
- self.user_input_history.append({'role': 'image_caption', 'content': caption_response})
40
-
41
- print('Caption Response:', caption_response)
42
- else:
43
- caption_response =''
44
- return caption_response
45
-
46
-
47
- def chat(self,user_input: str, image_base64=[]):
48
- print('Message Chunk:')
49
- self.user_input_history.append({'role': 'human', 'content': user_input})
50
- caption_response = self.caption_image(image_base64,user_input)
51
- query_for_retrieval= self.trim_history()
52
-
53
- influencers_data = InfluencerRetrievalTool().retrieve_for_orchestration(query_for_retrieval)
54
 
55
- config = {"configurable": {"thread_id": "orchestration-thread"}}
56
- response = self.agent.invoke({"messages": [{'role':'human','content':user_input},
57
- {'role': 'function', 'name': 'data_of_influencers', 'content': influencers_data},
58
- {'role':'function','name':'information_of_image','content':caption_response}]}, config)['messages']
59
- print('Orchestrator Response', response)
60
- response=llm_gpt.with_structured_output(ValidationFormatter).invoke(ResponseBlockExtractor(response).extract_latest())
61
- return response
62
 
63
 
64
 
 
1
  from langgraph.graph import StateGraph, MessagesState, START, END
2
  from langgraph.checkpoint.memory import MemorySaver
3
+ from .utils.nodes import ToolReturnNode, ExtractUserReferenceNode , ImageCaptionNode , QueryResponsenNode
4
  from src.genai.utils.models_loader import llm_gpt
5
+ from .utils.state import State
6
  from .utils.utils import ImageCaptioner, ResponseBlockExtractor
7
  from .utils.tools import InfluencerRetrievalTool
8
 
 
16
  self.user_input_history=[]
17
 
18
    def orchestration_graph(self):
        """Build and compile the orchestration LangGraph.

        Topology: START fans out to three independent analysis nodes
        (image captioning, tool selection, user-reference extraction);
        all three then feed ``query_response``, which emits the reply and
        terminates the graph.

        NOTE(review): with multiple incoming edges, ``query_response`` is
        expected to run only after all three upstream nodes complete —
        confirm against LangGraph's fan-in semantics.

        Returns:
            The compiled graph, checkpointed with ``self.memory`` so
            conversation state persists per thread id.
        """
        workflow = StateGraph(State)
        workflow.add_node("image_caption", ImageCaptionNode().run)
        workflow.add_node("tool_return", ToolReturnNode().run)
        workflow.add_node("query_response", QueryResponsenNode().run)
        workflow.add_node("extract_reference", ExtractUserReferenceNode().run)

        # Parallel fan-out: the three analysis steps do not depend on each other.
        workflow.add_edge(START, "image_caption")
        workflow.add_edge(START, "tool_return")
        workflow.add_edge(START, "extract_reference")

        # Fan-in to the single reply node, then finish.
        workflow.add_edge('image_caption', "query_response")
        workflow.add_edge('tool_return', "query_response")
        workflow.add_edge('extract_reference', 'query_response')
        workflow.add_edge('query_response', END)

        return workflow.compile(checkpointer=self.memory)
35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
 
 
 
 
 
 
 
37
 
38
 
39
 
src/genai/orchestration_agent/utils/__pycache__/nodes.cpython-313.pyc CHANGED
Binary files a/src/genai/orchestration_agent/utils/__pycache__/nodes.cpython-313.pyc and b/src/genai/orchestration_agent/utils/__pycache__/nodes.cpython-313.pyc differ
 
src/genai/orchestration_agent/utils/__pycache__/prompts.cpython-313.pyc CHANGED
Binary files a/src/genai/orchestration_agent/utils/__pycache__/prompts.cpython-313.pyc and b/src/genai/orchestration_agent/utils/__pycache__/prompts.cpython-313.pyc differ
 
src/genai/orchestration_agent/utils/nodes.py CHANGED
@@ -1,7 +1,47 @@
1
- from .prompts import tool_return_prompt , extract_user_reference_prompt
2
- from langchain_core.messages import SystemMessage, HumanMessage
3
  from src.genai.utils.models_loader import llm_gpt
4
- from .state import ToolResponseFormatter, UserReferenceResponseFormatter
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  class ToolReturnNode:
7
  """Node for determining which tools to use based on user messages."""
@@ -9,12 +49,32 @@ class ToolReturnNode:
9
  def __init__(self, llm=llm_gpt):
10
  self.llm = llm
11
 
12
- def run(self, state):
13
  if len(state["messages"]) > 23:
14
  state["messages"] = state["messages"][-18:]
15
  template = [SystemMessage(content=tool_return_prompt)] + state["messages"]
16
  response = self.llm.with_structured_output(ToolResponseFormatter).invoke(template)
17
- return {"messages": [{'role': 'assistant', 'content': f"The exact name of the tool is: {response}"}]}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
 
20
  class ExtractUserReferenceNode:
@@ -28,13 +88,52 @@ class ExtractUserReferenceNode:
28
  (msg for msg in reversed(state['messages']) if isinstance(msg, HumanMessage)),
29
  None
30
  )
 
31
  template = [SystemMessage(content=extract_user_reference_prompt),
32
  HumanMessage(content=latest_human_message.content)]
33
  response = self.llm.with_structured_output(UserReferenceResponseFormatter).invoke(template)
34
- return {'messages': [{
35
- 'role': 'assistant',
36
- 'content': f"The video idea is: {response.video_idea} and the video story is: {response.video_story}"
37
- }]}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
 
40
 
 
1
+ from .prompts import tool_return_prompt , extract_user_reference_prompt , query_response_prompt,captioning_prompt
2
+ from langchain_core.messages import SystemMessage, HumanMessage, FunctionMessage
3
  from src.genai.utils.models_loader import llm_gpt
4
+ from src.genai.utils.base_endpoint import base_url
5
+ from .state import State
6
+ from .tools import InfluencerRetrievalTool
7
+ from .schemas import ToolResponseFormatter , UserReferenceResponseFormatter
8
+ import os
9
+ import requests
10
+ from groq import Groq
11
+
12
+ retriever=InfluencerRetrievalTool()
13
+
14
class ImageCaptionNode:
    """Graph node that captions the most recent user-supplied image.

    Calls Groq's multimodal chat endpoint and writes the caption into the
    graph state under ``image_caption`` (``None`` when no image was sent).
    """

    def __init__(self, api_key=os.environ.get('GROQ_API_KEY')):
        # The API key default is read from the environment at class-definition
        # time (default-argument evaluation); pass one explicitly to override.
        self.client = Groq(api_key=api_key)

    def run(self, state: "State"):
        """Caption the latest image in ``state['image_base64']``.

        Returns:
            A partial state update ``{'image_caption': <caption or None>}``.
        """
        # ROBUSTNESS: 'image_base64' may be absent from the state entirely;
        # treat "missing" the same as "empty".
        images = state.get('image_base64') or []
        if not images:
            return {'image_caption': None}
        print('Captioning image')
        chat_completion = self.client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": captioning_prompt(state['messages'])},
                        {
                            "type": "image_url",
                            "image_url": {
                                # BUG FIX: the original wrote
                                # state['image_base64[-1]'] — the index was part
                                # of the dict KEY, so this always raised
                                # KeyError. We want the latest image in the list.
                                "url": f"data:image/jpg;base64,{images[-1]}",
                            },
                        },
                    ],
                }
            ],
            model="meta-llama/llama-4-scout-17b-16e-instruct",
            max_completion_tokens=50,
            temperature=1,
        )
        return {'image_caption': chat_completion.choices[0].message.content}
44
+
45
 
46
class ToolReturnNode:
    """Node for determining which tools to use based on user messages."""

    def __init__(self, llm=llm_gpt):
        # Structured-output LLM used for tool selection; injectable for tests.
        self.llm = llm

    def run(self, state: State):
        """Select the tools matching the user's latest request.

        Trims an over-long chat history, then asks the LLM (constrained to
        the ``ToolResponseFormatter`` schema) which tools apply, recording
        the choice both as an assistant message and in ``state['tools']``.
        """
        history = state["messages"]
        if len(history) > 23:
            # Bound the prompt size by keeping only recent exchanges.
            history = history[-18:]
            state["messages"] = history
        prompt_messages = [SystemMessage(content=tool_return_prompt), *history]
        selector = self.llm.with_structured_output(ToolResponseFormatter)
        response = selector.invoke(prompt_messages)
        print('The response is:', response)
        assistant_note = {'role': 'assistant', 'content': f"Tool invoked: {response.tools}"}
        return {"messages": [assistant_note], "tools": response.tools}
60
+
61
+
62
class QueryResponsenNode:
    """Node producing the conversational reply for the current turn.

    (Class name — including its typo — is kept as-is because the graph
    wiring in agent.py references it.)
    """

    def __init__(self):
        self.llm = llm_gpt

    def run(self, state: "State"):
        """Answer the user directly, or acknowledge the selected tools.

        When no tools were selected, the query is answered with the help of
        retrieved influencer data; otherwise a short acknowledgement of the
        pending tools is returned.

        Returns:
            A partial state update containing ``query_response`` (and, on
            the no-tool path, the assistant message for the history).
        """
        # ROBUSTNESS: 'tools' can be missing from the state on the first
        # turn — the original state['tools'] raised KeyError in that case.
        selected_tools = state.get('tools') or []
        if not selected_tools:
            retrieved_data = retriever.retrieve_for_orchestration(state['messages'])
            template = [
                SystemMessage(content=query_response_prompt),
                FunctionMessage(name='inf-data-retrieval', content=retrieved_data),
            ] + state["messages"]
            response = self.llm.invoke(template)
            return {"messages": [{'role': 'assistant', 'content': response.content}],
                    "query_response": response.content}
        return {
            "query_response": f'''Okay i will perform {" ".join(selected_tools)} for you.'''
        }
+ }
78
 
79
 
80
  class ExtractUserReferenceNode:
 
88
  (msg for msg in reversed(state['messages']) if isinstance(msg, HumanMessage)),
89
  None
90
  )
91
+ print('Latest human message:', latest_human_message)
92
  template = [SystemMessage(content=extract_user_reference_prompt),
93
  HumanMessage(content=latest_human_message.content)]
94
  response = self.llm.with_structured_output(UserReferenceResponseFormatter).invoke(template)
95
+ return{
96
+ 'video_idea': response.video_idea,
97
+ 'video_story': response.video_story
98
+ }
99
+
100
+
101
class InvokeToolNode:
    """Node that dispatches the selected tools against the backend API.

    Currently only 'analytics'-style tools are dispatched; the latest human
    message is forwarded to the endpoint as the query.
    """

    def __init__(self):
        self.base_url = base_url
        # SECURITY(review): placeholder credential checked into source —
        # load the real key from config/env before shipping.
        self.headers = {
            "Authorization": "Bearer YOUR_API_KEY",  # replace with your API key if needed
            "Content-Type": "application/json"
        }

    def run(self, state: "State"):
        """Call the first analytics endpoint named in ``state['tools']``.

        Returns:
            ``{'analytics_response': <decoded JSON>}`` for the first
            analytics tool hit, or ``None`` when there is nothing to call.
        """
        latest_human_message = next(
            (msg for msg in reversed(state['messages']) if isinstance(msg, HumanMessage)),
            None
        )
        if latest_human_message is None:
            # ROBUSTNESS: the original dereferenced .content on None when the
            # history held no human message.
            return None
        for tool in state.get('tools') or []:
            if 'analytics' in tool:
                url = f'{self.base_url}{tool}'
                # BUG FIX: requests' `params` expects a mapping; the original
                # passed the raw message string, yielding a garbled query.
                response = requests.get(
                    url,
                    params={'message': latest_human_message.content},
                    headers=self.headers,
                )
                return {
                    'analytics_response': response.json()
                }
        return None
122
+
123
+
124
+
125
+
126
+
127
+
128
+
129
+
130
+
131
+
132
+
133
+
134
+
135
+
136
+
137
 
138
 
139
 
src/genai/orchestration_agent/utils/prompts.py CHANGED
@@ -1,19 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 
2
- tool_return_prompt = """
3
  You are an AI orchestration agent and a friendly assistant built to help businesses and brands find influencers and generate content ideas, stories, and visuals.
4
 
5
  Your job is to:
6
  1. **Read the user's message carefully** and identify their intent.
7
  2. **Return exactly two things**:
8
- - `tools`: a Python-style list of tool names (from the ordered list below) that match the user’s intent.
9
- - `query_response`: a short, friendly, and helpful reply that aligns with the tools you've selected. Also return the relevant data of influencers aligning with the user query only if the user asks about the influencers.
10
 
11
- Your `tools` output must:
12
  - Always be a **Python-style list**, even if only one tool is selected.
13
  - **Respect the sequence** of tools listed below. If more than one tool is needed, always list them in this order (regardless of how the user wrote them).
14
  - Only include tools that are strictly necessary.
15
 
16
- Your `query_response` must:
17
  - Always match and acknowledge the user's request.
18
  - Be aligned with the selected tools.
19
  - Never say something is impossible if a tool exists for it.
@@ -44,7 +89,7 @@ Your `query_response` must:
44
  4. **generate-ultimate-story** β†’ Trigger only if:
45
  - The user is asking for the **final**, polished, or complete story
46
  - And has already done previous story brainstorming
47
- - Never trigger this tool alone unless `generate-story` has been used before
48
 
49
  5. **generate-image** β†’ Trigger if the user:
50
  - Wants a visual, image, or scene created from the final story
@@ -56,19 +101,19 @@ Your `query_response` must:
56
  - If a user says:
57
  > β€œGenerate story based on idea 4”
58
  This means they selected/liked idea 4 β†’ so return:
59
- `"tools": ["human-idea-refining", "generate-story"]`
60
 
61
  - If a user says:
62
  > β€œCreate an image of the final script”
63
- β†’ Return: `["generate-image"]`
64
 
65
  - If a user says:
66
  > β€œI want a final version of this story”
67
- β†’ Return: `["generate-ultimate-story"]` (but only if `generate-story` was used before)
68
 
69
- - If a user gives feedback or says β€œimprove this idea” β†’ Return `["human-idea-refining"]`
70
 
71
- - If a user just wants help with finding influencers or advice β†’ Return `"tools": []` but still respond helpfully via `query_response`.
72
  - Again reminding you the most important part. If you generate more than one tools, **Please** generate them in the sequence above provided. For eg: Don't give **human-idea-refining** after **ideation**. The list of tools have to be in the proper order provided above.
73
  ---
74
 
@@ -85,8 +130,6 @@ You're also an intelligent assistant for brands looking to collaborate with infl
85
 
86
  """
87
 
88
-
89
-
90
  extract_user_reference_prompt = """
91
  You are an information extractor, NOT a creative assistant. Your ONLY job is to extract video ideas and video stories from user queries **if and only if** they are explicitly written by the user. Do not create, generate, or imagine anything on your own.
92
 
 
1
+ tool_return_prompt= """
2
+ You are an perfect orchestration agent that
3
+ - grabs the intention of the user and determines which of the tools are they trying to access.
4
+
5
+ For every user query, return:
6
+ tools: A Python list of tool names (from the ordered list below).
7
+
8
+ Names of tools:
9
+ 1. ideation β†’ Generate fresh video ideas, or if user asks again for new ideas.
10
+ 2. human-idea-refining β†’ If user likes/locks ideas, gives feedback, wants to improve/merge/edit, or refers to a previous idea. Trigger until user confirms final choice. Undo/going back also counts as refining. Never follow this with ideation.
11
+ 3. generate-story β†’ If user wants to create a story, brainstorm the story , or plot. Based on refined idea.
12
+ 4. generate-ultimate-story β†’ Only if user requests a final/polished version AND `generate-story` has been used.
13
+ 5. generate-image β†’ If user wants visuals/images/scenes from final story.
14
+ 6. analytics β†’ If user asks for influencer analytics, metrics, stats, comparisons, or insights. Examples:
15
+ - β€œInfo about influencer X”
16
+ - β€œSentiment of influencer X’s comments”
17
+ - β€œTop collaborators of X”
18
+ - β€œCompare X vs Y weekly analytics”
19
+ - β€œEngagement of influencer X”
20
+ - β€œBuzz of influencer X”
21
+
22
+ Rules
23
+ - Never return the name of tool that doesn't exists in the tool names.
24
+ - Always output tools as a Python list (even if one).
25
+ - Only include needed tools with their exact names.
26
+ - If multiple tools, respect sequence (e.g., `["human-idea-refining", "generate-story"]`).
27
+ - If query is not about any tool calling (like normal questions) then return empty list in tools. β†’ `"tools": []`.
28
+ - If about influencer analytics β†’ `"tools": ["analytics"]`.
29
+
30
+ Clarifications
31
+ - β€œI liked idea 4. Generate story based on idea 4” β†’ `['human-idea-refining',"generate-story"]`
32
+ - β€œCreate image of final script” β†’ `["generate-image"]`
33
+ - β€œFinal version of story” β†’ `["generate-ultimate-story"]` (only if story already generated)
34
+ - β€œImprove this idea” β†’ `["human-idea-refining"]`
35
+ - Analytics requests β†’ `["analytics"]`
36
+
37
+ Output Format
38
+ "tools": ["tool_1", "tool_2"],
39
+ """
40
+
41
+ query_response_prompt="""
42
+ You are an AI orchestration agent and a friendly assistant built to help businesses and brands find influencers and generate content ideas, stories, and visuals.
43
+ Respond to the user's query in polite way.
44
+ You are passed with the data of influencers from the function message. Use that data to give the response.
45
+ """
46
 
47
+ tool_return_prompt_old = """
48
  You are an AI orchestration agent and a friendly assistant built to help businesses and brands find influencers and generate content ideas, stories, and visuals.
49
 
50
  Your job is to:
51
  1. **Read the user's message carefully** and identify their intent.
52
  2. **Return exactly two things**:
53
+ - tools: a Python-style list of tool names (from the ordered list below) that match the user’s intent.
54
+ - query_response: a short, friendly, and helpful reply that aligns with the tools you've selected. Also return the relevant data of influencers aligning with the user query only if the user asks about the influencers.
55
 
56
+ Your tools output must:
57
  - Always be a **Python-style list**, even if only one tool is selected.
58
  - **Respect the sequence** of tools listed below. If more than one tool is needed, always list them in this order (regardless of how the user wrote them).
59
  - Only include tools that are strictly necessary.
60
 
61
+ Your query_response must:
62
  - Always match and acknowledge the user's request.
63
  - Be aligned with the selected tools.
64
  - Never say something is impossible if a tool exists for it.
 
89
  4. **generate-ultimate-story** β†’ Trigger only if:
90
  - The user is asking for the **final**, polished, or complete story
91
  - And has already done previous story brainstorming
92
+ - Never trigger this tool alone unless generate-story has been used before
93
 
94
  5. **generate-image** β†’ Trigger if the user:
95
  - Wants a visual, image, or scene created from the final story
 
101
  - If a user says:
102
  > β€œGenerate story based on idea 4”
103
  This means they selected/liked idea 4 β†’ so return:
104
+ "tools": ["human-idea-refining", "generate-story"]
105
 
106
  - If a user says:
107
  > β€œCreate an image of the final script”
108
+ β†’ Return: ["generate-image"]
109
 
110
  - If a user says:
111
  > β€œI want a final version of this story”
112
+ β†’ Return: ["generate-ultimate-story"] (but only if generate-story was used before)
113
 
114
+ - If a user gives feedback or says β€œimprove this idea” β†’ Return ["human-idea-refining"]
115
 
116
+ - If a user just wants help with finding influencers or advice β†’ Return "tools": [] but still respond helpfully via query_response.
117
  - Again reminding you the most important part. If you generate more than one tools, **Please** generate them in the sequence above provided. For eg: Don't give **human-idea-refining** after **ideation**. The list of tools have to be in the proper order provided above.
118
  ---
119
 
 
130
 
131
  """
132
 
 
 
133
  extract_user_reference_prompt = """
134
  You are an information extractor, NOT a creative assistant. Your ONLY job is to extract video ideas and video stories from user queries **if and only if** they are explicitly written by the user. Do not create, generate, or imagine anything on your own.
135
 
src/genai/orchestration_agent/utils/schemas.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic import BaseModel, Field
from typing import List


class ToolResponseFormatter(BaseModel):
    """Structured-output schema for tool selection (used by ToolReturnNode)."""
    tools: List[str] = Field(description="Returns the name of the tool, tools, or an empty list.")


class UserReferenceResponseFormatter(BaseModel):
    """Structured-output schema for extracting a user-written idea/story
    (used by ExtractUserReferenceNode)."""
    video_idea: str = Field(description="Returns the video idea as it is, otherwise return null")
    video_story: str = Field(description="Returns the video story as it is, otherwise return null")


class ValidationFormatter(BaseModel):
    """Aggregate schema for the orchestrator's combined final reply."""
    tool: List[str] = Field(description="Returns the name of the tool or tools as it is.")
    query_response: str = Field(description="Returns the reply of query as it is.")
    image_caption: str = Field(description="Returns the information of image as it is.")
    video_idea: str = Field(description="Returns the video idea as it is.")
    video_story: str = Field(description="Returns the video story as it is.")
src/genai/orchestration_agent/utils/state.py CHANGED
@@ -1,20 +1,15 @@
1
- from pydantic import BaseModel, Field
2
- from typing import List
3
 
 
 
 
 
 
 
 
 
 
 
4
 
5
- class ToolResponseFormatter(BaseModel):
6
- tool: List[str] = Field(description="Returns the name of the tool, tools, or an empty list.")
7
- query_response: str = Field(description="Returns the response of the user query.")
8
 
9
-
10
-
11
- class UserReferenceResponseFormatter(BaseModel):
12
- video_idea: str = Field(description="Returns the video idea as it is, otherwise return null")
13
- video_story: str = Field(description="Returns the video story as it is, otherwise return null")
14
-
15
- class ValidationFormatter(BaseModel):
16
- tool: List[str] = Field(description="Returns the name of the tool or tools as it is.")
17
- query_response: str = Field(description="Returns the reply of query as it is.")
18
- image_caption: str = Field(description="Returns the information of image as it is.")
19
- video_idea: str = Field(description="Returns the video idea as it is.")
20
- video_story: str = Field(description="Returns the video story as it is.")
 
1
from typing import List , TypedDict , Annotated
from langgraph.graph.message import add_messages


class State(TypedDict):
    # Shared LangGraph state flowing through the orchestration graph.
    messages: Annotated[list, add_messages]  # chat history; add_messages merges node updates
    tools: List[str]        # tool names selected for this turn (may be empty)
    query_response: str     # conversational reply shown to the user
    video_idea: str         # idea text extracted from the user's message
    video_story: str        # story text extracted from the user's message
    # NOTE(review): ImageCaptionNode returns None here when no image was
    # sent, despite the str annotation — confirm intended.
    image_caption: str
    image_base64: list      # base64-encoded images supplied by the user
    analytics_response: dict  # JSON payload returned by the analytics endpoint
    final_story: str        # polished/final story once generated
14
 
 
 
 
15
 
 
 
 
 
 
 
 
 
 
 
 
 
src/genai/orchestration_agent/utils/tools.py CHANGED
@@ -28,8 +28,11 @@ class InfluencerRetrievalTool:
28
  'commentCount': int(row['commentCount']) if pd.notnull(row['commentCount']) else None
29
  })
30
  return results
31
-
32
  def retrieve_for_orchestration(self, query):
 
 
 
33
  query_embedding = np.array(embedding_model.embed_query(str(query))).reshape(1, -1).astype('float32')
34
  faiss.normalize_L2(query_embedding)
35
  distances, indices = self.index.search(query_embedding, len(self.df))
 
28
  'commentCount': int(row['commentCount']) if pd.notnull(row['commentCount']) else None
29
  })
30
  return results
31
+
32
    def retrieve_for_orchestration(self, query):
        # TEMP stub: returns a hard-coded fact instead of running the FAISS
        # retrieval (kept in retrieve_for_orchestration_old).
        # NOTE(review): restore the real retrieval path before release.
        return 'Influencer Divya dhakal has got 66 likes.'
34
+
35
+ def retrieve_for_orchestration_old(self, query):
36
  query_embedding = np.array(embedding_model.embed_query(str(query))).reshape(1, -1).astype('float32')
37
  faiss.normalize_L2(query_embedding)
38
  distances, indices = self.index.search(query_embedding, len(self.df))
src/genai/orchestration_agent/utils/utils.py CHANGED
@@ -72,3 +72,19 @@ class ResponseBlockExtractor:
72
  return latest_block
73
 
74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  return latest_block
73
 
74
 
75
def handle_tools(tools_list, stored_data):
    """Normalize a tool list into the pipeline's canonical execution order.

    Adds prerequisite tools the selector may have missed (a story needs a
    refined idea; an ultimate story needs a draft story), then orders the
    result by the fixed pipeline sequence.

    Args:
        tools_list: tool names chosen by the orchestrator (not mutated).
        stored_data: session store; ``'human_ideation_interactions'`` and
            ``'brainstorming_response'`` are read to decide prerequisites.

    Returns:
        A new, de-duplicated list of tool names in pipeline order.
    """
    tools_order = [
        "analytics",
        "ideation",
        "human-idea-refining",
        "generate-story",
        "generate-ultimate-story",
        "generate-image",
    ]
    # Work on a copy so the caller's list is never mutated (the original
    # appended to tools_list in place).
    requested = set(tools_list)
    # BUG FIX: resolve prerequisites transitively and in dependency order —
    # the original checked 'generate-story' BEFORE possibly adding it for
    # 'generate-ultimate-story', so 'human-idea-refining' could be skipped.
    if 'generate-ultimate-story' in requested and len(stored_data['brainstorming_response']) < 1:
        requested.add('generate-story')
    if 'generate-story' in requested and len(stored_data['human_ideation_interactions']) < 1:
        requested.add('human-idea-refining')
    return [tool for tool in tools_order if tool in requested]
src/genai/utils/__pycache__/models_loader.cpython-313.pyc CHANGED
Binary files a/src/genai/utils/__pycache__/models_loader.cpython-313.pyc and b/src/genai/utils/__pycache__/models_loader.cpython-313.pyc differ
 
src/genai/utils/base_endpoint.py ADDED
@@ -0,0 +1 @@
 
 
1
# Base URL of the local FastAPI server; all tool endpoints hang off /api/.
# NOTE(review): localhost-only — make configurable (env var) for deployment.
base_url = 'http://127.0.0.1:8000/api/'
src/genai/utils/models_loader.py CHANGED
@@ -17,22 +17,23 @@ embedding_model = OpenAIEmbeddings(model="text-embedding-3-small", dimensions=15
17
  llm_anthropic = ChatAnthropic(model='claude-3-7-sonnet-latest', temperature=1)
18
  llm_gemini = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
19
  llm_groq_openai = ChatGroq(model="openai/gpt-oss-120b",temperature=0.7)
20
- llm_groq = ChatGroq(model="llama-3.1-8b-instant",temperature=0.7)
21
 
22
- llm_gpt = ChatOpenAI(model="gpt-4o-mini",temperature=0.3)
23
- llm_gpt_high = ChatOpenAI(model="gpt-4o-mini",temperature=0.5)
 
24
 
25
  captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct"
26
  image_generation_model = "black-forest-labs/FLUX.1-schnell"
27
 
28
- ideator_llm = llm_groq_openai
29
- moderator_llm = llm_groq_openai
30
- critic_llm = llm_groq
31
- simplifier_llm = llm_groq
32
- normalizer_llm = llm_groq
33
- validator_llm = llm_groq
34
- judge1_llm = llm_groq
35
- judge2_llm = llm_groq
36
 
37
 
38
 
 
17
llm_anthropic = ChatAnthropic(model='claude-3-7-sonnet-latest', temperature=1)
llm_gemini = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
llm_groq_openai = ChatGroq(model="openai/gpt-oss-120b",temperature=0.7)
llm_groq = ChatGroq(model="llama-3.3-70b-versatile",temperature=0)

# OpenAI aliases: one name per temperature/use-case so call sites can be
# retargeted to a different model in one place.
llm_gpt_small = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
llm_gpt = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
llm_gpt_high = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.5)

captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct"
image_generation_model = "black-forest-labs/FLUX.1-schnell"

# Role assignments for the ideation pipeline. All currently point at the
# small GPT model — presumably to cut cost/latency; confirm before release.
ideator_llm = llm_gpt_small
moderator_llm = llm_gpt_small
critic_llm = llm_gpt_small
simplifier_llm = llm_gpt_small
normalizer_llm = llm_gpt_small
validator_llm = llm_gpt_small
judge1_llm = llm_gpt_small
judge2_llm = llm_gpt_small
+ judge2_llm = llm_gpt_small
37
 
38
 
39
 
streamlit_app.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import requests

# FastAPI backend endpoint (change if deployed elsewhere).
API_URL = "http://localhost:8000/api/analytics-chatbot"

st.set_page_config(page_title="Analytics Chatbot", page_icon="🤖", layout="centered")

st.title("📊 Analytics Chatbot")

# Initialize chat history once per session.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Replay previous messages.
for chat in st.session_state["messages"]:
    role = "🧑 You" if chat["role"] == "user" else "🤖 Bot"
    with st.chat_message(chat["role"]):
        st.markdown(f"**{role}:** {chat['content']}")

# User input box
if prompt := st.chat_input("Ask something about analytics..."):
    st.session_state["messages"].append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(f"**🧑 You:** {prompt}")

    try:
        # Call the FastAPI backend.
        response = requests.post(API_URL, json={"message": prompt})
        if response.status_code == 200:
            data = response.json()
            # BUG FIX: the original stored/rendered bot_reply['response']
            # unconditionally, which raised TypeError on the error paths
            # where bot_reply is a plain string. Normalize here instead.
            bot_reply = data.get("response", data) if isinstance(data, dict) else data
        else:
            bot_reply = f"⚠️ Error {response.status_code}: {response.text}"
    except Exception as e:
        bot_reply = f"❌ Failed to connect to API: {e}"

    # Store bot message (always a string or JSON-safe payload by now).
    st.session_state["messages"].append({"role": "assistant", "content": bot_reply})

    # Show bot reply: markdown for plain strings, JSON viewer otherwise.
    with st.chat_message("assistant"):
        if isinstance(bot_reply, str):
            st.markdown(bot_reply)
        else:
            st.json(bot_reply)
tests/test_analytics.py CHANGED
@@ -4,6 +4,6 @@ from api.main import app
4
  client = TestClient(app)
5
 
6
  def test_analytics():
7
- response = client.post("/api/show-analytics", json={})
8
  print(response.json())
9
  assert response.status_code == 200
 
4
  client = TestClient(app)
5
 
6
  def test_analytics():
7
+ response = client.get("/api/show-analytics")
8
  print(response.json())
9
  assert response.status_code == 200
tests/test_analytics_chatbot.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi.testclient import TestClient
from api.main import app

client = TestClient(app)


def test_context_analysis():
    """The analytics chatbot endpoint answers a sentiment question with 200."""
    message = "I want to see the sentiment distribution of divya dhakal."
    # BUG FIX: `params` must be a mapping of query parameters; the original
    # passed the raw sentence, producing a malformed query string.
    response = client.get("/api/analytics-chatbot", params={"message": message})
    assert response.status_code == 200
tests/test_final_story.py CHANGED
@@ -4,5 +4,5 @@ from api.main import app
4
  client = TestClient(app)
5
 
6
  def test_final_story_generation():
7
- response = client.post("/api/generate-final-story", json={})
8
  assert response.status_code == 200
 
4
  client = TestClient(app)
5
 
6
  def test_final_story_generation():
7
+ response = client.get("/api/generate-final-story")
8
  assert response.status_code == 200
tests/test_image_generation.py CHANGED
@@ -4,6 +4,6 @@ from api.main import app
4
  client = TestClient(app)
5
 
6
  def test_image_generation():
7
- response = client.post("/api/generate-image", json={})
8
  print(response.json())
9
  assert response.status_code == 200
 
4
  client = TestClient(app)
5
 
6
  def test_image_generation():
7
+ response = client.get("/api/generate-image")
8
  print(response.json())
9
  assert response.status_code == 200
tests/test_orchestrator.py CHANGED
@@ -6,7 +6,7 @@ client = TestClient(app)
6
 
7
  def test_orchestrator():
8
  payload = {
9
- "message":"Mention some popular influencers of nepal"
10
  }
11
  response = client.post("/api/orchestration", json=payload)
12
  print(response.json())
 
6
 
7
  def test_orchestrator():
8
  payload = {
9
+ "message":"Hello"
10
  }
11
  response = client.post("/api/orchestration", json=payload)
12
  print(response.json())