subashpoudel committed on
Commit
6b61df1
·
1 Parent(s): 92cf6e2

next commit

Browse files
logs/access.log CHANGED
@@ -1640,3 +1640,28 @@
1640
  2025-11-12 12:20:31,658 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/openapi.json
1641
  2025-11-12 12:20:31,659 | INFO | access_logger | app.py:20 | Response status: 200
1642
  2025-11-12 12:21:21,920 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20to%20view%20the%20overall%20audience%20analytics%20of%20divya
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1640
  2025-11-12 12:20:31,658 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/openapi.json
1641
  2025-11-12 12:20:31,659 | INFO | access_logger | app.py:20 | Response status: 200
1642
  2025-11-12 12:21:21,920 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20to%20view%20the%20overall%20audience%20analytics%20of%20divya
1643
+ 2025-11-12 16:40:57,113 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/
1644
+ 2025-11-12 16:40:57,114 | INFO | access_logger | app.py:20 | Response status: 200
1645
+ 2025-11-12 16:41:00,503 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/docs
1646
+ 2025-11-12 16:41:00,503 | INFO | access_logger | app.py:20 | Response status: 200
1647
+ 2025-11-12 16:41:00,667 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/openapi.json
1648
+ 2025-11-12 16:41:00,674 | INFO | access_logger | app.py:20 | Response status: 200
1649
+ 2025-11-12 16:41:37,091 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20to%20view%20the%20emoji%20counts%20of%20divya%20dhakal
1650
+ 2025-11-12 16:45:48,973 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/
1651
+ 2025-11-12 16:45:48,974 | INFO | access_logger | app.py:20 | Response status: 200
1652
+ 2025-11-12 16:45:53,820 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/docs
1653
+ 2025-11-12 16:45:53,821 | INFO | access_logger | app.py:20 | Response status: 200
1654
+ 2025-11-12 16:45:53,865 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/openapi.json
1655
+ 2025-11-12 16:45:53,871 | INFO | access_logger | app.py:20 | Response status: 200
1656
+ 2025-11-12 16:46:21,569 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20to%20view%20the%20emoji%20count%20of%20divya%20dhakal
1657
+ 2025-11-12 16:53:03,322 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/docs
1658
+ 2025-11-12 16:53:03,323 | INFO | access_logger | app.py:20 | Response status: 200
1659
+ 2025-11-12 16:53:03,371 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/openapi.json
1660
+ 2025-11-12 16:53:03,372 | INFO | access_logger | app.py:20 | Response status: 200
1661
+ 2025-11-12 16:53:04,862 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/
1662
+ 2025-11-12 16:53:04,863 | INFO | access_logger | app.py:20 | Response status: 200
1663
+ 2025-11-12 16:53:07,974 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/docs
1664
+ 2025-11-12 16:53:07,975 | INFO | access_logger | app.py:20 | Response status: 200
1665
+ 2025-11-12 16:53:08,019 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/openapi.json
1666
+ 2025-11-12 16:53:08,021 | INFO | access_logger | app.py:20 | Response status: 200
1667
+ 2025-11-12 16:53:38,058 | INFO | access_logger | app.py:18 | Request: GET http://127.0.0.1:8000/api/analytics-chatbot?msg=I%20want%20to%20view%20the%20emoji%20count%20of%20divya%20dhakal
src/genai/analytics_chatbot/handlers/bot_and_diversity.py CHANGED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import requests
from langchain_core.messages import SystemMessage, HumanMessage

from ..utils.schemas import BotAndDiversityFormatter
from ..utils.prompts import bot_and_diversity_prompt
from ..utils.utils import process_query


def get_bot_and_diversity(state, llm_gpt, url):
    """Extract bot/comment-diversity parameters from the chat state and call the analytics API.

    Uses the LLM with structured output (``BotAndDiversityFormatter``) to pull
    ``top_n`` and ``influencer_name`` out of the conversation, then issues a GET
    request against *url*.

    Args:
        state: Graph state dict; ``state['messages']`` holds the conversation.
        llm_gpt: A LangChain chat model supporting ``with_structured_output``.
        url: Fully qualified endpoint URL to query.

    Returns:
        The raw ``requests.Response`` object (caller is responsible for ``.json()``).
    """
    messages = [
        SystemMessage(content=bot_and_diversity_prompt),
        HumanMessage(content=str(state['messages'])),
    ]
    parameters = llm_gpt.with_structured_output(
        BotAndDiversityFormatter, method='function_calling'
    ).invoke(messages)
    print(parameters)
    # Fix: normalize the influencer name with process_query, as every sibling
    # handler does — the import existed but was never applied here.
    response = requests.get(
        url,
        params={
            'top_n': parameters.top_n,
            'start_date': None,
            'end_date': None,
            'influencer_username': process_query(parameters.influencer_name),
        },
    )
    return response
src/genai/analytics_chatbot/handlers/comment_quality.py CHANGED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import requests
from langchain_core.messages import SystemMessage, HumanMessage

from ..utils.schemas import CommentQualityFormatter
from ..utils.prompts import comment_quality_prompt
from ..utils.utils import process_query


def get_comment_quality(state, llm_gpt, url):
    """Derive comment-quality parameters from the conversation and fetch the analytics payload.

    The LLM is asked (via ``CommentQualityFormatter`` structured output) for the
    influencer name and an optional date window, which are forwarded to *url*.

    Returns:
        The decoded JSON body of the API response.
    """
    chat = [
        SystemMessage(content=comment_quality_prompt),
        HumanMessage(content=str(state['messages'])),
    ]
    extractor = llm_gpt.with_structured_output(CommentQualityFormatter, method='function_calling')
    parameters = extractor.invoke(chat)
    print(parameters)
    query_params = {
        'start_date': parameters.start_date,
        'end_date': parameters.end_date,
        'influencer_username': process_query(parameters.influencer_name),
    }
    api_response = requests.get(url, params=query_params)
    return api_response.json()
src/genai/analytics_chatbot/handlers/compare.py CHANGED
@@ -2,11 +2,11 @@
2
  import requests
3
  from langchain_core.messages import SystemMessage,HumanMessage
4
  from ..utils.schemas import CompareBodyFormatter
5
- from ..utils.prompts import get_body_prompt
6
  from ..utils.utils import process_query
7
 
8
  def compare(state,llm_gpt,url):
9
- messages = [SystemMessage(content=get_body_prompt()),
10
  HumanMessage(content=str(state['messages']))]
11
  response=llm_gpt.with_structured_output(CompareBodyFormatter , method='function_calling').invoke(messages)
12
  print('INF names response:', response)
 
2
  import requests
3
  from langchain_core.messages import SystemMessage,HumanMessage
4
  from ..utils.schemas import CompareBodyFormatter
5
+ from ..utils.prompts import compare_prompt
6
  from ..utils.utils import process_query
7
 
8
  def compare(state,llm_gpt,url):
9
+ messages = [SystemMessage(content=compare_prompt()),
10
  HumanMessage(content=str(state['messages']))]
11
  response=llm_gpt.with_structured_output(CompareBodyFormatter , method='function_calling').invoke(messages)
12
  print('INF names response:', response)
src/genai/analytics_chatbot/handlers/emoji_count.py CHANGED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import requests
from langchain_core.messages import SystemMessage, HumanMessage

from ..utils.schemas import EmojiCountFormater
from ..utils.prompts import emoji_count_prompt
from ..utils.utils import process_query


def get_emoji_count(state, llm_gpt, url):
    """Pull emoji-count parameters out of the chat history and query the analytics API.

    Structured output (``EmojiCountFormater``) yields ``top_n`` and the
    influencer name; the name is normalized via ``process_query`` before use.

    Returns:
        The raw ``requests.Response`` — the caller invokes ``.json()`` on it.
    """
    prompt_messages = [
        SystemMessage(content=emoji_count_prompt),
        HumanMessage(content=str(state['messages'])),
    ]
    extractor = llm_gpt.with_structured_output(EmojiCountFormater, method='function_calling')
    parameters = extractor.invoke(prompt_messages)
    print(parameters)
    query_params = {
        'top_n': parameters.top_n,
        'influencer_username': process_query(parameters.influencer_name),
    }
    response = requests.get(url, params=query_params)
    print('The response is:', response)
    return response
src/genai/analytics_chatbot/handlers/peak_comment_hour.py CHANGED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import requests
from langchain_core.messages import SystemMessage, HumanMessage

from ..utils.schemas import PeakCommentHourFormatter
from ..utils.prompts import peak_comment_hour_prompt
from ..utils.utils import process_query


def get_peak_comment_hour(state, llm_gpt, url):
    """Extract peak-comment-hour parameters from the conversation and call the API.

    Asks the LLM (``PeakCommentHourFormatter`` structured output) for the
    influencer name plus an optional start/end date window.

    Returns:
        The decoded JSON body of the API response.
    """
    chat = [
        SystemMessage(content=peak_comment_hour_prompt),
        HumanMessage(content=str(state['messages'])),
    ]
    parameters = llm_gpt.with_structured_output(
        PeakCommentHourFormatter, method='function_calling'
    ).invoke(chat)
    query_params = {
        'start_date': parameters.start_date,
        'end_date': parameters.end_date,
        'influencer_username': process_query(parameters.influencer_name),
    }
    return requests.get(url, params=query_params).json()
src/genai/analytics_chatbot/handlers/posting_time.py CHANGED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import requests
from langchain_core.messages import SystemMessage, HumanMessage

from ..utils.schemas import PostingTimeFormatter
from ..utils.prompts import posting_time_analysis_prompt
from ..utils.utils import process_query


def get_posting_time(state, llm_gpt, url):
    """Extract posting-time-analysis parameters from the chat state and hit the API.

    Structured output (``PostingTimeFormatter``) supplies the influencer name
    and an optional date range; the name is normalized with ``process_query``.

    Returns:
        The raw ``requests.Response`` — the caller invokes ``.json()`` on it.
    """
    prompt_messages = [
        SystemMessage(content=posting_time_analysis_prompt),
        HumanMessage(content=str(state['messages'])),
    ]
    extractor = llm_gpt.with_structured_output(PostingTimeFormatter, method='function_calling')
    parameters = extractor.invoke(prompt_messages)
    query_params = {
        'start_date': parameters.start_date,
        'end_date': parameters.end_date,
        'influencer_username': process_query(parameters.influencer_name),
    }
    return requests.get(url, params=query_params)
src/genai/analytics_chatbot/utils/nodes.py CHANGED
@@ -2,14 +2,19 @@ import requests
2
  from langchain_core.messages import SystemMessage , HumanMessage , FunctionMessage
3
  from .state import State
4
  from .tools import RetrieverBackup
5
- from .schemas import CompareBodyFormatter, ParameterFormatter, EndpointFormatter
6
- from .prompts import query_check_prompt , get_body_prompt , fetch_last_message_prompt , fetch_parameters_prompt, fetch_endpoint_prompt, backup_retrieval_prompt
7
  from .utils import process_query, get_endpoint_info
8
  from src.genai.utils.models_loader import llm_gpt
9
  import numpy as np
10
  from src.genai.utils.data_loader import api_knowledge_df, api_index, caption_df , caption_index
11
  from src.genai.utils.models_loader import embedding_model
12
  from ..handlers.compare import compare
 
 
 
 
 
13
 
14
 
15
  class FetchLastMessage:
@@ -33,16 +38,17 @@ class RetrievePossibleEndpoints:
33
  def __init__(self):
34
  self.df = api_knowledge_df
35
  self.index = api_index
36
- self.results = []
 
37
 
38
  def run(self,state:State):
39
  print('Gone to retrieve possible endpoints')
40
- query_embedding = np.array(embedding_model.embed_query(state['latest_message'])).reshape(1, -1).astype('float32')
41
- distances, indices = self.index.search(query_embedding, 5)
42
- for idx in indices[0]:
43
- row = self.df.iloc[idx]
44
- print('Endpoint:',row['endpoint'])
45
- self.results.append(row['endpoint'])
46
  print('The possible endpoints are:', self.results)
47
  return {
48
  "possible_endpoints": self.results,
@@ -91,13 +97,13 @@ class QueryCheckNode:
91
  class FetchParametersNode:
92
  def __init__(self):
93
  self.llm = llm_gpt
94
-
 
95
  def run(self , state:State):
96
  try:
97
  print('Entered to fetch parameters')
98
- print(state['method'])
99
 
100
- if state['method']=='GET':
101
  template = fetch_parameters_prompt
102
  messages=[SystemMessage(content=template),HumanMessage(content=f'''The query is: {state['latest_message']}\n. The needed parameters: {str(state['needed_parameters'])}''')]
103
  # print('messages:', messages)
@@ -132,32 +138,30 @@ class FetchDataNode:
132
 
133
  def run(self, state:State):
134
  try:
 
135
  print('Entered to fetch data')
136
  url = f'''{self.base_url}{state['endpoint']}'''
137
 
138
  if state['endpoint'] == '/api/v1/compare/':
139
  response=compare(state,llm_gpt,url)
140
  return {'response': response.json()}
141
- # print('Condition satisfied')
142
- # messages = [SystemMessage(content=get_body_prompt()),
143
- # HumanMessage(content=str(state['messages']))]
144
- # response=llm_gpt.with_structured_output(CompareBodyFormatter , method='function_calling').invoke(messages)
145
- # print('INF names response:', response)
146
- # payload = {
147
- # "usernames": list(map(process_query,response.names)),
148
- # "freq": response.frequency
149
- # }
150
-
151
- # print('The payload is:',payload)
152
-
153
- # headers = {
154
- # "Content-Type": "application/json"
155
- # }
156
-
157
- # response = requests.post(url, json=payload, headers=headers)
158
- # print('Data from api:', response)
159
- # return {'response': response.json()}
160
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
161
  elif 'single_influencer_query' in state['query_type']:
162
  response = requests.get(url, params=state['parameters_values'],headers=self.headers)
163
  print('Data from api:', response)
 
2
  from langchain_core.messages import SystemMessage , HumanMessage , FunctionMessage
3
  from .state import State
4
  from .tools import RetrieverBackup
5
+ from .schemas import ParameterFormatter, EndpointFormatter
6
+ from .prompts import query_check_prompt, fetch_last_message_prompt , fetch_parameters_prompt, fetch_endpoint_prompt
7
  from .utils import process_query, get_endpoint_info
8
  from src.genai.utils.models_loader import llm_gpt
9
  import numpy as np
10
  from src.genai.utils.data_loader import api_knowledge_df, api_index, caption_df , caption_index
11
  from src.genai.utils.models_loader import embedding_model
12
  from ..handlers.compare import compare
13
+ from ..handlers.posting_time import get_posting_time
14
+ from ..handlers.peak_comment_hour import get_peak_comment_hour
15
+ from ..handlers.emoji_count import get_emoji_count
16
+ from ..handlers.comment_quality import get_comment_quality
17
+
18
 
19
 
20
  class FetchLastMessage:
 
38
  def __init__(self):
39
  self.df = api_knowledge_df
40
  self.index = api_index
41
+ # self.results = []
42
+ self.results = ['/api/v1/compare/', '/api/v1/engagement/basic-metrics', '/api/v1/content/hashtags-analysis', '/api/v1/audience/emoji-count', '/api/v1/engagement/temporal_analysis']
43
 
44
  def run(self,state:State):
45
  print('Gone to retrieve possible endpoints')
46
+ # query_embedding = np.array(embedding_model.embed_query(state['latest_message'])).reshape(1, -1).astype('float32')
47
+ # distances, indices = self.index.search(query_embedding, 5)
48
+ # for idx in indices[0]:
49
+ # row = self.df.iloc[idx]
50
+ # print('Endpoint:',row['endpoint'])
51
+ # self.results.append(row['endpoint'])
52
  print('The possible endpoints are:', self.results)
53
  return {
54
  "possible_endpoints": self.results,
 
97
  class FetchParametersNode:
98
  def __init__(self):
99
  self.llm = llm_gpt
100
+ self.complex_endpoints=['/api/v1/compare/','/api/v1/engagement/posting-time-analysis','/api/v1/audience/peak-comment-hour','/api/v1/audience/emoji-count','/api/v1/audience/comment-quality']
101
+
102
  def run(self , state:State):
103
  try:
104
  print('Entered to fetch parameters')
 
105
 
106
+ if state['endpoint'] not in self.complex_endpoints:
107
  template = fetch_parameters_prompt
108
  messages=[SystemMessage(content=template),HumanMessage(content=f'''The query is: {state['latest_message']}\n. The needed parameters: {str(state['needed_parameters'])}''')]
109
  # print('messages:', messages)
 
138
 
139
  def run(self, state:State):
140
  try:
141
+
142
  print('Entered to fetch data')
143
  url = f'''{self.base_url}{state['endpoint']}'''
144
 
145
  if state['endpoint'] == '/api/v1/compare/':
146
  response=compare(state,llm_gpt,url)
147
  return {'response': response.json()}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
 
149
+ elif state['endpoint'] == '/api/v1/engagement/posting-time-analysis':
150
+ response = get_posting_time(state, llm_gpt,url)
151
+ return {'response': response.json()}
152
+
153
+ elif state['endpoint']=='/api/v1/audience/peak-comment-hour':
154
+ response = get_peak_comment_hour(state,llm_gpt,url)
155
+ return {'response':response.json()}
156
+
157
+ elif state['endpoint']== '/api/v1/audience/emoji-count':
158
+ response = get_emoji_count(state,llm_gpt,url)
159
+ return {'response': response.json()}
160
+
161
+ elif state['endpoint']== '/api/v1/audience/comment-quality':
162
+ response = get_comment_quality(state,llm_gpt,url)
163
+ return {'response': response.json()}
164
+
165
  elif 'single_influencer_query' in state['query_type']:
166
  response = requests.get(url, params=state['parameters_values'],headers=self.headers)
167
  print('Data from api:', response)
src/genai/analytics_chatbot/utils/prompts.py CHANGED
@@ -1,47 +1,5 @@
1
- def chatbot_prompt():
2
- return f"""
3
- You are an intelligent assistant whose task is to route user queries to the correct API endpoint.
4
- You have access to the API knowledge base, which contains information about each endpoint:
5
- - The endpoint path
6
- -The method 'GET' or 'POST'
7
- - Its required parameters
8
- - A description of what the endpoint does
9
-
10
- Your job is to:
11
- 1. Read the user's natural language query.
12
- 2. Analyze the API knowledge base.
13
- 3. Identify the **most appropriate endpoint** that can satisfy the user's request.
14
- 4. Determine the required parameters for that endpoint and fill in their values based on the user's query.
15
- 5. Return the result in a **strict JSON format** exactly like this:
16
-
17
- "endpoint": "<chosen endpoint path>",
18
- "method": GET or POST
19
- "parameters":
20
- "<param1>": "<value1>",
21
- "<param2>": "<value2>"
22
-
23
- Important instructions:
24
- - Only return endpoints that exist in the API knowledge base.
25
- - Include all required parameters for the endpoint.
26
- - If the parameter or method is not specified in the user's query, return it as null.
27
- - Do not add any extra explanation or text; return **only the JSON**.
28
- - The API knowledge base will be provided as a separate function message.
29
 
30
- Example:
31
- User query: "Give me the buzz trend of influencer John for last month"
32
- API knowledge: contains endpoint "/overview/buzz_trend" with parameters ["period", "influencer_username"]
33
- Expected output:
34
-
35
- "endpoint": "/api/v1/overview/buzz_trend",
36
- "method": GET
37
- "parameters":
38
- "period": "monthly",
39
- "influencer_username": "John"
40
-
41
- Your response must always follow this exact JSON format.
42
- """
43
-
44
- def get_body_prompt():
45
  return '''You are given a user query for comparing influencers.
46
 
47
  Your task:
@@ -110,11 +68,6 @@ endpoint: /api/v1/analytics/engagement
110
 
111
  '''
112
 
113
- backup_retrieval_prompt = '''
114
- You are provided with the retrieved data as a function message and the user query.
115
- Respond to the user query only through the context of retrieved data. Don't give hallucinated responses.
116
- '''
117
-
118
  query_check_prompt = '''
119
  You are an intent classification assistant.
120
  Given a user query about influencer analytics, classify it as one of the following types:
@@ -123,4 +76,36 @@ Given a user query about influencer analytics, classify it as one of the followi
123
  2. aggregate_query — if the query involves comparing multiple influencers, rankings, or overall statistics (e.g., "Who has the highest engagement?").
124
 
125
  Return only one label: "single_influencer_query" or "aggregate_query".
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126
  '''
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 
2
+ def compare_prompt():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  return '''You are given a user query for comparing influencers.
4
 
5
  Your task:
 
68
 
69
  '''
70
 
 
 
 
 
 
71
  query_check_prompt = '''
72
  You are an intent classification assistant.
73
  Given a user query about influencer analytics, classify it as one of the following types:
 
76
  2. aggregate_query — if the query involves comparing multiple influencers, rankings, or overall statistics (e.g., "Who has the highest engagement?").
77
 
78
  Return only one label: "single_influencer_query" or "aggregate_query".
79
+ '''
80
+
81
+
82
+ posting_time_analysis_prompt = '''
83
+ You are perfect parameters extractor for posting time analysis of the influencer.
84
+ Given a user query and a list of needed parameters, return a Python dictionary assigning the best value for each parameter.
85
+ You have to return a dictionary containing influencer_name , start_date and end_date. If there is no any mention of the dates, keep the dates as None.
86
+ '''
87
+
88
+ peak_comment_hour_prompt = '''
89
+ You are perfect parameters extractor for analysis of peak comment hour the influencer.
90
+ Given a user query and a list of needed parameters, return a Python dictionary assigning the best value for each parameter.
91
+ You have to return a dictionary containing influencer_name , start_date and end_date. If there is no any mention of the dates, keep the dates as None.
92
+ '''
93
+
94
+ emoji_count_prompt = '''
95
+ You are perfect parameters extractor for analysis of emoji count of the influencer.
96
+ Given a user query and a list of needed parameters, return a Python dictionary assigning the best value for each parameter.
97
+ You have to return a dictionary containing influencer_name , and the number of emoji (top_n) by understanding the user query. If there is no any mention of the number of emoji, then keep it 15 as default.
98
+ '''
99
+
100
+ comment_quality_prompt = '''
101
+ You are perfect parameters extractor for analysis of comment quality of the influencer.
102
+ Given a user query and a list of needed parameters, return a Python dictionary assigning the best value for each parameter.
103
+ You have to return a dictionary containing influencer_name , start_date and end_date. If there is no any mention of the dates, keep the dates as None.
104
+ '''
105
+
106
+ bot_and_diversity_prompt = '''
107
+ You are perfect parameters extractor for analysis of bot and comment diversity of the influencer.
108
+ Given a user query and a list of needed parameters, return a Python dictionary assigning the best value for each parameter.
109
+ You have to return a dictionary containing influencer_name , number of commentors (top_n), start_date and end_date from the user query.
110
+ If there is no any specific mention of dates, you can return None for dates. In the case of number of commentors, return a default value of 10 if the number is not passed from the user.
111
  '''
src/genai/analytics_chatbot/utils/schemas.py CHANGED
@@ -1,5 +1,6 @@
1
  from pydantic import BaseModel, Field
2
  from typing import Optional , Dict , Any
 
3
 
4
  class ResponseFormatter(BaseModel):
5
  endpoint: str = Field(description='Return the exact endpoint from the knowledge base of endpoints.')
@@ -21,4 +22,29 @@ class ParameterFormatter(BaseModel):
21
 
22
  class EndpointFormatter(BaseModel):
23
  endpoint: str
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
 
1
  from pydantic import BaseModel, Field
2
  from typing import Optional , Dict , Any
3
+ from datetime import date
4
 
5
  class ResponseFormatter(BaseModel):
6
  endpoint: str = Field(description='Return the exact endpoint from the knowledge base of endpoints.')
 
22
 
23
  class EndpointFormatter(BaseModel):
24
  endpoint: str
class PostingTimeFormatter(BaseModel):
    """Parameters extracted for posting-time analysis."""
    # None means the user gave no date filter.
    start_date: Optional[date] = None
    end_date: Optional[date] = None
    influencer_name: str


class PeakCommentHourFormatter(BaseModel):
    """Parameters extracted for peak-comment-hour analysis."""
    start_date: Optional[date] = None
    end_date: Optional[date] = None
    influencer_name: str


class EmojiCountFormater(BaseModel):
    """Parameters extracted for emoji-count analysis.

    NOTE: the class name keeps the original misspelling ("Formater") because
    handlers already import it under this name; prefer the alias below in new code.
    """
    top_n: int
    influencer_name: str


# Correctly spelled, backward-compatible alias for new call sites.
EmojiCountFormatter = EmojiCountFormater


class CommentQualityFormatter(BaseModel):
    """Parameters extracted for comment-quality analysis."""
    start_date: Optional[date] = None
    end_date: Optional[date] = None
    influencer_name: str


class BotAndDiversityFormatter(BaseModel):
    """Parameters extracted for bot / comment-diversity analysis."""
    start_date: Optional[date] = None
    end_date: Optional[date] = None
    influencer_name: str
    top_n: int
 
src/genai/utils/models_loader.py CHANGED
@@ -23,8 +23,8 @@ llm_groq = ChatGroq(model="llama-3.3-70b-versatile",temperature=0)
23
  llm_gpt_small = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
24
  llm_gpt = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
25
  llm_gpt_high = ChatOpenAI(model="gpt-5-nano",temperature=0.5)
26
- encoding_model = tiktoken.encoding_for_model('gpt-4o-mini')
27
- # encoding_model = 'encoding_model'
28
 
29
 
30
  captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct"
 
23
  llm_gpt_small = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
24
  llm_gpt = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
25
  llm_gpt_high = ChatOpenAI(model="gpt-5-nano",temperature=0.5)
26
+ # encoding_model = tiktoken.encoding_for_model('gpt-4o-mini')
27
+ encoding_model = 'encoding_model'
28
 
29
 
30
  captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct"