Spaces:
Sleeping
Sleeping
Commit
·
92cf6e2
1
Parent(s):
03ef145
Next commit
Browse files
- src/genai/analytics_chatbot/handlers/__init__.py +0 -0
- src/genai/analytics_chatbot/handlers/bot_and_diversity.py +0 -0
- src/genai/analytics_chatbot/handlers/comment_quality.py +0 -0
- src/genai/analytics_chatbot/handlers/compare.py +26 -0
- src/genai/analytics_chatbot/handlers/emoji_count.py +0 -0
- src/genai/analytics_chatbot/handlers/peak_comment_hour.py +0 -0
- src/genai/analytics_chatbot/handlers/posting_time.py +0 -0
- src/genai/analytics_chatbot/utils/__init__.py +1 -0
- src/genai/analytics_chatbot/utils/nodes.py +23 -19
src/genai/analytics_chatbot/handlers/__init__.py
ADDED
|
File without changes
|
src/genai/analytics_chatbot/handlers/bot_and_diversity.py
ADDED
|
File without changes
|
src/genai/analytics_chatbot/handlers/comment_quality.py
ADDED
|
File without changes
|
src/genai/analytics_chatbot/handlers/compare.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import requests
from langchain_core.messages import SystemMessage, HumanMessage

from ..utils.schemas import CompareBodyFormatter
from ..utils.prompts import get_body_prompt
from ..utils.utils import process_query


def compare(state, llm_gpt, url):
    """Handle a compare-endpoint query.

    Uses a structured-output LLM call to extract the influencer names and
    frequency from the conversation, builds the request payload, and POSTs
    it to the analytics API.

    Args:
        state: Graph state mapping; only ``state['messages']`` is read here.
        llm_gpt: Chat model exposing ``with_structured_output(...).invoke(...)``.
        url: Full URL of the compare API endpoint.

    Returns:
        The raw ``requests.Response`` object (callers are expected to call
        ``.json()`` on it).
    """
    messages = [
        SystemMessage(content=get_body_prompt()),
        HumanMessage(content=str(state['messages'])),
    ]
    # Extract names + frequency from the chat history into a typed object.
    # (Renamed from `response` so it is not shadowed by the HTTP response below.)
    extraction = llm_gpt.with_structured_output(
        CompareBodyFormatter, method='function_calling'
    ).invoke(messages)
    print('INF names response:', extraction)

    payload = {
        # Normalise each extracted name before sending it to the API.
        "usernames": list(map(process_query, extraction.names)),
        "freq": extraction.frequency,
    }

    print('The payload is:', payload)

    headers = {
        "Content-Type": "application/json"
    }

    # timeout added so a stalled API call cannot hang the chatbot forever.
    response = requests.post(url, json=payload, headers=headers, timeout=30)
    print('Data from api:', response)
    return response
|
src/genai/analytics_chatbot/handlers/emoji_count.py
ADDED
|
File without changes
|
src/genai/analytics_chatbot/handlers/peak_comment_hour.py
ADDED
|
File without changes
|
src/genai/analytics_chatbot/handlers/posting_time.py
ADDED
|
File without changes
|
src/genai/analytics_chatbot/utils/__init__.py
CHANGED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# NOTE(review): module-level constant added to utils/__init__.py; its purpose
# is not visible from this diff — presumably a placeholder to make the package
# importable / testable. TODO confirm and document or remove.
init_variable = 5
|
src/genai/analytics_chatbot/utils/nodes.py
CHANGED
|
@@ -2,13 +2,14 @@ import requests
|
|
| 2 |
from langchain_core.messages import SystemMessage , HumanMessage , FunctionMessage
|
| 3 |
from .state import State
|
| 4 |
from .tools import RetrieverBackup
|
| 5 |
-
from .schemas import
|
| 6 |
from .prompts import query_check_prompt , get_body_prompt , fetch_last_message_prompt , fetch_parameters_prompt, fetch_endpoint_prompt, backup_retrieval_prompt
|
| 7 |
-
from .utils import
|
| 8 |
from src.genai.utils.models_loader import llm_gpt
|
| 9 |
import numpy as np
|
| 10 |
from src.genai.utils.data_loader import api_knowledge_df, api_index, caption_df , caption_index
|
| 11 |
from src.genai.utils.models_loader import embedding_model
|
|
|
|
| 12 |
|
| 13 |
|
| 14 |
class FetchLastMessage:
|
|
@@ -135,27 +136,31 @@ class FetchDataNode:
|
|
| 135 |
url = f'''{self.base_url}{state['endpoint']}'''
|
| 136 |
|
| 137 |
if state['endpoint'] == '/api/v1/compare/':
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
|
|
|
|
|
|
| 147 |
|
| 148 |
-
print('The payload is:',payload)
|
| 149 |
|
| 150 |
-
headers = {
|
| 151 |
-
"Content-Type": "application/json"
|
| 152 |
-
}
|
| 153 |
|
| 154 |
-
response = requests.post(url, json=payload, headers=headers)
|
| 155 |
-
|
|
|
|
| 156 |
|
| 157 |
elif 'single_influencer_query' in state['query_type']:
|
| 158 |
response = requests.get(url, params=state['parameters_values'],headers=self.headers)
|
|
|
|
| 159 |
return {'response':response.json()}
|
| 160 |
|
| 161 |
elif 'aggregate_query' in state['query_type']:
|
|
@@ -172,10 +177,9 @@ class FetchDataNode:
|
|
| 172 |
|
| 173 |
response = requests.get(url, params=current_params, headers=self.headers)
|
| 174 |
results[username] = response.json() # Store influencer-wise response
|
| 175 |
-
|
| 176 |
return {"response": results}
|
| 177 |
|
| 178 |
-
print('Data from api:', response)
|
| 179 |
|
| 180 |
except Exception as e:
|
| 181 |
print('Error occoured:', e)
|
|
|
|
| 2 |
from langchain_core.messages import SystemMessage , HumanMessage , FunctionMessage
|
| 3 |
from .state import State
|
| 4 |
from .tools import RetrieverBackup
|
| 5 |
+
from .schemas import CompareBodyFormatter, ParameterFormatter, EndpointFormatter
|
| 6 |
from .prompts import query_check_prompt , get_body_prompt , fetch_last_message_prompt , fetch_parameters_prompt, fetch_endpoint_prompt, backup_retrieval_prompt
|
| 7 |
+
from .utils import process_query, get_endpoint_info
|
| 8 |
from src.genai.utils.models_loader import llm_gpt
|
| 9 |
import numpy as np
|
| 10 |
from src.genai.utils.data_loader import api_knowledge_df, api_index, caption_df , caption_index
|
| 11 |
from src.genai.utils.models_loader import embedding_model
|
| 12 |
+
from ..handlers.compare import compare
|
| 13 |
|
| 14 |
|
| 15 |
class FetchLastMessage:
|
|
|
|
| 136 |
url = f'''{self.base_url}{state['endpoint']}'''
|
| 137 |
|
| 138 |
if state['endpoint'] == '/api/v1/compare/':
|
| 139 |
+
response=compare(state,llm_gpt,url)
|
| 140 |
+
return {'response': response.json()}
|
| 141 |
+
# print('Condition satisfied')
|
| 142 |
+
# messages = [SystemMessage(content=get_body_prompt()),
|
| 143 |
+
# HumanMessage(content=str(state['messages']))]
|
| 144 |
+
# response=llm_gpt.with_structured_output(CompareBodyFormatter , method='function_calling').invoke(messages)
|
| 145 |
+
# print('INF names response:', response)
|
| 146 |
+
# payload = {
|
| 147 |
+
# "usernames": list(map(process_query,response.names)),
|
| 148 |
+
# "freq": response.frequency
|
| 149 |
+
# }
|
| 150 |
|
| 151 |
+
# print('The payload is:',payload)
|
| 152 |
|
| 153 |
+
# headers = {
|
| 154 |
+
# "Content-Type": "application/json"
|
| 155 |
+
# }
|
| 156 |
|
| 157 |
+
# response = requests.post(url, json=payload, headers=headers)
|
| 158 |
+
# print('Data from api:', response)
|
| 159 |
+
# return {'response': response.json()}
|
| 160 |
|
| 161 |
elif 'single_influencer_query' in state['query_type']:
|
| 162 |
response = requests.get(url, params=state['parameters_values'],headers=self.headers)
|
| 163 |
+
print('Data from api:', response)
|
| 164 |
return {'response':response.json()}
|
| 165 |
|
| 166 |
elif 'aggregate_query' in state['query_type']:
|
|
|
|
| 177 |
|
| 178 |
response = requests.get(url, params=current_params, headers=self.headers)
|
| 179 |
results[username] = response.json() # Store influencer-wise response
|
| 180 |
+
print('Data from api:', response)
|
| 181 |
return {"response": results}
|
| 182 |
|
|
|
|
| 183 |
|
| 184 |
except Exception as e:
|
| 185 |
print('Error occoured:', e)
|