subashpoudel commited on
Commit
93a5bf9
·
1 Parent(s): d7bad29

Changed the entire project structure

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. __pycache__/main.cpython-312.pyc +0 -0
  2. {my_agent → brainstroming_agent}/__init__.py +0 -0
  3. {my_agent → brainstroming_agent}/__pycache__/__init__.cpython-312.pyc +0 -0
  4. {my_agent → brainstroming_agent}/__pycache__/agent.cpython-312.pyc +0 -0
  5. {my_agent → brainstroming_agent}/agent.py +1 -1
  6. {my_agent → brainstroming_agent}/utils/__init__.py +0 -0
  7. {my_agent → brainstroming_agent}/utils/__pycache__/__init__.cpython-312.pyc +0 -0
  8. {my_agent → brainstroming_agent}/utils/__pycache__/business_interaction.cpython-312.pyc +0 -0
  9. {my_agent → brainstroming_agent}/utils/__pycache__/check.cpython-312.pyc +0 -0
  10. {my_agent → brainstroming_agent}/utils/__pycache__/data_loader.cpython-312.pyc +0 -0
  11. {my_agent → brainstroming_agent}/utils/__pycache__/initial_interaction.cpython-312.pyc +0 -0
  12. {my_agent → brainstroming_agent}/utils/__pycache__/models.cpython-312.pyc +0 -0
  13. {my_agent → brainstroming_agent}/utils/__pycache__/models_loader.cpython-312.pyc +0 -0
  14. {my_agent → brainstroming_agent}/utils/__pycache__/nodes.cpython-312.pyc +0 -0
  15. {my_agent → brainstroming_agent}/utils/__pycache__/prompts.cpython-312.pyc +0 -0
  16. brainstroming_agent/utils/__pycache__/state.cpython-312.pyc +0 -0
  17. {my_agent → brainstroming_agent}/utils/__pycache__/tools.cpython-312.pyc +0 -0
  18. {my_agent → brainstroming_agent}/utils/__pycache__/utils.cpython-312.pyc +0 -0
  19. {my_agent → brainstroming_agent}/utils/__pycache__/validators.cpython-312.pyc +0 -0
  20. {my_agent → brainstroming_agent}/utils/nodes.py +5 -12
  21. {my_agent → brainstroming_agent}/utils/prompts.py +0 -0
  22. {my_agent → brainstroming_agent}/utils/state.py +9 -2
  23. {my_agent → brainstroming_agent}/utils/tools.py +2 -2
  24. {my_agent → brainstroming_agent}/utils/utils.py +2 -3
  25. {my_agent → brainstroming_agent}/utils/validators.py +0 -0
  26. business_interaction_agent/__init__.py +0 -0
  27. business_interaction_agent/__pycache__/__init__.cpython-312.pyc +0 -0
  28. business_interaction_agent/__pycache__/agent.cpython-312.pyc +0 -0
  29. my_agent/utils/business_interaction.py → business_interaction_agent/agent.py +35 -17
  30. business_interaction_agent/utils/__init__.py +0 -0
  31. business_interaction_agent/utils/__pycache__/__init__.cpython-312.pyc +0 -0
  32. business_interaction_agent/utils/__pycache__/nodes.cpython-312.pyc +0 -0
  33. business_interaction_agent/utils/__pycache__/prompts.cpython-312.pyc +0 -0
  34. business_interaction_agent/utils/__pycache__/state.cpython-312.pyc +0 -0
  35. business_interaction_agent/utils/__pycache__/utils.cpython-312.pyc +0 -0
  36. business_interaction_agent/utils/nodes.py +20 -0
  37. business_interaction_agent/utils/prompts.py +44 -0
  38. business_interaction_agent/utils/state.py +6 -0
  39. business_interaction_agent/utils/tools.py +0 -0
  40. business_interaction_agent/utils/utils.py +67 -0
  41. context_analysis_agent/__init__.py +0 -0
  42. context_analysis_agent/__pycache__/__init__.cpython-312.pyc +0 -0
  43. context_analysis_agent/__pycache__/agent.cpython-312.pyc +0 -0
  44. my_agent/utils/initial_interaction.py → context_analysis_agent/agent.py +7 -29
  45. context_analysis_agent/utils/__init__.py +0 -0
  46. context_analysis_agent/utils/__pycache__/__init__.cpython-312.pyc +0 -0
  47. context_analysis_agent/utils/__pycache__/nodes.cpython-312.pyc +0 -0
  48. context_analysis_agent/utils/__pycache__/prompts.cpython-312.pyc +0 -0
  49. context_analysis_agent/utils/__pycache__/state.cpython-312.pyc +0 -0
  50. context_analysis_agent/utils/__pycache__/utils.cpython-312.pyc +0 -0
__pycache__/main.cpython-312.pyc CHANGED
Binary files a/__pycache__/main.cpython-312.pyc and b/__pycache__/main.cpython-312.pyc differ
 
{my_agent → brainstroming_agent}/__init__.py RENAMED
File without changes
{my_agent → brainstroming_agent}/__pycache__/__init__.cpython-312.pyc RENAMED
File without changes
{my_agent → brainstroming_agent}/__pycache__/agent.cpython-312.pyc RENAMED
Binary files a/my_agent/__pycache__/agent.cpython-312.pyc and b/brainstroming_agent/__pycache__/agent.cpython-312.pyc differ
 
{my_agent → brainstroming_agent}/agent.py RENAMED
@@ -6,7 +6,7 @@ from langgraph.checkpoint.memory import MemorySaver
6
  memory = MemorySaver()
7
 
8
 
9
- def build_graph():
10
  builder = StateGraph(State)
11
  builder.add_node(caption_image)
12
  builder.add_node(retrieve)
 
6
  memory = MemorySaver()
7
 
8
 
9
+ def brainstroming_graph():
10
  builder = StateGraph(State)
11
  builder.add_node(caption_image)
12
  builder.add_node(retrieve)
{my_agent → brainstroming_agent}/utils/__init__.py RENAMED
File without changes
{my_agent → brainstroming_agent}/utils/__pycache__/__init__.cpython-312.pyc RENAMED
File without changes
{my_agent → brainstroming_agent}/utils/__pycache__/business_interaction.cpython-312.pyc RENAMED
File without changes
{my_agent → brainstroming_agent}/utils/__pycache__/check.cpython-312.pyc RENAMED
File without changes
{my_agent → brainstroming_agent}/utils/__pycache__/data_loader.cpython-312.pyc RENAMED
File without changes
{my_agent → brainstroming_agent}/utils/__pycache__/initial_interaction.cpython-312.pyc RENAMED
File without changes
{my_agent → brainstroming_agent}/utils/__pycache__/models.cpython-312.pyc RENAMED
File without changes
{my_agent → brainstroming_agent}/utils/__pycache__/models_loader.cpython-312.pyc RENAMED
File without changes
{my_agent → brainstroming_agent}/utils/__pycache__/nodes.cpython-312.pyc RENAMED
Binary files a/my_agent/utils/__pycache__/nodes.cpython-312.pyc and b/brainstroming_agent/utils/__pycache__/nodes.cpython-312.pyc differ
 
{my_agent → brainstroming_agent}/utils/__pycache__/prompts.cpython-312.pyc RENAMED
File without changes
brainstroming_agent/utils/__pycache__/state.cpython-312.pyc ADDED
Binary file (1.85 kB). View file
 
{my_agent → brainstroming_agent}/utils/__pycache__/tools.cpython-312.pyc RENAMED
Binary files a/my_agent/utils/__pycache__/tools.cpython-312.pyc and b/brainstroming_agent/utils/__pycache__/tools.cpython-312.pyc differ
 
{my_agent → brainstroming_agent}/utils/__pycache__/utils.cpython-312.pyc RENAMED
Binary files a/my_agent/utils/__pycache__/utils.cpython-312.pyc and b/brainstroming_agent/utils/__pycache__/utils.cpython-312.pyc differ
 
{my_agent → brainstroming_agent}/utils/__pycache__/validators.cpython-312.pyc RENAMED
File without changes
{my_agent → brainstroming_agent}/utils/nodes.py RENAMED
@@ -3,15 +3,15 @@ import ast
3
  from .state import State
4
  from .tools import retrieve_tool
5
  from langchain_core.messages import SystemMessage
6
- from .models_loader import llm , ST
7
- from .data_loader import load_influencer_data
8
  from groq import Groq
9
  import os
10
  from .prompts import image_captioning_prompt , initial_story_prompt , refined_story_prompt , brainstroming_prompt , final_story_prompt
11
  from langgraph.prebuilt import create_react_agent
12
  from pydantic import BaseModel , Field
13
  from langchain_core.tools import tool
14
- from .validators import BrainstromTopicFormatter
15
 
16
 
17
 
@@ -113,15 +113,8 @@ def generate_brainstroming(state:State)-> State:
113
  template= brainstroming_prompt(state)
114
 
115
  messages = [SystemMessage(content=template)]
116
- response = llm.bind_tools([BrainstromTopicFormatter]).invoke(messages)
117
- print('The response is:',response)
118
- if hasattr(response, 'tool_calls') and response.tool_calls:
119
- response = response.tool_calls[0]['args']
120
- elif hasattr(response, 'content'):
121
- response = response.content
122
- else:
123
- response = "No response"
124
-
125
  state.brainstroming_topics.append(response)
126
  print('The brainstroming topics are:',state.brainstroming_topics)
127
  # return State(messages="Story generated",topic=state.topic,brainstroming_topics=state.brainstroming_topics)
 
3
  from .state import State
4
  from .tools import retrieve_tool
5
  from langchain_core.messages import SystemMessage
6
+ from utils.models_loader import llm , ST
7
+ from utils.data_loader import load_influencer_data
8
  from groq import Groq
9
  import os
10
  from .prompts import image_captioning_prompt , initial_story_prompt , refined_story_prompt , brainstroming_prompt , final_story_prompt
11
  from langgraph.prebuilt import create_react_agent
12
  from pydantic import BaseModel , Field
13
  from langchain_core.tools import tool
14
+ from .state import BrainstromTopicFormatter
15
 
16
 
17
 
 
113
  template= brainstroming_prompt(state)
114
 
115
  messages = [SystemMessage(content=template)]
116
+ response = llm.with_structured_output(BrainstromTopicFormatter).invoke(messages)
117
+ response = response.model_dump()
 
 
 
 
 
 
 
118
  state.brainstroming_topics.append(response)
119
  print('The brainstroming topics are:',state.brainstroming_topics)
120
  # return State(messages="Story generated",topic=state.topic,brainstroming_topics=state.brainstroming_topics)
{my_agent → brainstroming_agent}/utils/prompts.py RENAMED
File without changes
{my_agent → brainstroming_agent}/utils/state.py RENAMED
@@ -1,4 +1,4 @@
1
- from pydantic import BaseModel, ConfigDict
2
  from typing import Optional
3
  import pandas as pd
4
 
@@ -15,4 +15,11 @@ class State(BaseModel):
15
  latest_preferred_topics: Optional[list] = []
16
  images: Optional[list[str]] = [] # Base64-encoded strings of images
17
  image_captions: Optional[list] = []
18
- model_config = ConfigDict(arbitrary_types_allowed=True)
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel, ConfigDict , Field
2
  from typing import Optional
3
  import pandas as pd
4
 
 
15
  latest_preferred_topics: Optional[list] = []
16
  images: Optional[list[str]] = [] # Base64-encoded strings of images
17
  image_captions: Optional[list] = []
18
+ model_config = ConfigDict(arbitrary_types_allowed=True)
19
+
20
+
21
+ class BrainstromTopicFormatter(BaseModel):
22
+ topic1:str=Field(description="First brainstorming topic of the story")
23
+ topic2:str=Field(description="Second brainstorming topic of the story")
24
+ topic3:str=Field(description="Third brainstorming topic of the story")
25
+ topic4:str=Field(description="Fourth brainstorming topic of the story")
{my_agent → brainstroming_agent}/utils/tools.py RENAMED
@@ -5,8 +5,8 @@ load_dotenv()
5
  import os
6
  import numpy as np
7
  from langchain_core.tools import tool
8
- from .data_loader import load_influencer_data
9
- from .models_loader import ST , llm
10
  from sklearn.metrics.pairwise import cosine_similarity
11
  import numpy as np
12
  from langchain_core.messages import SystemMessage
 
5
  import os
6
  import numpy as np
7
  from langchain_core.tools import tool
8
+ from utils.data_loader import load_influencer_data
9
+ from utils.models_loader import ST , llm
10
  from sklearn.metrics.pairwise import cosine_similarity
11
  import numpy as np
12
  from langchain_core.messages import SystemMessage
{my_agent → brainstroming_agent}/utils/utils.py RENAMED
@@ -1,7 +1,6 @@
1
 
2
  from langchain_core.messages import SystemMessage
3
- from .tools import StoryFormatter , retrieve_tool
4
- from .models_loader import llm
5
  import base64
6
  from PIL import Image
7
  from io import BytesIO
@@ -16,7 +15,7 @@ import ast
16
  import faiss
17
  import re
18
  import numpy as np
19
- from .models_loader import ST
20
 
21
 
22
 
 
1
 
2
  from langchain_core.messages import SystemMessage
3
+ from .tools import retrieve_tool
 
4
  import base64
5
  from PIL import Image
6
  from io import BytesIO
 
15
  import faiss
16
  import re
17
  import numpy as np
18
+ from utils.models_loader import ST , llm
19
 
20
 
21
 
{my_agent → brainstroming_agent}/utils/validators.py RENAMED
File without changes
business_interaction_agent/__init__.py ADDED
File without changes
business_interaction_agent/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (194 Bytes). View file
 
business_interaction_agent/__pycache__/agent.cpython-312.pyc ADDED
Binary file (4.75 kB). View file
 
my_agent/utils/business_interaction.py → business_interaction_agent/agent.py RENAMED
@@ -1,26 +1,44 @@
1
- import os
2
  from langchain_groq import ChatGroq
3
  from langgraph.graph import StateGraph, MessagesState, START, END
4
  from langgraph.checkpoint.memory import MemorySaver
5
- from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
6
- from pydantic import BaseModel, ConfigDict, Field
7
- from typing import Optional, List
8
- from .models_loader import llm,ST
9
- from .prompts import introduction_prompt , business_interaction_prompt, business_retrieval_prompt
10
- from .tools import retrieve_tool
11
  from langgraph.prebuilt import create_react_agent
12
- from .utils import manual_retrieval
13
-
 
 
 
 
14
 
 
15
 
 
 
 
 
 
 
 
 
16
 
17
- # State model
18
- class State(BaseModel):
19
- interactions: Optional[list] = []
20
- model_config = ConfigDict(arbitrary_types_allowed=True)
 
 
 
 
 
21
 
22
- # Global business state (shared)
23
- business_state = State()
 
 
 
 
 
 
 
24
 
25
  class BusinessInteractionChatbot:
26
  def __init__(self):
@@ -51,7 +69,7 @@ class BusinessInteractionChatbot:
51
  def _call_model(self, state):
52
  print('Entered into callmodel')
53
  retrievals = manual_retrieval(str([msg['content'] for msg in self.messages if msg['role'] == 'user']),self.business_details)
54
- template = business_retrieval_prompt(str([msg['content'] for msg in self.messages if msg['role'] == 'user']),retrievals)
55
  messages = [SystemMessage(content=template)] + state["messages"]
56
  backup_response = self.react_agent.invoke({'messages':messages})['messages'][-1]
57
  print('Backup response:',backup_response.content)
@@ -68,4 +86,4 @@ class BusinessInteractionChatbot:
68
  self.messages.append({"role": "assistant", "content": response})
69
  print('The message_history:',self.messages)
70
  business_state.interactions.append({'user': user_input, 'agent_response': response})
71
- return response
 
 
1
  from langchain_groq import ChatGroq
2
  from langgraph.graph import StateGraph, MessagesState, START, END
3
  from langgraph.checkpoint.memory import MemorySaver
 
 
 
 
 
 
4
  from langgraph.prebuilt import create_react_agent
5
+ from .utils.state import State
6
+ # from .utils.nodes import business_interaction_node, cleanup_messages
7
+ from utils.models_loader import llm
8
+ from langchain_core.messages import SystemMessage
9
+ from .utils.prompts import business_retrieval_prompt
10
+ from .utils.utils import manual_retrieval
11
 
12
+ business_state = State()
13
 
14
+ # class BusinessInteractionChatbot:
15
+ # def __init__(self):
16
+ # self.messages = []
17
+ # self.business_details = None
18
+ # self.react_agent = create_react_agent(model=llm, tools=[])
19
+ # self.memory = MemorySaver()
20
+ # self.workflow = self._initialize_workflow()
21
+ # self.interact_agent = self.workflow.compile(checkpointer=self.memory)
22
 
23
+ # def _initialize_workflow(self):
24
+ # workflow = StateGraph(MessagesState)
25
+ # workflow.add_node("chatbot", lambda state: business_interaction_node(
26
+ # state, llm, self.react_agent, self.messages, self.business_details))
27
+ # workflow.add_node("remove_message", lambda state: cleanup_messages(self.messages))
28
+ # workflow.add_edge(START, "chatbot")
29
+ # workflow.add_edge("chatbot", "remove_message")
30
+ # workflow.add_edge("chatbot", END)
31
+ # return workflow
32
 
33
+ # def chat(self, user_input: str, business_details: dict):
34
+ # self.business_details = business_details
35
+ # self.messages.append({"role": "user", "content": user_input})
36
+ # config = {"configurable": {"thread_id": "2"}}
37
+ # response = self.interact_agent.invoke({"messages": self.messages}, config)['messages'][-1].content
38
+ # self.messages.append({"role": "assistant", "content": response})
39
+ # business_state.interactions.append({'user': user_input, 'agent_response': response})
40
+ # return response
41
+
42
 
43
  class BusinessInteractionChatbot:
44
  def __init__(self):
 
69
  def _call_model(self, state):
70
  print('Entered into callmodel')
71
  retrievals = manual_retrieval(str([msg['content'] for msg in self.messages if msg['role'] == 'user']),self.business_details)
72
+ template = business_retrieval_prompt(str([msg['content'] for msg in self.messages if msg['role'] == 'user']),retrievals,str(self.business_details))
73
  messages = [SystemMessage(content=template)] + state["messages"]
74
  backup_response = self.react_agent.invoke({'messages':messages})['messages'][-1]
75
  print('Backup response:',backup_response.content)
 
86
  self.messages.append({"role": "assistant", "content": response})
87
  print('The message_history:',self.messages)
88
  business_state.interactions.append({'user': user_input, 'agent_response': response})
89
+ return response
business_interaction_agent/utils/__init__.py ADDED
File without changes
business_interaction_agent/utils/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (200 Bytes). View file
 
business_interaction_agent/utils/__pycache__/nodes.cpython-312.pyc ADDED
Binary file (1.43 kB). View file
 
business_interaction_agent/utils/__pycache__/prompts.cpython-312.pyc ADDED
Binary file (2.68 kB). View file
 
business_interaction_agent/utils/__pycache__/state.cpython-312.pyc ADDED
Binary file (632 Bytes). View file
 
business_interaction_agent/utils/__pycache__/utils.cpython-312.pyc ADDED
Binary file (3.77 kB). View file
 
business_interaction_agent/utils/nodes.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.messages import SystemMessage
2
+ from .prompts import business_retrieval_prompt
3
+ from .utils import manual_retrieval
4
+
5
+ # This node generates a response using business context and retrieval
6
+ def business_interaction_node(state, llm, react_agent, messages, business_details):
7
+ print('Entered into callmodel')
8
+ user_inputs = str([msg['content'] for msg in messages if msg['role'] == 'user'])
9
+ retrievals = manual_retrieval(user_inputs, business_details)
10
+ template = business_retrieval_prompt(user_inputs, retrievals)
11
+ formatted_messages = [SystemMessage(content=template)] + state["messages"]
12
+ response = react_agent.invoke({'messages': formatted_messages})['messages'][-1]
13
+ print('Backup response:', response.content)
14
+ return {"messages": [response.content]}
15
+
16
+ # Optional message cleanup node
17
+ def cleanup_messages(messages):
18
+ if len(messages) > 4:
19
+ return messages[2:]
20
+ return messages
business_interaction_agent/utils/prompts.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ def business_retrieval_prompt(user_message, retrievals, business_details):
2
+ return f'''
3
+ You are a professional AI assistant helping users understand how influencers can support their business. You will be given:
4
+
5
+ - A **user message**
6
+ - A list of **retrieved influencer data**
7
+ - The **business details** provided by the user
8
+
9
+ Your job is to:
10
+ 1. **First**, analyze the user message and decide if it is actually a business-related question or query that could be answered using influencer content.
11
+ - If the message is just a casual greeting like “Hi”, “Hello”, “How are you?”, or is not business-related (e.g., “Who are you?”, “Tell me a joke”), then **do NOT use the retrievals or business details**. Just respond to the user naturally and politely.
12
+
13
+ 2. **If the message is business-related**, proceed to:
14
+ a. Analyze the **business details** and check if the user message aligns with the type of business.
15
+ - If there is a mismatch (e.g., business is a restaurant but the user asks about clothing), politely **alert the user** about the mismatch.
16
+ - Still, go ahead and provide a helpful answer using relevant influencer data if available.
17
+ b. Analyze the influencer data and explain how each influencer might support the business based only on the retrieved content.
18
+
19
+ --- USER MESSAGE ---
20
+ {user_message}
21
+
22
+ --- BUSINESS DETAILS ---
23
+ {business_details}
24
+
25
+ --- START OF RETRIEVALS ---
26
+ {retrievals}
27
+ --- END OF RETRIEVALS ---
28
+
29
+ Rules:
30
+ - If the user message is **not relevant to influencer or business help**, politely respond in a general helpful way and ignore the retrievals and business details.
31
+ - If the message **is relevant**, then:
32
+ - First verify if the user’s business type matches the context of their message.
33
+ - If not, display a short alert to the user like: “Note: Your query seems to focus on [X], but your business is about [Y].”
34
+ - Then analyze the influencer stories:
35
+ - Identify which influencer content is relevant.
36
+ - Explain what they are promoting and how it might help the business.
37
+ - You **must mention influencer usernames** and only use what is in the retrievals.
38
+ - Do NOT invent or assume any information beyond what is explicitly provided in the retrievals and business details.
39
+
40
+ Keep your response:
41
+ - Context-aware
42
+ - Grounded only in the given data
43
+ - Helpful, concise, and user-friendly
44
+ '''
business_interaction_agent/utils/state.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel, ConfigDict
2
+ from typing import Optional
3
+
4
+ class State(BaseModel):
5
+ interactions: Optional[list] = []
6
+ model_config = ConfigDict(arbitrary_types_allowed=True)
business_interaction_agent/utils/tools.py ADDED
File without changes
business_interaction_agent/utils/utils.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import faiss
3
+ import re
4
+ from utils.models_loader import ST
5
+ import numpy as np
6
+ import ast
7
+
8
+
9
+
10
+ def manual_retrieval(messages, business_details):
11
+ # === Load CSV ===
12
+ csv_path = 'extracted_data.csv'
13
+ df = pd.read_csv(csv_path)
14
+
15
+ # === Parse stored embeddings ===
16
+ df['embeddings'] = df['embeddings'].apply(lambda x: ast.literal_eval(x) if isinstance(x, str) else x)
17
+ embeddings = np.vstack(df['embeddings'].values).astype('float32')
18
+
19
+ # === Build FAISS index ===
20
+ dimension = embeddings.shape[1]
21
+ index = faiss.IndexFlatL2(dimension)
22
+ index.add(embeddings)
23
+
24
+ # === Load SentenceTransformer model ===
25
+
26
+ # === Encode the query and search ===
27
+ query_embedding = ST.encode(str(messages)+str(business_details)).reshape(1, -1).astype('float32')
28
+ top_k=3
29
+ distances, indices = index.search(query_embedding, top_k)
30
+
31
+ # === Function to extract sections 1 and 6 ===
32
+ def extract_story_and_branding(full_story):
33
+ full_story = full_story.replace('**6. Visible Texts or Brandings**', '**6. Visible Texts or Brandings:**')
34
+ full_story = full_story.replace('**1. Story**', '**1. Story:**')
35
+
36
+ pattern = (
37
+ r"\*\*1\. Story:\*\*(.*?)(?=\*\*\d+\.\s)"
38
+ r".*?"
39
+ r"\*\*6\. Visible Texts or Brandings:\*\*(.*?)(?=\*\*\d+\.\s|$)"
40
+ )
41
+ match = re.search(pattern, full_story, re.DOTALL)
42
+ if match:
43
+ story_section = match.group(1).strip()
44
+ branding_section = match.group(2).strip()
45
+ return f"Story:\n{story_section}\n\nVisible Texts or Brandings:\n{branding_section}"
46
+ else:
47
+ return "Requested sections not found."
48
+
49
+ # === Format results ===
50
+ outer_list = []
51
+ for i, idx in enumerate(indices[0]):
52
+ res = {
53
+ 'rank': i + 1,
54
+ 'username': df.iloc[idx]['username'],
55
+ 'agentic_story': df.iloc[idx]['agentic_story'],
56
+ 'likesCount': df.iloc[idx]['likesCount'],
57
+ 'commentCount': df.iloc[idx]['commentCount'],
58
+ 'distance': distances[0][i]
59
+ }
60
+
61
+ inner_list = []
62
+ inner_list.append(f"[{res['rank']}]. The influencer name is: **{res['username']}** — Likes: **{res['likesCount']}**, Comments: **{res['commentCount']}**")
63
+ inner_list.append(f"The story of that particular video is:\n{extract_story_and_branding(res['agentic_story'])}")
64
+ inner_list.append(f"Distance: {res['distance']:.4f}")
65
+ outer_list.append(inner_list)
66
+
67
+ return str(outer_list)
context_analysis_agent/__init__.py ADDED
File without changes
context_analysis_agent/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (190 Bytes). View file
 
context_analysis_agent/__pycache__/agent.cpython-312.pyc ADDED
Binary file (3.26 kB). View file
 
my_agent/utils/initial_interaction.py → context_analysis_agent/agent.py RENAMED
@@ -1,27 +1,15 @@
1
- import os
2
  from langchain_groq import ChatGroq
3
  from langgraph.graph import StateGraph, MessagesState, START, END
4
  from langgraph.checkpoint.memory import MemorySaver
5
- from langchain_core.messages import SystemMessage
6
- from pydantic import BaseModel, ConfigDict, Field
7
- from typing import Optional, List
8
- from .models_loader import llm
9
- from .prompts import introduction_prompt , details_extract_prompt
10
- from .validators import DetailsFormatter
11
 
12
-
13
- # State model
14
- class State(BaseModel):
15
- interactions: Optional[list] = []
16
- model_config = ConfigDict(arbitrary_types_allowed=True)
17
-
18
- # Global business state (shared)
19
  business_state = State()
20
 
21
  class IntroductionChatbot:
22
  def __init__(self):
23
  self.memory = MemorySaver()
24
- # self.llm = ChatGroq(model_name="Gemma2-9b-It")
25
  self.llm = llm
26
  self.workflow = self._initialize_workflow()
27
  self.interact_agent = self.workflow.compile(checkpointer=self.memory)
@@ -29,17 +17,11 @@ class IntroductionChatbot:
29
 
30
  def _initialize_workflow(self):
31
  workflow = StateGraph(MessagesState)
32
- workflow.add_node("chatbot", self._call_model)
33
  workflow.add_edge(START, "chatbot")
34
  workflow.add_edge("chatbot", END)
35
  return workflow
36
 
37
- def _call_model(self, state):
38
- template = introduction_prompt
39
- messages = [SystemMessage(content=template)] + state["messages"]
40
- response = self.llm.invoke(messages)
41
- return {"messages": [response]}
42
-
43
  def chat(self, user_input: str):
44
  self.messages.append({"role": "user", "content": user_input})
45
  config = {"configurable": {"thread_id": "1"}}
@@ -52,10 +34,6 @@ class IntroductionChatbot:
52
  return "Thanks for providing all your required business details" in latest_response
53
 
54
  def extract_details(self):
55
- template = details_extract_prompt(business_state.interactions)
56
-
57
- messages = [SystemMessage(content=template)]
58
- response = self.llm.with_structured_output(DetailsFormatter).invoke(messages)
59
- print('Extracetd details:',response)
60
- return response
61
-
 
 
1
  from langchain_groq import ChatGroq
2
  from langgraph.graph import StateGraph, MessagesState, START, END
3
  from langgraph.checkpoint.memory import MemorySaver
4
+ from .utils.state import State
5
+ from .utils.nodes import introduction_node, extract_business_details
6
+ from utils.models_loader import llm
 
 
 
7
 
 
 
 
 
 
 
 
8
  business_state = State()
9
 
10
  class IntroductionChatbot:
11
  def __init__(self):
12
  self.memory = MemorySaver()
 
13
  self.llm = llm
14
  self.workflow = self._initialize_workflow()
15
  self.interact_agent = self.workflow.compile(checkpointer=self.memory)
 
17
 
18
  def _initialize_workflow(self):
19
  workflow = StateGraph(MessagesState)
20
+ workflow.add_node("chatbot", lambda state: introduction_node(state, self.llm))
21
  workflow.add_edge(START, "chatbot")
22
  workflow.add_edge("chatbot", END)
23
  return workflow
24
 
 
 
 
 
 
 
25
  def chat(self, user_input: str):
26
  self.messages.append({"role": "user", "content": user_input})
27
  config = {"configurable": {"thread_id": "1"}}
 
34
  return "Thanks for providing all your required business details" in latest_response
35
 
36
  def extract_details(self):
37
+ response = extract_business_details(business_state.interactions)
38
+ print('Extracted details:', response)
39
+ return response
 
 
 
 
context_analysis_agent/utils/__init__.py ADDED
File without changes
context_analysis_agent/utils/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (196 Bytes). View file
 
context_analysis_agent/utils/__pycache__/nodes.cpython-312.pyc ADDED
Binary file (1.17 kB). View file
 
context_analysis_agent/utils/__pycache__/prompts.cpython-312.pyc ADDED
Binary file (1.83 kB). View file
 
context_analysis_agent/utils/__pycache__/state.cpython-312.pyc ADDED
Binary file (1.43 kB). View file
 
context_analysis_agent/utils/__pycache__/utils.cpython-312.pyc ADDED
Binary file (1.79 kB). View file