Commit b55b8d4
1 Parent(s): c32fa91
Included Business interaction too

Files changed:
- __pycache__/main.cpython-312.pyc +0 -0
- main.py +24 -68
- my_agent/__pycache__/agent.cpython-312.pyc +0 -0
- my_agent/utils/__pycache__/initial_interaction.cpython-312.pyc +0 -0
- my_agent/utils/__pycache__/nodes.cpython-312.pyc +0 -0
- my_agent/utils/__pycache__/state.cpython-312.pyc +0 -0
- my_agent/utils/initial_interaction.py +97 -0
- my_agent/utils/nodes.py +45 -53
- my_agent/utils/state.py +2 -0
__pycache__/main.cpython-312.pyc
CHANGED
Binary files a/__pycache__/main.cpython-312.pyc and b/__pycache__/main.cpython-312.pyc differ
main.py
CHANGED

@@ -3,87 +3,43 @@ from pydantic import BaseModel
 from my_agent.agent import build_graph
 import pandas as pd
 from typing import Optional
+from my_agent.utils.initial_interaction import BusinessInteractionChatbot
 
 app = FastAPI()
+interaction_chatbot = BusinessInteractionChatbot()
 graph = build_graph()
 
+
 class RequestInput(BaseModel):
     query: list
     preferred_topics: Optional[list] = []
 
 
-@app.post("/run")
-def run_graph(input_data: RequestInput):
-    result = graph.invoke({'topic': input_data.query})
-    return {'final_story': result['final_story']}
-
-
-# *********************INFERENCING PART****************************
-# import asyncio
-# from fastapi import FastAPI, Request, Form
-# from fastapi.responses import HTMLResponse
-# from fastapi.staticfiles import StaticFiles
-# from fastapi.templating import Jinja2Templates
-# from my_agent.agent import build_graph
-# from starlette.concurrency import run_in_threadpool
-
-# app = FastAPI()
-# graph = build_graph()
-
-# app.mount("/static", StaticFiles(directory="static"), name="static")
-# templates = Jinja2Templates(directory="templates")
 
-# # Store session state in memory (simple approach)
-# session_data = {
-#     "topic": [],
-#     "preferred_topics": [],
-# }
 
-
-#     return templates.TemplateResponse("index.html", {"request": request})
 
-
-#         })
 
-# @app.post("/select", response_class=HTMLResponse)
-# async def select_topics(request: Request, selected_topics: list[str] = Form(...), action: str = Form(...)):
-#     session_data["preferred_topics"].append(selected_topics)
 
-
-#         return templates.TemplateResponse("index.html", {
-#             "request": request,
-#             "topics": new_brainstorm,
-#             "story": None,
-#             "show_buttons": True
-#         })
 
-#     elif action == "generate_final_story":
-#         result = await run_in_threadpool(
-#             graph.invoke,
-#             {"topic": session_data["topic"], "preferred_topics": session_data["preferred_topics"]}
-#         )
-#         return templates.TemplateResponse("index.html", {
-#             "request": request,
-#             "topics": [],
-#             "story": result.get("final_story"),
-#             "show_buttons": False
-#         })
+
+class UserMessage(BaseModel):
+    message: str
+
+details_for_brainstrom = {}
+
+@app.post("/business-interaction")
+def business_chat(msg: UserMessage):
+    global details_for_brainstrom
+    response = interaction_chatbot.chat(msg.message)
+    if interaction_chatbot.is_complete(response):
+        details = interaction_chatbot.extract_details()
+        details_for_brainstrom = details
+        return {"response": response, "business_details": details, "complete": True}
+    return {"response": response, "complete": False}
+
+@app.post("/brainstrom")
+def run_graph(input_data: RequestInput):
+    # business_details = details_for_brainstrom
+    result = graph.invoke({'topic': input_data.query, 'business_details': details_for_brainstrom})
+    # RequestInput.preferred_topics=result['preferred_topics']
+    return {'final_story': result['final_story'],
+            'business_details': result['business_details'],
+            }
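The two endpoints above are meant to be called in sequence: a client loops on /business-interaction until the response reports complete, then posts its topics to /brainstrom. A minimal client sketch of that flow, assuming the app is served at http://localhost:8000 (host and port are illustrative, not part of the commit):

import requests  # hypothetical client; host/port are assumptions

BASE = "http://localhost:8000"

# Step 1: chat until the bot signals it has collected all business details.
reply = {"complete": False}
while not reply["complete"]:
    reply = requests.post(f"{BASE}/business-interaction",
                          json={"message": input("You: ")}).json()
    print("Bot:", reply["response"])

# Step 2: brainstorm; the collected details are held server-side in the
# module-level details_for_brainstrom global.
result = requests.post(f"{BASE}/brainstrom",
                       json={"query": ["fitness tips"]}).json()
print(result["final_story"])

Note that details_for_brainstrom is a module-level global, so concurrent clients would overwrite each other's collected details.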
my_agent/__pycache__/agent.cpython-312.pyc
CHANGED
Binary files a/my_agent/__pycache__/agent.cpython-312.pyc and b/my_agent/__pycache__/agent.cpython-312.pyc differ
my_agent/utils/__pycache__/initial_interaction.cpython-312.pyc
ADDED
Binary file (6.51 kB)
my_agent/utils/__pycache__/nodes.cpython-312.pyc
CHANGED
Binary files a/my_agent/utils/__pycache__/nodes.cpython-312.pyc and b/my_agent/utils/__pycache__/nodes.cpython-312.pyc differ
my_agent/utils/__pycache__/state.cpython-312.pyc
CHANGED
Binary files a/my_agent/utils/__pycache__/state.cpython-312.pyc and b/my_agent/utils/__pycache__/state.cpython-312.pyc differ
my_agent/utils/initial_interaction.py
ADDED

@@ -0,0 +1,97 @@
+import os
+from langchain_groq import ChatGroq
+from langgraph.graph import StateGraph, MessagesState, START, END
+from langgraph.checkpoint.memory import MemorySaver
+from langchain_core.messages import SystemMessage
+from pydantic import BaseModel, ConfigDict, Field
+from typing import Optional, List
+from .models_loader import llm
+
+# Pydantic model for extracted business info
+class DetailsFormatter(BaseModel):
+    business_type: str = Field(description="The type of the business")
+    platform: str = Field(description="The platform used for the business")
+    target_audience: str = Field(description="The target audience of the business")
+    business_goals: str = Field(description="The business goals of the business")
+    offerings: str = Field(description="The offerings of the business")
+    Challenges_faced: str = Field(description="The challenges faced by the business")
+
+# State model
+class State(BaseModel):
+    interactions: Optional[list] = []
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+# Global business state (shared)
+business_state = State()
+
+class BusinessInteractionChatbot:
+    def __init__(self):
+        self.memory = MemorySaver()
+        # self.llm = ChatGroq(model_name="Gemma2-9b-It")
+        self.llm = llm
+        self.workflow = self._initialize_workflow()
+        self.interact_agent = self.workflow.compile(checkpointer=self.memory)
+        self.messages = []
+
+    def _initialize_workflow(self):
+        workflow = StateGraph(MessagesState)
+        workflow.add_node("chatbot", self._call_model)
+        workflow.add_edge(START, "chatbot")
+        workflow.add_edge("chatbot", END)
+        return workflow
+
+    def _call_model(self, state):
+        template = self._get_prompt_template()
+        messages = [SystemMessage(content=template)] + state["messages"]
+        response = self.llm.invoke(messages)
+        return {"messages": [response]}
+
+    def _get_prompt_template(self):
+        return (
+            '''You are a business assistant who collects only valid and relevant data.
+            Your job is to gather details from business owners in a friendly and conversational manner to understand their business better. Ask in a very easy and short way.
+
+            We need these details:
+            1. Business Type (e.g., e-commerce, SaaS, consulting),
+            2. Platform(s) used (e.g., website, app, Instagram),
+            3. Target Audience (who are their customers or clients),
+            4. Business Goals (short-term or long-term objectives),
+            5. Offerings (products or services they provide),
+            6. Challenges faced (any current business problems or limitations).
+
+            Keep interacting until all valid details are collected.
+
+            VERY IMPORTANT: Once all valid details are received, say: '**Thanks for providing all your required business details.**'
+            '''
+        )
+
+    def chat(self, user_input: str):
+        self.messages.append({"role": "user", "content": user_input})
+        config = {"configurable": {"thread_id": "1"}}
+        response = self.interact_agent.invoke({"messages": [user_input]}, config)['messages'][-1].content
+        self.messages.append({"role": "assistant", "content": response})
+        business_state.interactions.append({'user': user_input, 'agent_response': response})
+        return response
+
+    def is_complete(self, latest_response: str) -> bool:
+        return "Thanks for providing all your required business details" in latest_response
+
+    def extract_details(self):
+        template = f'''Extract the following details of the business from the conversation.
+        1. Business Type (e.g., e-commerce, SaaS, consulting),
+        2. Platform(s) used (e.g., website, app, Instagram),
+        3. Target Audience (who are their customers or clients),
+        4. Business Goals (short-term or long-term objectives),
+        5. Offerings (products or services they provide),
+        6. Challenges faced (any current business problems or limitations).
+        The conversation is:\n{business_state.interactions}'''
+
+        messages = [SystemMessage(content=template)]
+        response = self.llm.bind_tools([DetailsFormatter]).invoke(messages)
+
+        if hasattr(response, 'tool_calls') and response.tool_calls:
+            return response.tool_calls[0]['args']
+        elif hasattr(response, 'content'):
+            return response.content
+        else:
+            return "No response"
my_agent/utils/nodes.py
CHANGED

@@ -10,23 +10,35 @@ from .data_loader import load_influencer_data
 def retrieve(state: State) -> State:
     print('Moving to retrieval process')
     retrievals=[]
-
-
-
-
-
-
-
-
-
-
+    if len(state.preferred_topics)==0:
+        for topic in state.topic:  # Loop through each topic
+            embedded_query = ST.encode(topic)  # Embed each topic
+            data = load_influencer_data()
+            scores, retrieved_examples = data.get_nearest_examples("embeddings", embedded_query, k=1)
+
+            # Construct a list of dictionaries for this topic
+            result = [{user: story} for user, story in zip(retrieved_examples['username'], retrieved_examples['agentic_story'])]
+            retrievals.append(result)
+        print('Retrieval process completed......')
+        state.retrievals.append(retrievals)
+
+    if len(state.preferred_topics)>0:
+        for topic in state.preferred_topics[-1]:  # Loop through each topic
+            embedded_query = ST.encode(topic)  # Embed each topic
+            data = load_influencer_data()
+            scores, retrieved_examples = data.get_nearest_examples("embeddings", embedded_query, k=1)
+
+            # Construct a list of dictionaries for this topic
+            result = [{user: story} for user, story in zip(retrieved_examples['username'], retrieved_examples['agentic_story'])]
+            retrievals.append(result)
+        print('Retrieval process completed......')
+        state.retrievals.append(retrievals)
 
     print('The retrieval is:\n', state.retrievals)
     # return State(messages="Retrieved",topic=state.topic,retrievals=state.retrievals)
     return state
 
 def generate_story(state:State)-> State:
-    topic=state.topic
     print('The state retrieval is:', state.retrievals)
     retrieval_list = state.retrievals[-1]
     agentic_stories = []

@@ -116,6 +128,7 @@ def select_preferred_topics(state: State)-> State:
     try:
         preferred_indices = [int(i.strip()) for i in raw_input_str.split(",")]
         preferred_topics = [topic_values[i - 1] for i in preferred_indices if 0 < i <= len(topic_values)]
+        # preferred_topics = user_input
         state.preferred_topics.append(preferred_topics)
     except Exception:
         state.carry_on=False

@@ -133,52 +146,31 @@ def select_preferred_topics(state: State)-> State:
     return state
 
 
-# def select_preferred_topics(state: State) -> State:
-#     print("---API_feedback_mode---")
-
-#     if not state.brainstroming_topics:
-#         print("No brainstormed topics found.")
-#         state.carry_on = False
-#         return state
-
-#     # Get the latest set of brainstormed topics
-#     topic_values = list(state.brainstroming_topics[-1].values())
-#     print(f"Available topics: {topic_values}")
-
-#     # Ensure preferred_topics is well-formed
-#     if state.preferred_topics and isinstance(state.preferred_topics[-1], list):
-#         latest_selection = state.preferred_topics[-1]
-#         if latest_selection:
-#             print("User selected topics:")
-#             print(latest_selection)
-#             state.carry_on = True
-#             return state
-
-#     print("No preferred topics selected via API. Ending feedback loop.")
-#     state.carry_on = False
-#     return state
 
 
 
 def generate_final_story(state:State)-> State:
-
-
-
-
-
-    response
-
-    response
-
-
+    if len(state.preferred_topics)>0:
+        template = f'''I want to create a detailed storyline for a video on the given topic. You have to provide me that storyline of what to include in the video.
+        Now, I am giving you the topic of the video. But the need is to generate the story focusing on the format that I'll provide to you.
+        You can use this format for reference purposes, not for an exactly similar generation. The format is:\n{state.retrievals[-1]}.
+        \n\nNow let's start creating the storyline for my topic. The topic of the video is: \n\n{state.topic}\n\n
+
+        **Final Reminder** You have to strongly focus on these topics while creating the storyline: {[item for sublist in state.preferred_topics for item in sublist]}'''
+        messages = [SystemMessage(content=template)]
+        response = llm.bind_tools([StoryFormatter]).invoke(messages)
+        print('The final response is:', response)
+        if hasattr(response, 'tool_calls') and response.tool_calls:
+            response = response.tool_calls[0]['args']
+        elif hasattr(response, 'content'):
+            response = response.content
+        else:
+            response = "No response"
+        state.final_story=response
+        state.stories.append(response)
+        return state
+
+    state.final_story=state.stories[-1]
     return state
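The two branches of retrieve() differ only in whether topics come from state.topic or the latest preferred_topics entry, and both rely on the Hugging Face datasets FAISS API: get_nearest_examples("embeddings", vector, k=1) only works if load_influencer_data() returns a dataset with a FAISS index attached under that name. A sketch of a compatible loader, where the data source is an assumption but the index name and the username/agentic_story columns match what the node expects:

from datasets import load_dataset

def load_influencer_data():
    # Hypothetical source; the real loader lives in .data_loader.
    data = load_dataset("json", data_files="influencer_stories.json", split="train")
    # Attach a FAISS index over the precomputed "embeddings" column so that
    # data.get_nearest_examples("embeddings", query_vector, k=1) works.
    data.add_faiss_index(column="embeddings")
    return data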
my_agent/utils/state.py
CHANGED

@@ -11,4 +11,6 @@ class State(BaseModel):
     stories : Optional[list]=[]
     final_story: Optional[str]=None
     retrievals : Optional[list]=[]
+    business_details : Optional[dict]={}
+    latest_preferred_topics: Optional[list] = []
     model_config = ConfigDict(arbitrary_types_allowed=True)
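One point worth noting about the mutable defaults ({} and []): pydantic copies field defaults per model instance, so separate State objects do not share them the way plain Python default arguments would. A quick check, as a sketch:

from my_agent.utils.state import State

a, b = State(), State()
a.business_details["business_type"] = "e-commerce"
assert b.business_details == {}  # defaults are per-instance copies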
|