Spaces:
Sleeping
Sleeping
| import os | |
| from langchain_groq import ChatGroq | |
| from langgraph.graph import StateGraph, MessagesState, START, END | |
| from langgraph.checkpoint.memory import MemorySaver | |
| from langchain_core.messages import SystemMessage | |
| from pydantic import BaseModel, ConfigDict, Field | |
| from typing import Optional, List | |
| from .models_loader import llm | |
| from .prompts import business_interaction_prompt | |
# Pydantic model for extracted business info
class DetailsFormatter(BaseModel):
    """Tool schema the LLM fills when extracting business details.

    Bound to the model via ``bind_tools`` in ``extract_details``; the
    field names below become the keys of the returned tool-call args dict.
    """

    business_type: str = Field(description="The type of the business")
    platform: str = Field(description="The platform used for the business")
    target_audience: str = Field(description="The target audience of the business")
    business_goals: str = Field(description="The business goals of the business")
    offerings: str = Field(description="The offerings of the business")
    # NOTE(review): capitalised name breaks snake_case, but it is also the key
    # in the extracted args dict — renaming would change downstream consumers,
    # so it is documented rather than fixed here.
    Challenges_faced: str = Field(description="The challenges faced by the business")
# State model
class State(BaseModel):
    """Shared conversation log for one business-onboarding session.

    ``interactions`` holds one dict per chat turn of the form
    ``{'user': <user_input>, 'agent_response': <response>}`` (see
    ``BusinessInteractionChatbot.chat``).
    """

    # default_factory avoids the shared-mutable-default pitfall of `= []`;
    # Optional is kept so callers may still pass interactions=None explicitly.
    interactions: Optional[list] = Field(default_factory=list)

    model_config = ConfigDict(arbitrary_types_allowed=True)


# Global business state (shared by all chatbot instances in this module)
business_state = State()
class BusinessInteractionChatbot:
    """Conversational agent that collects business details from an owner.

    Wraps a single-node LangGraph workflow (START -> chatbot -> END),
    compiled with an in-memory checkpointer so the conversation persists
    across turns on one checkpoint thread.
    """

    def __init__(self, thread_id: str = "1"):
        """Build and compile the chat workflow.

        Args:
            thread_id: Checkpointer thread identifier. Defaults to "1",
                preserving the original single-conversation behaviour while
                allowing independent conversations per instance.
        """
        self.memory = MemorySaver()
        self.llm = llm
        self.workflow = self._initialize_workflow()
        self.interact_agent = self.workflow.compile(checkpointer=self.memory)
        # Local transcript as OpenAI-style role/content dicts (e.g. for UI display).
        self.messages = []
        # Hoisted out of chat(): every turn resumes the same checkpoint thread.
        self.config = {"configurable": {"thread_id": thread_id}}

    def _initialize_workflow(self):
        """Create the one-node graph: START -> chatbot -> END."""
        workflow = StateGraph(MessagesState)
        workflow.add_node("chatbot", self._call_model)
        workflow.add_edge(START, "chatbot")
        workflow.add_edge("chatbot", END)
        return workflow

    def _call_model(self, state):
        """Graph node: prepend the system prompt, invoke the LLM once.

        Returns a partial MessagesState update with the model's reply.
        """
        template = business_interaction_prompt
        messages = [SystemMessage(content=template)] + state["messages"]
        response = self.llm.invoke(messages)
        return {"messages": [response]}

    def _get_prompt_template(self):
        # NOTE(review): appears unused — _call_model reads
        # business_interaction_prompt from .prompts instead. Kept verbatim
        # for backward compatibility; candidate for removal.
        return (
            '''You are a business assistant who collects only valid and relevant data.
Your job is to gather details from business owners in a friendly and conversational manner to understand their business better. Ask in very easy and short way.
We need these details:
1. Business Type (e.g., e-commerce, SaaS, consulting),
2. Platform(s) used (e.g., website, app, Instagram),
3. Target Audience (who are their customers or clients),
4. Business Goals (short-term or long-term objectives),
5. Offerings (products or services they provide),
6. Challenges faced (any current business problems or limitations).
Keep interacting until all valid details are collected.
VERY IMPORTANT: Once all valid details are received, say: '**Thanks for providing all your required business details.**'
'''
        )

    def chat(self, user_input: str):
        """Send one user turn through the agent and return its reply.

        Side effects: appends the turn to ``self.messages`` and to the
        module-level ``business_state.interactions`` log.
        """
        self.messages.append({"role": "user", "content": user_input})
        result = self.interact_agent.invoke({"messages": [user_input]}, self.config)
        response = result['messages'][-1].content
        self.messages.append({"role": "assistant", "content": response})
        business_state.interactions.append({'user': user_input, 'agent_response': response})
        return response

    def is_complete(self, latest_response: str) -> bool:
        """True once the agent has emitted its completion sentinel phrase."""
        return "Thanks for providing all your required business details" in latest_response

    def extract_details(self):
        """One-shot tool-calling extraction over the logged conversation.

        Returns the ``DetailsFormatter`` tool-call args dict when the model
        used the tool, the raw text content as a fallback, or the string
        "No response" if neither is available.
        """
        template = f'''Extract the following details of the business from the conversation.
1. Business Type (e.g., e-commerce, SaaS, consulting),
2. Platform(s) used (e.g., website, app, Instagram),
3. Target Audience (who are their customers or clients),
4. Business Goals (short-term or long-term objectives),
5. Offerings (products or services they provide),
6. Challenges faced (any current business problems or limitations).
The conversation is:\n{business_state.interactions}'''
        messages = [SystemMessage(content=template)]
        response = self.llm.bind_tools([DetailsFormatter]).invoke(messages)
        if hasattr(response, 'tool_calls') and response.tool_calls:
            return response.tool_calls[0]['args']
        elif hasattr(response, 'content'):
            return response.content
        else:
            return "No response"