import os
from langchain_core.prompts import ChatPromptTemplate
from langsmith import Client, traceable
from langchain_core.output_parsers import StrOutputParser
from langchain_nomic.embeddings import NomicEmbeddings
from langchain_groq import ChatGroq
from dotenv import load_dotenv

load_dotenv()
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
HF_API_KEY = os.getenv("HF_API_KEY")
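# Fail fast if the Groq key is missing (a defensive check; assumes the key is
# required for every downstream call):
if not GROQ_API_KEY:
    raise EnvironmentError("GROQ_API_KEY is not set; add it to your .env file.")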
# Optional LangSmith tracing configuration. Note that LangChain reads these
# values from environment variables, so they would need to be exported (or set
# via os.environ) to take effect:
# LANGSMITH_TRACING = "true"
# LANGSMITH_ENDPOINT = "https://api.smith.langchain.com"
# LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY")
# LANGSMITH_PROJECT = "pr-internal-hand-91"
model_name = "llama-3.3-70b-versatile"  # alternative: "llama3-70b-8192"
llm = ChatGroq(
    temperature=0,
    model=model_name,
    api_key=GROQ_API_KEY,
    verbose=True,
    max_retries=2,
)
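# get_answer() below depends on fake_db_retrieval(), which is not defined in
# this file. A minimal placeholder sketch so the function is runnable; the
# returned facts are illustrative and should come from a real source:
def fake_db_retrieval():
    return (
        "Hydroponics grows plants in a nutrient solution instead of soil; "
        "growers typically monitor pH, electrical conductivity (EC), and "
        "water temperature."
    )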
# @traceable
def get_answer(question):
    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a hydroponic AI assistant. Answer the user's questions in detail. Here are some facts about hydroponic farming: {facts}"),
        ("user", "{question}"),
    ])
    parser = StrOutputParser()
    chain = prompt | llm | parser
    answer = chain.invoke({"question": question, "facts": fake_db_retrieval()})
    return answer
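# get_answer() is not called elsewhere in this file; an illustrative call:
# print(get_answer("What growing medium works best for lettuce?"))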
embedding_model = NomicEmbeddings(model="nomic-embed-text-v1.5", inference_mode="local")

db = "tomato1"  # directory where the Chroma collection is persisted
from langchain_community.vectorstores import Chroma

vector_store = Chroma(
    collection_name="chromadb3",
    persist_directory=db,
    embedding_function=embedding_model,
)
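# Optional sanity check (illustrative query; not required by the app): confirm
# the persisted collection loads and returns neighbors.
# docs = vector_store.similarity_search("ideal pH for hydroponic tomatoes", k=3)
# print([d.page_content[:80] for d in docs])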
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.chains import RetrievalQA

conversational_memory = ConversationBufferWindowMemory(
    memory_key="chat_history",
    k=5,  # number of messages kept in memory
    return_messages=True,  # return message objects rather than a single string
)
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vector_store.as_retriever(search_kwargs={"k": 5}),
)
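# Illustrative standalone query (question text is made up). RetrievalQA's
# .run() returns just the answer string, which is what the agent tool expects:
# print(qa.run("What nutrient concentration suits hydroponic tomatoes?"))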
from langchain.agents import Tool

# Define the list of tool objects to be used by the agent.
tools = [
    Tool(
        name="Hydroponic Knowledge database",
        func=qa.run,
        description=(
            "Use this tool to look up information when answering hydroponic "
            "knowledge queries. If the answer is not in the database, do not "
            "say 'I can try to provide some general information'; simply state "
            "what you know as if it came from the knowledge database."
        ),
    ),
]
from langchain.agents import create_react_agent
from langchain import hub

# Pull the standard conversational ReAct prompt from the LangChain hub.
prompt = hub.pull("hwchase17/react-chat")

agent = create_react_agent(
    tools=tools,
    llm=llm,
    prompt=prompt,
)
# Create an agent executor by passing in the agent and tools.
from langchain.agents import AgentExecutor

agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    memory=conversational_memory,
    max_iterations=30,
    max_execution_time=600,
    # early_stopping_method="generate",
    handle_parsing_errors=True,
)
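# Quick smoke test outside Streamlit (illustrative question). invoke() returns
# a dict whose "output" key holds the agent's final answer:
# result = agent_executor.invoke({"input": "How often should I replace the nutrient solution?"})
# print(result["output"])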
import streamlit as st

# Function for continuing the conversation
def continue_conversation(user_input, history):
    # Invoke the agent and get the response.
    response = agent_executor.invoke({"input": user_input})
    output = response["output"]
    # Prepend the new exchange to the history (latest conversation comes first).
    history.insert(0, {"role": "Farmer", "message": user_input})
    history.insert(0, {"role": "Hydroponic Agent", "message": output})
    # Return the current response and the full history (hidden state).
    return output, history
# Streamlit UI
def main():
    st.set_page_config(page_title="Hydroponic AI Agent", page_icon="👨‍⚕️")
    st.title("Hydroponic AI Agent")

    # Initialize the conversation history.
    if "history" not in st.session_state:
        st.session_state.history = []

    # Sidebar for memory display.
    with st.sidebar:
        st.header("Conversation History")
        st.write("This section contains the conversation history.")

    # Create a container for the chat.
    chat_container = st.container()

    # Display the chat history with the latest conversation at the top.
    for chat in st.session_state.history:
        if chat["role"] == "Farmer":
            chat_container.markdown(f"**Farmer:** {chat['message']}")
        else:
            chat_container.markdown(f"**Hydroponic AI Assistant:** {chat['message']}")

    # User input text box at the bottom.
    user_input = st.text_input("Ask a question:", key="input", placeholder="What do you want to know?")

    if user_input:
        # Get the response and update the conversation history.
        output, updated_history = continue_conversation(user_input, st.session_state.history)
        # Update the session state with the new history.
        st.session_state.history = updated_history

    # Display memory of past conversations in an expandable section.
    with st.expander("Memory", expanded=True):
        for chat in st.session_state.history:
            st.write(f"**{chat['role']}:** {chat['message']}")

if __name__ == "__main__":
    main()
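# Launch with Streamlit (assuming this file is saved as app.py):
#   streamlit run app.py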