DrishtiSharma commited on
Commit
c6a7187
·
verified ·
1 Parent(s): 8295fd8

Update app.py

Browse files
Files changed (1)
  1. app.py +98 -65
app.py CHANGED
@@ -1,68 +1,101 @@
 
1
import os

import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
from langchain_core.prompts import PromptTemplate
from langchain_community.llms import OpenAI
# BUG FIX: LLMChain does not live in `langchain_core`; it is exported from
# `langchain.chains`.
from langchain.chains import LLMChain

# Fetch API Keys from environment variables
# BUG FIX: `os` was used below without ever being imported.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Verify required environment variables are set
if not OPENAI_API_KEY:
    raise ValueError("Missing required environment variable: 'OPENAI_API_KEY'")

# Title of the App
st.title("ReAct Agent")

# Step 1: User Input Collection
user_input = st.text_area("Enter the input prompt for the LLM:", placeholder="Describe what you want...")
num_intermediate_steps = st.number_input("Number of Intermediate Steps to Display:", min_value=1, value=3)

# Check if inputs are provided
if st.button("Run Processing"):
    # BUG FIX: the original tested an undefined name `api_key` (NameError at
    # runtime); the key loaded above is OPENAI_API_KEY.
    if not OPENAI_API_KEY or not user_input:
        st.error("Please provide all required inputs!")
    else:
        st.success("Inputs received! Processing...")

        # Step 2: LLM Initialization and Template
        st.header("Step 2: LLM Processing - Intermediate Outputs")
        try:
            # BUG FIX: `api_key` was undefined here as well.
            llm = OpenAI(api_key=OPENAI_API_KEY, temperature=0.7)
            prompt = PromptTemplate(
                input_variables=["text"],
                template="Process the following text step-by-step: {text}"
            )

            chain = LLMChain(llm=llm, prompt=prompt)
            intermediate_outputs = []

            # Simulate multiple intermediate outputs
            for step in range(num_intermediate_steps):
                response = chain.run(user_input + f" Step {step+1}")
                intermediate_outputs.append(response)
                st.write(f"**Intermediate Step {step+1}:**")
                st.info(response)

            # Step 3: Final Processing
            st.header("Step 3: Visualization")
            # Convert responses to a DataFrame
            df = pd.DataFrame({"Step": [f"Step {i+1}" for i in range(num_intermediate_steps)],
                               "Output": intermediate_outputs})
            st.dataframe(df)

            # Plot the outputs' length as a graph
            st.subheader("Output Length per Step")
            df['Length'] = df['Output'].apply(len)
            fig, ax = plt.subplots()
            ax.bar(df['Step'], df['Length'])
            ax.set_xlabel("Steps")
            ax.set_ylabel("Output Length")
            ax.set_title("Text Length at Each Step")
            st.pyplot(fig)

        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
  import streamlit as st
3
  import pandas as pd
4
  import matplotlib.pyplot as plt
5
+ from langchain_community.tools.tavily_search import TavilySearchResults
6
+ from langchain_openai import ChatOpenAI
7
+ from langgraph.graph import MessagesState
8
+ from langgraph.graph import START, StateGraph
9
+ from langgraph.prebuilt import tools_condition
10
+ from langgraph.prebuilt import ToolNode
11
+ from langchain_core.messages import HumanMessage, SystemMessage
12
+ from IPython.display import Image, display
13
+
14
# ------------------- Environment Variable Setup -------------------
# Fetch API keys from environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
tavily_api_key = os.getenv("TAVILY_API_KEY")

# Fail fast when a key is absent so downstream API calls don't fail obscurely.
for _key_name, _key_value in (("OPENAI_API_KEY", openai_api_key),
                              ("TAVILY_API_KEY", tavily_api_key)):
    if not _key_value:
        raise ValueError(f"Missing required environment variable: {_key_name}")
24
+
25
# ------------------- Tool Definitions -------------------
# Tavily Search Tool
# Web-search tool exposed to the agent; capped at 5 results per query.
# NOTE(review): presumably reads TAVILY_API_KEY from the environment
# (validated earlier in this file) — confirm against the langchain_community docs.
tavily_tool = TavilySearchResults(max_results=5)
28
+
29
def multiply(a: int, b: int) -> int:
    """Return the product of *a* and *b*."""
    product = a * b
    return product
32
+
33
def add(a: int, b: int) -> int:
    """Return the sum of *a* and *b*."""
    total = a + b
    return total
36
+
37
def divide(a: int, b: int) -> float:
    """Return *a* divided by *b*.

    Raises:
        ValueError: if *b* is zero.
    """
    if b == 0:
        raise ValueError("Division by zero is not allowed.")
    quotient = a / b
    return quotient
42
+
43
# Combine tools
# Everything the agent may call: three arithmetic helpers plus web search.
tools = [add, multiply, divide, tavily_tool]
45
+
46
# ------------------- LLM and System Message Setup -------------------
llm = ChatOpenAI(model="gpt-4o-mini")
# parallel_tool_calls=False — presumably to force one tool call per LLM turn so
# the graph loops tool-by-tool; confirm against the langchain-openai docs.
llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False)
sys_msg = SystemMessage(content="You are a helpful assistant tasked with performing arithmetic and search on a set of inputs.")
50
+
51
# ------------------- LangGraph Workflow -------------------
def assistant(state: MessagesState):
    """Assistant node: prepend the system prompt and run the tool-bound LLM."""
    conversation = [sys_msg] + state["messages"]
    reply = llm_with_tools.invoke(conversation)
    return {"messages": [reply]}
55
+
56
# Define the graph
app_graph = StateGraph(MessagesState)        # state carries the message list
app_graph.add_node("assistant", assistant)   # LLM step
app_graph.add_node("tools", ToolNode(tools)) # executes requested tool calls
app_graph.add_edge(START, "assistant")       # entry point
# Routes to "tools" when the last assistant message requests tool calls,
# otherwise ends the run — per langgraph's prebuilt tools_condition.
app_graph.add_conditional_edges("assistant", tools_condition)
app_graph.add_edge("tools", "assistant")     # feed tool output back to the LLM
react_graph = app_graph.compile()
64
+
65
# Display graph visualization
st.header("LangGraph Workflow Visualization")
try:
    # draw_mermaid_png() returns raw PNG bytes, not a file path — the original
    # variable name `image_path` was misleading.
    png_bytes = react_graph.get_graph(xray=True).draw_mermaid_png()
    # BUG FIX: the original also called IPython's display(Image(...)), which is
    # a no-op outside a notebook; st.image renders the PNG bytes directly.
    st.image(png_bytes, caption="LangGraph Workflow Visualization")
except Exception as e:
    st.error(f"Failed to display graph visualization: {e}")
73
+
74
# ------------------- Streamlit Interface -------------------
#st.title("ReAct Agent with Arithmetic and Search")

# Collect the user's question.
user_question = st.text_area(
    "Enter your question:",
    placeholder="Example: 'Add 3 and 4. Multiply the result by 2. Divide it by 5.'",
)

if st.button("Submit"):
    # Guard: reject blank/whitespace-only input and halt this rerun.
    if not user_question.strip():
        st.error("Please enter a valid question.")
        st.stop()

    st.info("Processing your question...")
    result = react_graph.invoke({"messages": [HumanMessage(content=user_question)]})

    # Show every message produced by the graph run.
    st.subheader("Responses")
    for message in result["messages"]:
        st.write(message.content)

    st.success("Processing complete!")
96
+
97
# Example Placeholder Suggestions
st.sidebar.subheader("Example Questions")
for _example in (
    "- Add 3 and 4. Multiply the result by 2. Divide it by 5.",
    "- Tell me how many centuries Virat Kohli scored.",
    "- Search for the tallest building in the world.",
):
    st.sidebar.write(_example)