Anshini committed on
Commit
af9ac6c
·
verified ·
1 Parent(s): 7a0a32e

Update app1.py

Browse files
Files changed (1) hide show
  1. app1.py +114 -195
app1.py CHANGED
@@ -1,214 +1,133 @@
1
- # app.py
2
-
3
  import os
4
  import streamlit as st
5
  from dotenv import load_dotenv
6
- from langchain_groq import ChatGroq
7
- from langgraph.graph import StateGraph, END
8
  from langgraph.checkpoint.memory import MemorySaver
9
- from langchain_core.messages import AIMessage, HumanMessage
10
- from typing import Annotated
11
- from typing_extensions import TypedDict
12
- from langchain_together import Together
13
- from tools import execute_python_code
14
- import io
15
- import contextlib
16
  import traceback
17
- import time
 
18
 
19
  # Load environment variables
20
  load_dotenv()
21
- os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY")
22
- together_api_key = os.getenv("TOGETHER_API_KEY")
23
-
24
- # LangGraph State definition
25
- class State(TypedDict):
26
- messages: Annotated[list, ...]
27
- name: str
28
- birthday: str
29
  input: str
30
- code: str
31
- explanation: str
32
- execution_result: str
33
-
34
- # LLM
35
- code_generator = Together(
36
- model="deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
37
- temperature=0.2,
38
- max_tokens=1500,
39
- api_key=together_api_key,
40
- )
41
-
42
- # Memory
43
- memory = MemorySaver()
44
-
45
- # Define LangGraph Nodes
46
- def generate_code(state: State):
47
- user_prompt = state["input"]
48
- system_prompt = """You are an expert Python coding assistant specializing in LangGraph applications.
49
- Generate clean, working Python code for the user's request with these requirements:
50
- 1. The code MUST use the LangGraph framework (langgraph library).
51
- 2. Implement a proper flow graph using StateGraph.
52
- 3. Include all necessary imports and make sure the code is complete.
53
- 4. Include code to visualize the flow graph.
54
- 5. Output ONLY the final Python code.
55
-
56
- User request:"""
57
- full_prompt = system_prompt + user_prompt
58
-
59
- for attempt in range(3):
60
- try:
61
- response = code_generator.invoke(full_prompt)
62
- return {**state, "code": str(response)}
63
- except Exception as e:
64
- if "503" in str(e):
65
- time.sleep(2)
66
- else:
67
- raise e
68
- raise Exception("Code generation failed after retries.")
69
-
70
- def execute_code(state: State):
71
  code = state.get("code", "")
72
- buffer = io.StringIO()
73
  try:
74
- with contextlib.redirect_stdout(buffer):
 
 
 
75
  exec(code, {})
76
- output = buffer.getvalue() or "✅ Code executed successfully with no output."
 
 
 
 
 
 
77
  except Exception:
78
- output = "❌ Execution Error:\n" + traceback.format_exc()
79
- return {**state, "execution_result": output}
80
-
81
- def explain_code(state: State):
82
- code = state["code"]
83
- user_prompt = state["input"]
84
- system_prompt = """You are a LangGraph expert who explains code clearly. Provide a detailed explanation of the code in three parts:
85
- 1. LANGGRAPH FLOW: Describe nodes, edges, and how the graph flows.
86
- 2. CODE FLOW: High-level architecture and logic.
87
- 3. STEP-BY-STEP: Explain each part of the code so a beginner can understand it.
88
- 4. VISUALIZATION: Instructions on how to run and see the graph output.
89
  """
90
-
91
- prompt = f"User Prompt: {user_prompt}\n\nCode:\n```python\n{code}\n```"
92
- full_prompt = system_prompt + prompt
93
-
94
- explanation = code_generator.invoke(full_prompt)
95
- return {**state, "explanation": explanation}
96
-
97
- # LangGraph setup
98
- builder = StateGraph(State)
 
99
  builder.add_node("Generate_Code", generate_code)
100
  builder.add_node("Execute_Code", execute_code)
101
- builder.add_node("Code_Explainer", explain_code)
 
102
  builder.set_entry_point("Generate_Code")
103
- graph = builder.compile(checkpointer=memory)
104
-
105
- # Streamlit UI Setup
106
- st.set_page_config(page_title="MitraVerse", layout="wide")
107
-
108
- st.markdown("""
109
- <style>
110
- .stChatMessage {
111
- padding: 12px;
112
- margin-bottom: 12px;
113
- border-radius: 12px;
114
- max-width: 90%;
115
- }
116
- .user {
117
- background-color: #dcf8c6;
118
- align-self: flex-end;
119
- }
120
- .bot {
121
- background-color: #f1f0f0;
122
- align-self: flex-start;
123
- }
124
- .input-box {
125
- display: flex;
126
- align-items: center;
127
- gap: 0.5rem;
128
- }
129
- #floating-container {
130
- display: flex;
131
- align-items: center;
132
- justify-content: space-between;
133
- padding: 0.25rem 0.75rem;
134
- background-color: #f9f9f9;
135
- border-radius: 0.75rem;
136
- margin-top: 1rem;
137
- border: 1px solid #ccc;
138
- }
139
- .floating-popup {
140
- margin-top: 0.5rem;
141
- padding: 0.5rem;
142
- border-radius: 0.5rem;
143
- border: 1px solid #ccc;
144
- background-color: white;
145
- }
146
- </style>
147
- """, unsafe_allow_html=True)
148
-
149
- st.title("🧠 MitraVerse")
150
 
151
  # Initialize session state
152
- if "chat_history" not in st.session_state:
153
- st.session_state.chat_history = []
154
- if "latest_code" not in st.session_state:
155
- st.session_state.latest_code = ""
156
- if "latest_explanation" not in st.session_state:
157
- st.session_state.latest_explanation = ""
158
- if "latest_input" not in st.session_state:
159
- st.session_state.latest_input = ""
160
-
161
- # Display chat history
162
- for msg in st.session_state.chat_history:
163
- role = "user" if isinstance(msg, HumanMessage) else "bot"
164
- st.markdown(f"<div class='stChatMessage {role}'>{msg.content}</div>", unsafe_allow_html=True)
165
-
166
- # Input form
167
- with st.form("chat_form", clear_on_submit=True):
168
- st.markdown('<div id="floating-container">', unsafe_allow_html=True)
169
- st.markdown('</div>', unsafe_allow_html=True)
170
- user_input = st.text_input("Ask me", label_visibility="collapsed", placeholder="Ask me Anything")
171
- submitted = st.form_submit_button(label="Send")
172
-
173
- if submitted and user_input:
174
- st.session_state.chat_history.append(HumanMessage(content=user_input))
175
- st.session_state.latest_input = user_input
176
-
177
- # Buttons to run each tool manually
178
- if st.session_state.latest_input:
179
- if st.button("🔨 Generate Code"):
180
- state_input = {
181
- "messages": st.session_state.chat_history,
182
- "input": st.session_state.latest_input
183
- }
184
- result = graph.invoke(state_input, node="Generate_Code")
185
- st.session_state.latest_code = result["code"]
186
- st.session_state.chat_history.append(
187
- AIMessage(content="**💻 Generated Code:**\n\n```python\n" + result["code"] + "\n```")
188
- )
189
- st.code(result["code"], language="python")
190
-
191
- if st.button("⚙️ Execute Code") and st.session_state.latest_code:
192
- state_input = {
193
- "code": st.session_state.latest_code,
194
- "input": st.session_state.latest_input
195
- }
196
- result = graph.invoke(state_input, node="Execute_Code")
197
- st.session_state.chat_history.append(
198
- AIMessage(content="**🧪 Execution Result:**\n\n" + result["execution_result"])
199
- )
200
- st.text("🧪 Execution Result:")
201
- st.text(result["execution_result"])
202
-
203
- if st.button("🧠 Explain Code") and st.session_state.latest_code:
204
- state_input = {
205
- "code": st.session_state.latest_code,
206
- "input": st.session_state.latest_input
207
- }
208
- result = graph.invoke(state_input, node="Code_Explainer")
209
- st.session_state.latest_explanation = result["explanation"]
210
- st.session_state.chat_history.append(
211
- AIMessage(content="**🔍 Code Explanation:**\n\n" + result["explanation"])
212
- )
213
- with st.expander("🔍 Code Explanation"):
214
- st.markdown(result["explanation"])
 
 
 
1
  import os
2
  import streamlit as st
3
  from dotenv import load_dotenv
4
+ from langgraph.graph import StateGraph, END, START
 
5
  from langgraph.checkpoint.memory import MemorySaver
6
+ from langchain_core.messages import HumanMessage, AIMessage
7
+ from langchain_core.runnables import Runnable
8
+ from langchain_together import ChatTogether
9
+ from typing import TypedDict, List, Optional
 
 
 
10
  import traceback
11
+ import io
12
+ import sys
13
 
14
# Load environment variables from a local .env file
# (presumably supplies TOGETHER_API_KEY for ChatTogether — confirm against deployment setup).
load_dotenv()
# Shared chat model instance used by both the generate and explain graph nodes.
llm = ChatTogether(model="meta-llama/Meta-Llama-3-8B-Instruct")
17
+
18
# Define the graph state
class GraphState(TypedDict):
    """State dictionary threaded through every LangGraph node."""

    # Raw user request text captured from the Streamlit form.
    input: str
    # Full conversation so far: user prompts and model replies, in order.
    messages: List[HumanMessage | AIMessage]
    # Code produced by generate_code; empty/None until that node runs.
    code: Optional[str]
    # Captured stdout (or traceback text) from execute_code.
    execution_result: Optional[str]
    # LLM-written explanation produced by explain_code.
    explanation: Optional[str]
25
+
26
# === Node 1: Generate Code ===
def generate_code(state: GraphState) -> GraphState:
    """Ask the LLM for Python code that fulfils the user's request.

    Appends the request prompt and the model's reply to the running
    message list and stores the raw reply text under ``code``.
    All other state keys are passed through unchanged.
    """
    request_text = state['input']
    # Build the instruction prompt; the wording is part of the model contract.
    prompt = (
        "You are a senior Python developer.\n"
        "Generate Python code for the following user request. "
        "Just return the code only, no explanation.\n"
        "\n"
        f"Request: {request_text}\n"
    )
    conversation = state["messages"] + [HumanMessage(content=prompt)]
    reply = llm.invoke(conversation)

    updated = dict(state)
    updated["messages"] = conversation + [AIMessage(content=reply.content)]
    updated["code"] = reply.content
    return updated
40
+
41
# === Node 2: Execute Code ===
def execute_code(state: "GraphState") -> "GraphState":
    """Run the generated code, capturing its stdout and stderr.

    The captured output (or "✅ ..." when there is none, plus any stderr
    text, or a "❌ ..." traceback on failure) is stored under
    ``execution_result``; the rest of the state is passed through.

    SECURITY NOTE: ``exec`` of LLM-generated code is arbitrary code
    execution — acceptable only in a trusted or sandboxed environment.
    """
    code = state.get("code", "")
    out_buf = io.StringIO()
    err_buf = io.StringIO()
    # Remember whatever streams are currently installed (Streamlit may have
    # wrapped them); restoring to sys.__stdout__ would clobber that wrapper.
    saved_out, saved_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = out_buf, err_buf
        try:
            exec(code, {})
        finally:
            # Always restore the streams, even when exec() raises — the
            # previous version left them pointing at closed buffers on error,
            # silently breaking all later output.
            sys.stdout, sys.stderr = saved_out, saved_err
        output = out_buf.getvalue()
        error = err_buf.getvalue()
        result = output if output else "✅ Code executed without output."
        if error:
            result += "\n⚠️ Error:\n" + error
    except Exception:
        result = "❌ Exception:\n" + traceback.format_exc()
    return {
        **state,
        "execution_result": result
    }
63
+
64
# === Node 3: Explain Code ===
def explain_code(state: GraphState) -> GraphState:
    """Have the LLM explain the previously generated code.

    Appends the explanation request and the model's reply to the message
    list and stores the reply text under ``explanation``.
    """
    source = state['code']
    prompt = (
        "You are a code explainer. Please explain the following Python code:\n"
        f"{source}\n"
    )
    conversation = state["messages"] + [HumanMessage(content=prompt)]
    answer = llm.invoke(conversation)

    new_state = dict(state)
    new_state["messages"] = conversation + [AIMessage(content=answer.content)]
    new_state["explanation"] = answer.content
    return new_state
76
+
77
# === Build LangGraph ===
# Wire the three nodes into a strictly linear pipeline:
#   Generate_Code -> Execute_Code -> Explain_Code
builder = StateGraph(GraphState)
builder.add_node("Generate_Code", generate_code)
builder.add_node("Execute_Code", execute_code)
builder.add_node("Explain_Code", explain_code)

builder.set_entry_point("Generate_Code")
builder.add_edge("Generate_Code", "Execute_Code")
builder.add_edge("Execute_Code", "Explain_Code")
builder.set_finish_point("Explain_Code")

# NOTE(review): compiled without a checkpointer, so the imported MemorySaver
# is unused here — confirm whether cross-run persistence was intended.
graph = builder.compile()
89
+
90
# === Streamlit App ===
# Page-level setup: browser tab title/icon and the on-page heading.
st.set_page_config(page_title="🧠 MitraVerse", page_icon="🧠")
st.title("🧠 MitraVerse - LangGraph Code Assistant")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
# Initialize session state
# Seed st.session_state with per-key defaults on the first run; subsequent
# Streamlit reruns keep whatever values are already stored.
_session_defaults = {
    "chat_history": [],
    "latest_code": "",
    "latest_explanation": "",
    "execution_result": "",
}
for _key, _default in _session_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
98
+
99
# User input form
with st.container():
    with st.form("chat_form", clear_on_submit=True):
        user_input = st.text_input("Ask me anything", placeholder="e.g., Write a bubble sort in Python")
        submitted = st.form_submit_button("🚀 Run End-to-End")

# On submit: record the user message, run the full generate -> execute ->
# explain pipeline synchronously, and cache the results in session state
# so they survive the rerun triggered by the form submission.
if submitted and user_input:
    st.session_state.chat_history.append(HumanMessage(content=user_input))
    # Fresh per-request state; messages carries the whole chat history.
    state_input = {
        "messages": st.session_state.chat_history,
        "input": user_input,
        "code": "",
        "execution_result": "",
        "explanation": ""
    }
    result = graph.invoke(state_input)
    st.session_state.latest_code = result["code"]
    st.session_state.execution_result = result["execution_result"]
    st.session_state.latest_explanation = result["explanation"]
118
+
119
# Show generated code
# Each section renders only once the pipeline has populated its value.
if st.session_state.latest_code:
    st.subheader("🧾 Generated Code")
    st.code(st.session_state.latest_code, language="python")
    st.download_button("📥 Download Code", st.session_state.latest_code, file_name="generated_code.py")

# Show execution result
if st.session_state.execution_result:
    st.subheader("🧪 Execution Result")
    st.text(st.session_state.execution_result)

# Show code explanation
if st.session_state.latest_explanation:
    st.subheader("💡 Code Explanation")
    st.markdown(st.session_state.latest_explanation)