AbdulmalikAdeyemo commited on
Commit
14ad967
·
verified ·
1 Parent(s): 3f2dff4

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +402 -0
  2. code_assistant_runnable.py +644 -0
  3. requirements.txt +32 -0
app.py ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio
import json
import os
import re
import subprocess
import sys
import time
import uuid

import streamlit as st
from streamlit_ace import st_ace

from langchain_core.messages import SystemMessage, AIMessage, HumanMessage, ToolMessage

from code_assistant_runnable import get_runnable
13
+
14
# Set up page configuration — must be the first st.* call in the script.
st.set_page_config(page_title="AI Code Editor",
                   page_icon=":computer:",
                   layout="wide")
18
+
19
@st.cache_resource
def create_code_assistant_instance():
    """Build the LangGraph code-assistant runnable once per server process.

    Cached with st.cache_resource so Streamlit reruns reuse the same compiled
    graph. Returns None (after surfacing the error in the UI) when
    initialization fails; callers must handle a None chatbot.
    """
    try:
        # Import torch first to ensure proper initialization
        import torch
        torch.set_grad_enabled(False)  # Disable gradients since we're only doing inference

        return get_runnable()
    except Exception as e:
        st.error(f"Error initializing chatbot: {str(e)}")
        return None

# Module-level singleton used by the chat handler further down.
chatbot = create_code_assistant_instance()
32
+
33
+
34
# Initialize session states
if 'messages' not in st.session_state:
    # Chat history as langchain message objects; seeded with a greeting.
    st.session_state.messages = [
        AIMessage(content="Hello, I am your coding assistant. How can I help you?"),
    ]

# Code currently shown in the Ace editor; persists across reruns.
if 'editor_code' not in st.session_state:
    st.session_state.editor_code = ''

# Constants
EDITOR_HEIGHT = 400  # pixel height of the Ace editor widget
OUTPUT_HEIGHT = 150  # pixel height reserved for the output panel
48
+
49
# Minimal CSS for styling.
# NOTE: the string below is sent verbatim to the browser — it is runtime
# data, not a comment block; do not edit casually.
st.markdown("""
<style type="text/css">
.output-container {
    background-color: rgba(17, 19, 23, 0.8);
    border-radius: 4px;
    padding: 1rem;
    margin-top: 0.5rem;
    min-height: 150px;
    color: white;
}

.placeholder-text {
    color: gray;
    font-style: italic;
}

/* Remove extra padding */
.block-container {
    padding-top: 1rem !important;
}

/* Ensure chat messages are visible */
.stChatMessage {
    background-color: rgba(17, 19, 23, 0.8) !important;
}

/* Style section headers consistently */
.section-header {
    font-size: 1rem;
    margin-bottom: 1rem;
    color: rgb(250, 250, 250);
    font-weight: 500;
}

/* Ensure columns align at the top */
.column-container {
    display: flex;
    align-items: flex-start;
}

/* Loading indicator styles */
.loading-spinner {
    display: flex;
    align-items: center;
    gap: 0.5rem;
    padding: 0.5rem;
    border-radius: 0.25rem;
    background-color: rgba(17, 19, 23, 0.8);
}

.loading-text {
    color: #ffffff;
    font-size: 0.875rem;
}
</style>
""", unsafe_allow_html=True)
106
+
107
+
108
def analyze_code(code, language):
    """Run a handful of heuristic lint checks over *code*.

    Args:
        code: Source text to inspect.
        language: "python" or "rust"; any other value yields only the
            default all-clear message.

    Returns:
        Newline-joined warning/hint lines; never an empty string.
    """
    analysis = []

    if language == "python":
        # Check for basic Python issues
        if "while" in code and "break" not in code:
            analysis.append("⚠️ While loop detected without break condition - check for infinite loops")

        if "except:" in code and "except Exception:" not in code:
            analysis.append("⚠️ Bare except clause detected - consider catching specific exceptions")

        if "print" in code and "if __name__ == '__main__':" not in code:
            analysis.append("💡 Consider adding main guard for scripts with print statements")

        # FIX: the original pattern r'^\s+' matched ANY indented line, so every
        # normally indented program was reported as "mixed indentation".
        # Only warn when both tab-indented and space-indented lines exist.
        if re.search(r'^\t', code, re.MULTILINE) and re.search(r'^ ', code, re.MULTILINE):
            analysis.append("🔍 Mixed indentation detected - check spacing")

    elif language == "rust":
        if "unwrap()" in code:
            analysis.append("⚠️ Usage of unwrap() detected - consider proper error handling")

        if "mut" not in code and len(code) > 50:
            analysis.append("💡 No mutable variables detected - verify if intentional")

    if not analysis:
        analysis.append("✅ No immediate issues detected in the code")

    return "\n".join(analysis)
139
+
140
def dummy_ai_response(question, code_context, language):
    """Canned assistant reply with simple keyword routing on the question."""
    time.sleep(1)  # pretend the model is thinking

    lowered = question.lower()

    if "debug" in lowered:
        report = analyze_code(code_context, language)
        return f"Here's my analysis of your {language} code:\n" + report

    if "how" in lowered and "implement" in lowered:
        return f"To implement this in {language}, you might want to consider:\n1. Breaking down the problem\n2. Using appropriate data structures\n3. Following {language} best practices"

    if "error" in lowered or "not working" in lowered:
        return "Let me help you debug that. Could you:\n1. Share the specific error message\n2. Describe what you expected to happen\n3. Describe what actually happened"

    return f"I see you're working with {language}. Could you clarify what specific help you need with your code?"
156
+
157
def run_python_code(code, timeout=30):
    """Execute *code* in a fresh interpreter process and capture its output.

    Fixes over the original: uses sys.executable instead of a bare "python"
    (which may not exist on PATH), always deletes the scratch file, and kills
    runaway scripts via a timeout.

    Args:
        code: Python source to run.
        timeout: Seconds to wait before aborting (new parameter, default 30).

    Returns:
        stderr text if the script produced any, otherwise its stdout;
        an "Error: ..." string on failure to launch or on timeout.
    """
    path = "temp_code.py"
    try:
        with open(path, "w") as f:
            f.write(code)
        result = subprocess.run([sys.executable, path],
                                capture_output=True,
                                text=True,
                                timeout=timeout)
        return result.stderr if result.stderr else result.stdout
    except subprocess.TimeoutExpired:
        return f"Error: execution exceeded {timeout} seconds"
    except Exception as e:
        return f"Error: {e}"
    finally:
        # Don't leave the scratch file behind (original leaked it).
        if os.path.exists(path):
            os.remove(path)
167
+
168
def run_rust_code(code):
    """Compile *code* with rustc and run the produced binary.

    Returns the compiler error text on a failed compile, otherwise the
    program's stderr if any, otherwise its stdout.
    """
    with open('code.rs', 'w') as file:
        file.write(code)

    compiler = subprocess.Popen(['rustc', 'code.rs'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                text=True)
    _, compile_errors = compiler.communicate()

    if compiler.returncode != 0:
        return f"Compilation Error: {compile_errors}"

    runner = subprocess.Popen(['./code'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              text=True)
    run_output, run_errors = runner.communicate()
    return run_errors if run_errors else run_output
187
+
188
def run_js_code():
    """Placeholder for in-app JavaScript execution — not implemented.

    TODO: mirror run_python_code/run_rust_code using a Node.js subprocess.
    """

    pass
191
+
192
def dummy_auto_complete(code: str, language: str = None) -> str:
    """Simulate an LLM code completion.

    Args:
        code (str): The incomplete code in the editor (unused by this stub).
        language (str, optional): Selected programming language.
    Returns:
        str: A canned "completed" snippet; unknown languages fall back
        to the Python snippet.
    """
    time.sleep(2)  # Simulate processing time

    python_snippet = """# Function to calculate sum
def calculate_sum(a: int, b: int) -> int:
    '''Calculate sum of two integers'''
    return a + b"""

    javascript_snippet = """// Function to calculate sum
function calculateSum(a, b) {
    return a + b;
}"""

    rust_snippet = """// Function to calculate sum
fn calculate_sum(a: i32, b: i32) -> i32 {
    a + b
}"""

    completions = {
        "python": python_snippet,
        "javascript": javascript_snippet,
        "rust": rust_snippet,
    }

    # Return language-specific completion or default to Python
    return completions.get(language, completions["python"])
223
+
224
# Sidebar settings — editor appearance controls feed straight into st_ace below.
with st.sidebar:
    st.title("SolCoder")
    st.header("Solana AI Code Editor")
    theme = st.selectbox("Editor Theme",
                         ["monokai", "github", "solarized_dark", "solarized_light", "dracula"])
    font_size = st.slider("Font Size", 12, 24, 14)
    show_gutter = st.checkbox("Show Line Numbers", value=True)
    language = st.selectbox("Language", ["python", "javascript", "rust"], index=0)
233
+
234
# Create two columns for main layout: editor (3/5) and chat (2/5).
col1, col2 = st.columns([3, 2])

# Left Column - Code Editor and Output
with col1:
    st.subheader("")  # empty spacer so both column headers line up
    st.subheader("Code Editor")
    st.markdown("Write your code below and use the buttons to run or debug")

    # Code editor; auto_update pushes editor content back on every change.
    editor = st_ace(
        value=st.session_state.editor_code,
        language=language,
        theme=theme,
        font_size=font_size,
        show_gutter=show_gutter,
        auto_update=True,
        height=EDITOR_HEIGHT,
        key="editor"
    )

    # Buttons - Modified to include three columns
    button_cols = st.columns(3)
    with button_cols[0]:
        auto_complete_btn = st.button("Auto-Complete", use_container_width=True)
    with button_cols[1]:
        run_btn = st.button("Run Code", use_container_width=True)
    with button_cols[2]:
        debug_btn = st.button("Debug Code", use_container_width=True)

    # Handle auto-complete button click
    if auto_complete_btn:
        with st.spinner("Generating code completion..."):
            try:
                # Get completed code from dummy function
                completed_code = dummy_auto_complete(st.session_state.editor_code, language)

                st.markdown(f'<div class="output-area">```{completed_code}```</div>', unsafe_allow_html=True)

                # Update editor content in session state
                # NOTE(review): the widget won't redraw with this value until
                # the next rerun — confirm this is the intended UX.
                st.session_state.editor_code = completed_code

                # Show success message
                st.success("Code successfully completed!")

            except Exception as e:
                st.error(f"Error during code completion: {str(e)}")

    # Output area - simplified container structure
    if run_btn:
        # `editor` holds the current editor text returned by st_ace above.
        output = run_python_code(editor) if language == "python" else \
                 run_rust_code(editor) if language == "rust" else \
                 "Currently, only Python and Rust execution is supported."
        st.markdown(f'<div class="output-area">{output}</div>', unsafe_allow_html=True)
    else:
        st.markdown('<div class="output-area placeholder-text">Code output will appear here...</div>',
                    unsafe_allow_html=True)
292
+
293
def format_ai_response(response):
    """Format AI response into readable message.

    Dict responses carrying a 'generation' object are rendered as the
    prefix plus fenced imports and code; anything else is stringified.
    """
    if isinstance(response, dict) and 'generation' in response:
        generation = response['generation']
        rendered = []
        if hasattr(generation, 'prefix'):
            rendered.append(generation.prefix)
        if hasattr(generation, 'imports'):
            rendered.append(f"```\n{generation.imports}\n```")
        if hasattr(generation, 'code'):
            rendered.append(f"```\n{generation.code}\n```")
        return "\n".join(rendered)
    return str(response)  # Fallback for simple responses
309
+
310
+
311
# Right Column - Chat Interface
with col2:
    # Match header styling with the code section
    # st.markdown('<p class="section-header">AI Assistant Chat</p>', unsafe_allow_html=True)
    st.subheader("")  # spacer to align with the editor column header
    st.subheader("Code Assistant Agent")
322
def validate_message(message):
    """Accept only AI/Human messages whose content is a non-empty string."""
    if not isinstance(message, (AIMessage, HumanMessage)):
        return False
    content = message.content
    return bool(content) and isinstance(content, str)

def add_message_to_history(message):
    """Append *message* to the session chat history; return acceptance."""
    if not validate_message(message):
        return False
    st.session_state.messages.append(message)
    return True
336
+
337
# Update message display section: the whole history is replayed each rerun.
for message in st.session_state.messages:
    if isinstance(message, AIMessage):
        with st.chat_message("AI"):
            # Handle code blocks in message: after splitting on ```,
            # odd-indexed parts are the fenced code segments.
            content = message.content
            if "```" in content:
                parts = content.split("```")
                for i, part in enumerate(parts):
                    if i % 2 == 0:  # Regular text
                        if part.strip():
                            st.markdown(part)
                    else:  # Code block
                        st.code(part)
            else:
                st.markdown(content)
    elif isinstance(message, HumanMessage):
        with st.chat_message("Human"):
            st.markdown(message.content)

# Clear chat button — resets history and redraws immediately.
if st.button("Clear Chat", use_container_width=True):
    st.session_state.messages = []
    st.rerun()
363
+
364
# Chat input: the walrus binds the submitted text (None when nothing sent).
if prompt := st.chat_input("Ask about writing solana code..."):
    user_message = HumanMessage(content=prompt)

    # Add user message to history
    if add_message_to_history(user_message):
        with st.chat_message("AI"):
            # Create a placeholder for the loading indicator
            response_placeholder = st.empty()

            # Show loading message
            with response_placeholder:
                with st.spinner("AI is thinking..."):
                    try:
                        # Get AI response from the LangGraph runnable.
                        # NOTE(review): chatbot may be None if initialization
                        # failed — that raises here and lands in the except.
                        ai_response = chatbot.invoke({
                            "messages": [("user", prompt)],
                            "iterations": 0,
                            "error": ""
                        })

                        # Format and add AI response
                        formatted_response = format_ai_response(ai_response)
                        ai_message = AIMessage(content=formatted_response)

                        # Clear the loading indicator and show the response
                        response_placeholder.empty()
                        st.markdown(formatted_response)

                        # Add to history
                        add_message_to_history(ai_message)

                        # Only rerun after successful processing
                        st.rerun()

                    except Exception as e:
                        response_placeholder.error(f"Error generating response: {str(e)}")
400
+
401
+
402
+
code_assistant_runnable.py ADDED
@@ -0,0 +1,644 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_openai import ChatOpenAI
2
+ from langchain_ollama import ChatOllama
3
+ from langchain_groq import ChatGroq
4
+ from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
5
+
6
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
7
+ from langchain_community.document_loaders import WebBaseLoader
8
+ # from langchain_community.vectorstores import Chroma
9
+ from langchain_chroma import Chroma
10
+
11
+ from langchain_community.embeddings import HuggingFaceBgeEmbeddings
12
+ import pickle
13
+
14
+ from langchain_core.prompts import ChatPromptTemplate
15
+ from langchain_openai import ChatOpenAI
16
+ from pydantic import BaseModel, Field
17
+
18
+ from typing import List
19
+ from typing_extensions import TypedDict
20
+ from langgraph.graph import END, StateGraph, START
21
+
22
+ import subprocess
23
+ import time
24
+ import re
25
+ import json
26
+ import os
27
+ from dotenv import load_dotenv
28
+
29
+
30
+ load_dotenv()
31
+
32
+
33
# Add after your imports: silence the HF tokenizers fork warning.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# llm = ChatOllama(model="codestral")
expt_llm = "gpt-4o-mini"
llm = ChatOpenAI(temperature=0, model=expt_llm)  # temperature=0 for deterministic code gen

## Create retrieval from existing store

# Load an existing (saved) embedding model from a pickle file.
# NOTE(review): absolute machine-specific path — breaks on any other host;
# consider an env var or relative path. Also, unpickling executes arbitrary
# code — only ever load pickles produced by this project.
model_path = "/Users/user/Projects/LLM_Projects/Hackathon/SACE/Model/embedding_model.pkl"
with open(model_path, 'rb') as f:
    embedding_model = pickle.load(f)

print("Loaded embedding model successfully")

# Reopen the persisted Chroma collection with the same embedding function.
vectorstore = Chroma(
    collection_name="solcoder-chroma",
    embedding_function=embedding_model,
    persist_directory="solcoder-db"
)

retriever = vectorstore.as_retriever()
59
+
60
+
61
# Grader prompt: the system turn carries retrieved documentation as
# {context}; conversation turns flow in through the {messages} placeholder.
code_gen_prompt = ChatPromptTemplate(
    [
        (
            "system",
            """<instructions> You are a coding assistant with expertise in Solana Blockchain ecosystem. \n
    Here is a set of Solana development documentation based on a user question: \n ------- \n {context} \n ------- \n
    Answer the user question based on the above provided documentation. Ensure any code you provide can be executed with all required imports and variables \n
    defined. Structure your answer: 1) a prefix describing the code solution, 2) the imports, 3) the functioning code block. \n
    Invoke the code tool to structure the output correctly. </instructions> \n Here is the user question:""",
        ),
        ("placeholder", "{messages}"),
    ]
)
75
+
76
+
77
# Data model
class code(BaseModel):
    """Schema for code solutions to questions about Solana development."""

    # NOTE: lowercase class name is kept — with_structured_output(code, ...)
    # below references it by this exact name.
    prefix: str = Field(description="Description of the problem and approach")
    imports: str = Field(description="Code block import statements")
    code: str = Field(description="Code block not including import statements")
    language: str = Field(description="programming language the code is implemented")

    class Config:
        # Example payload exposed to the model via the JSON schema.
        json_schema_extra = {
            "example": {
                "prefix": "To read the balance of an account from the Solana network, you can use the `@solana/web3.js` library.",
                "imports": 'import { clusterApiUrl, Connection, PublicKey, LAMPORTS_PER_SOL,} from "@solana/web3.js";',
                "code": """const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
const wallet = new PublicKey("nicktrLHhYzLmoVbuZQzHUTicd2sfP571orwo9jfc8c");

const balance = await connection.getBalance(wallet);
console.log(`Balance: ${balance / LAMPORTS_PER_SOL} SOL`);""",
                "language": "typescript"
            }
        }
100
+
101
+
102
+ # expt_llm = "codestral"
103
+ # llm = ChatOllama(temperature=0, model=expt_llm)
104
+
105
+
106
# Post-processing
def format_docs(docs):
    """Concatenate retrieved documents' text, separated by blank lines."""
    contents = [doc.page_content for doc in docs]
    return "\n\n".join(contents)
109
+
110
+
111
# Prompted structured-output chain; include_raw=True makes it return a dict
# with 'raw', 'parsed', and 'parsing_error' keys instead of raising on a
# parse failure.
structured_llm = code_gen_prompt | llm.with_structured_output(code, include_raw=True)
113
+
114
# Optional: Check for errors in case tool use is flaky
def check_llm_output(tool_output):
    """Check for parse error or failure to call the tool"""
    error = tool_output["parsing_error"]

    if error:
        # Structured output could not be parsed — surface raw text + error.
        print("Parsing error!")
        raw_output = str(tool_output["raw"].content)
        raise ValueError(
            f"Error parsing your output! Be sure to invoke the tool. Output: {raw_output}. \n Parse error: {error}"
        )

    if not tool_output["parsed"]:
        # The model answered without invoking the structured-output tool.
        print("Failed to invoke tool!")
        raise ValueError(
            "You did not use the provided tool! Be sure to invoke the tool to structure the output."
        )

    return tool_output
135
+
136
+
137
# Chain with output check.
# FIX: structured_llm already begins with code_gen_prompt (see its
# definition above), so the original `code_gen_prompt | structured_llm`
# applied the prompt twice — feeding a prompt value into another prompt.
# Compose only the structured chain with the output check.
code_chain_raw = (
    structured_llm | check_llm_output
)
141
+
142
def insert_errors(inputs):
    """Insert errors for tool parsing in the messages"""
    error = inputs["error"]
    retry_turn = (
        "assistant",
        f"Retry. You are required to fix the parsing errors: {error} \n\n You must invoke the provided tool.",
    )
    messages = inputs["messages"]
    messages += [retry_turn]  # in-place extend, matching caller expectations
    return {
        "messages": messages,
        "context": inputs["context"],
    }
158
+
159
+
160
# This will be run as a fallback chain
fallback_chain = insert_errors | code_chain_raw

N = 3  # Max re-tries

# On failure the raised error is injected under the "error" key and the
# fallback re-prompts the model, up to N attempts.
code_gen_chain_re_try = code_chain_raw.with_fallbacks(
    fallbacks=[fallback_chain] * N, exception_key="error"
)
168
+
169
+
170
def parse_output(solution):
    """When we add 'include_raw=True' to structured output,
    it will return a dict w 'raw', 'parsed', 'parsing_error'."""
    parsed = solution["parsed"]
    return parsed
175
+
176
# Optional: With re-try to correct for failure to invoke tool
code_gen_chain = code_gen_chain_re_try | parse_output

# No re-try
# code_gen_chain = code_gen_prompt | structured_llm | parse_output
181
+
182
+
183
### Create State

class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        error : Binary flag for control flow to indicate whether test error was tripped
        messages : With user question, error messages, reasoning
        generation : Code solution
        iterations : Number of tries
    """

    error: str        # "yes" / "no" ("" on the very first pass)
    messages: List    # list of (role, text) tuples
    generation: List  # NOTE(review): generate() stores a `code` instance here — annotation looks wrong; confirm
    iterations: int
200
+
201
### HELPER FUNCTIONS

def check_node_typescript_installation():
    """Check if Node.js and TypeScript are properly installed.

    Returns:
        (ok, message) — ok is True only when both tools respond to --version.
    """
    try:
        node = subprocess.run(["node", "--version"],
                              capture_output=True, text=True)
        if node.returncode != 0:
            return False, "Node.js is not installed or not in PATH"

        tsc = subprocess.run(["npx", "tsc", "--version"],
                             capture_output=True, text=True)
        if tsc.returncode != 0:
            return False, "TypeScript is not installed. Please run 'npm install -g typescript'"

        return True, "Environment OK"
    except Exception as e:
        return False, f"Error checking environment: {str(e)}"
223
+
224
def create_temp_package_json():
    """Create a temporary package.json file for Node.js execution."""
    manifest = {
        "name": "temp-code-execution",
        "version": "1.0.0",
        "type": "module",  # enables ES-module syntax in the scratch scripts
        "dependencies": {
            "typescript": "^4.9.5"
        },
    }
    with open("package.json", "w") as f:
        json.dump(manifest, f)
236
+
237
def run_javascript_code(code, is_typescript=False):
    """Execute JavaScript or TypeScript code using Node.js.

    Fixes over the original: the TypeScript branch returned the *compile*
    result unconditionally, so the compiled program was never run and the
    code after that return (the node invocation and cleanup) was
    unreachable. Now we only short-circuit on compile failure; cleanup runs
    on every path via finally.

    Args:
        code: Source text to execute.
        is_typescript: Compile with tsc before running when True.

    Returns:
        A subprocess.CompletedProcess from the node run (or from tsc on a
        failed compile, so callers can inspect .stderr), or an error string
        when the environment is unusable.
    """
    # Check environment first
    env_ok, env_message = check_node_typescript_installation()
    if not env_ok:
        return f"Environment Error: {env_message}"

    try:
        # Create necessary files
        create_temp_package_json()

        if is_typescript:
            # For TypeScript, we need to compile first
            with open("temp_code.ts", "w") as f:
                f.write(code)

            compile_process = subprocess.run(
                ["npx", "tsc", "temp_code.ts", "--module", "ES2020", "--target", "ES2020"],
                capture_output=True,
                text=True
            )
            if compile_process.returncode != 0:
                # Hand the failed compile back so the caller can read .stderr.
                return compile_process

            file_to_run = "temp_code.js"  # emitted next to the .ts source
        else:
            # For JavaScript, write directly to .js file
            with open("temp_code.js", "w") as f:
                f.write(code)
            file_to_run = "temp_code.js"

        # Execute the code using Node.js
        result = subprocess.run(
            ["node", file_to_run],
            capture_output=True,
            text=True
        )
        return result

    except Exception as e:
        return f"Error: {e}"
    finally:
        # Clean up temporary files on every path (original skipped this on
        # the TypeScript and error paths).
        for file in ("temp_code.js", "temp_code.ts", "package.json"):
            if os.path.exists(file):
                os.remove(file)
291
+
292
def run_rust_code(code):
    """Compile *code* with rustc and execute the resulting `./code` binary.

    Returns the compiler error text when compilation fails, otherwise the
    program's stderr if any, otherwise its stdout.
    """
    with open('code.rs', 'w') as file:
        file.write(code)

    compiler = subprocess.Popen(['rustc', 'code.rs'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                text=True)
    _, compile_errors = compiler.communicate()

    if compiler.returncode != 0:
        return f"Compilation Error: {compile_errors}"

    runner = subprocess.Popen(['./code'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              text=True)
    run_output, run_errors = runner.communicate()
    return run_errors if run_errors else run_output
311
+
312
+
313
### Parameter

# Max tries
max_iterations = 3

# Reflect: when set to 'reflect', failed checks route through the reflect
# node before regenerating; any other value regenerates directly.
# flag = 'reflect'
flag = "do not reflect"
321
+
322
### Nodes


def generate(state: GraphState):
    """
    Generate a code solution

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, generation
    """

    print("---GENERATING CODE SOLUTION---")

    # State
    messages = state["messages"]
    iterations = state["iterations"]
    error = state["error"]

    # Most recent turn's text; messages are (role, text) tuples.
    question = state['messages'][-1][1]

    # We have been routed back to generation with an error
    if error == "yes":
        messages += [
            (
                "user",
                "Now, try again. Invoke the code tool to structure the output with a prefix, imports, and code block:",
            )
        ]

    # Post-processing (NOTE: shadows the module-level format_docs helper)
    def format_docs(docs):
        return "\n\n".join(doc.page_content for doc in docs)

    # Retrieve Solana docs relevant to the question and flatten to text.
    retrieved_docs = retriever.invoke(question)
    formated_docs = format_docs(retrieved_docs)

    # Solution
    code_solution = code_gen_chain.invoke(
        {"context": formated_docs, "messages": messages}
    )
    messages += [
        (
            "assistant",
            f"{code_solution.prefix} \n Imports: {code_solution.imports} \n Code: {code_solution.code}",
        )
    ]

    # Increment
    iterations = iterations + 1
    return {"generation": code_solution, "messages": messages, "iterations": iterations}
376
+
377
+
378
def code_check(state: GraphState):
    """
    Check the generated solution by importing/executing it.

    Fixes over the original:
    - `if compile_process.stderr:` / `if run_process.stderr:` tested the PIPE
      file objects, which are always truthy after communicate(), so every
      Rust solution was reported as failing. We now test returncode and the
      captured stderr text instead.
    - run_javascript_code can return an error *string* (bad environment);
      we no longer dereference .stderr on it blindly.
    - typo "typesript" fixed in the feedback message.

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, error ("yes"/"no")
    """
    print("---CHECKING CODE---")

    # State
    messages = state["messages"]
    code_solution = state["generation"]
    iterations = state["iterations"]

    # Get solution components
    imports = code_solution.imports
    code = code_solution.code
    language = code_solution.language
    lang = language.lower()

    def _fail(feedback):
        """Append feedback for the model and flag the error for re-generation."""
        messages.append(("user", feedback))
        return {
            "generation": code_solution,
            "messages": messages,
            "iterations": iterations,
            "error": "yes",
        }

    if lang == "python":
        # SECURITY NOTE: exec() runs model-generated code in-process; keep
        # this sandboxed/containerized outside of local experimentation.
        # Check imports
        try:
            exec(imports)
        except Exception as e:
            print("---CODE IMPORT CHECK: FAILED---")
            return _fail(f"Your solution failed the import test: {e}")

        # Check execution
        try:
            exec(imports + "\n" + code)
        except Exception as e:
            print("---CODE BLOCK CHECK: FAILED---")
            return _fail(f"Your solution failed the code execution test: {e}")

    if lang in ("javascript", "typescript"):
        full_code = imports + "\n" + code
        is_ts = lang == "typescript"
        result = run_javascript_code(full_code, is_typescript=is_ts)

        # run_javascript_code returns a plain string on environment errors.
        stderr = result if isinstance(result, str) else result.stderr
        if stderr:
            label = "TS" if is_ts else "JS"
            print(f"---{label} CODE BLOCK CHECK: FAILED---")
            print(f"This is the error:{stderr}")
            return _fail(f"Your {lang} solution failed the code execution test: {stderr}")

    if lang == "rust":
        full_code = imports + "\n" + code

        with open('code.rs', 'w') as file:
            file.write(full_code)

        compile_process = subprocess.Popen(['rustc', 'code.rs'],
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE,
                                           text=True)
        compile_output, compile_errors = compile_process.communicate()

        # FIXED: check the exit status, not the always-truthy pipe object.
        if compile_process.returncode != 0:
            print("---RUST CODE BLOCK CHECK: COMPILATION FAILED---")
            print(f"This is the error:{compile_errors}")
            return _fail(f"Your rust solution failed the code compilation test: {compile_errors}")

        run_process = subprocess.Popen(['./code'],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       text=True)
        run_output, run_errors = run_process.communicate()

        # FIXED: inspect the captured stderr text, not the pipe object.
        if run_errors:
            print("---RUST CODE BLOCK CHECK: RUN FAILED---")
            print(f"This is the error:{run_errors}")
            return _fail(f"Your rust solution failed the code run test: {run_errors}")

    elif lang not in ["rust", "python", "typescript", "javascript"]:
        # Can't test the code
        print("---CANNOT TEST CODE: CODE NOT IN EXPECTED LANGUAGE---")
        return {
            "generation": code_solution,
            "messages": messages,
            "iterations": iterations,
            "error": "no",
        }

    # No errors
    print("---NO CODE TEST FAILURES---")
    return {
        "generation": code_solution,
        "messages": messages,
        "iterations": iterations,
        "error": "no",
    }
+ }
542
+
543
+
544
def reflect(state: GraphState):
    """
    Reflect on errors

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, generation
    """

    print("---REFLECTING ON CODE SOLUTION ERRORS---")

    # State
    messages = state["messages"]
    iterations = state["iterations"]
    code_solution = state["generation"]
    # NOTE(review): after code_check appended feedback, [-1] is the error
    # message rather than the original user question — verify this is intended.
    question = state['messages'][-1][1]

    # Prompt reflection

    # Post-processing (NOTE: shadows the module-level format_docs helper)
    def format_docs(docs):
        return "\n\n".join(doc.page_content for doc in docs)

    retrieved_docs = retriever.invoke(question)
    formated_docs = format_docs(retrieved_docs)

    # Add reflection: reuse the generation chain over the accumulated
    # conversation, which now includes the failure feedback.
    reflections = code_gen_chain.invoke(
        {"context": formated_docs, "messages": messages}
    )

    messages += [("assistant", f"Here are reflections on the error: {reflections}")]
    return {"generation": code_solution, "messages": messages, "iterations": iterations}
579
+
580
+
581
### Edges


def decide_to_finish(state: GraphState):
    """
    Determines whether to finish.

    Args:
        state (dict): The current graph state

    Returns:
        str: Next node to call
    """
    finished = state["error"] == "no" or state["iterations"] == max_iterations

    if finished:
        print("---DECISION: FINISH---")
        return "end"

    print("---DECISION: RE-TRY SOLUTION---")
    return "reflect" if flag == "reflect" else "generate"
606
+
607
+
608
+
609
def get_runnable():
    """Compile and return the generate -> check (-> reflect) LangGraph app."""
    workflow = StateGraph(GraphState)

    # Define the nodes
    workflow.add_node("generate", generate)      # generation solution
    workflow.add_node("check_code", code_check)  # check code
    workflow.add_node("reflect", reflect)        # reflect

    # Build graph
    workflow.add_edge(START, "generate")
    workflow.add_edge("generate", "check_code")
    workflow.add_conditional_edges(
        "check_code",
        decide_to_finish,  # returns "end" | "reflect" | "generate"
        {
            "end": END,
            "reflect": "reflect",
            "generate": "generate",
        },
    )
    workflow.add_edge("reflect", "generate")

    # Remove the checkpointer for now since it's causing issues
    code_assistant_app = workflow.compile()

    # memory = AsyncSqliteSaver.from_conn_string(":memory:")

    # code_assistant_app = workflow.compile(checkpointer=memory)

    return code_assistant_app

# if __name__ == "__main__":
#     graph = get_runnable()
#     prompt = "How do I read from the solana network?"
#     print(f'{graph.invoke({"messages": [("user", prompt)], "iterations": 0, "error": ""})}')
+
requirements.txt ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ langgraph==0.2.56
2
+ langchain-community
3
+ langchain-groq
4
+ langchain-anthropic
5
+ langchain-openai
6
+ langchain-ollama
7
+ langchain-nomic[local]
8
+ tiktoken
9
+ langchainhub
10
+ chromadb
11
+ langchain-chroma
12
+ langchain-text-splitters
13
+ bs4
14
+ python-dotenv
15
+ sentence_transformers
16
+ langchain_experimental
17
+ matplotlib
18
+ streamlit==1.38.0
19
+ streamlit_ace==0.1.1
20
+ transformers
21
+ accelerate
22
+ datasets>=2.16.1
23
+ bitsandbytes==0.41.3
24
+ peft==0.8.2
25
+ trl==0.7.10
26
+ wandb==0.16.3
27
+ huggingface_hub
28
+ nbformat
29
+ PyGithub
30
+ playwright
31
+ aiosqlite==0.20.0
32
+ langgraph-checkpoint-sqlite