WaysAheadGlobal committed on
Commit
1cfcc0f
·
verified ·
1 Parent(s): 17939d4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +175 -104
app.py CHANGED
@@ -1,131 +1,202 @@
1
  import streamlit as st
2
- import subprocess
3
  import os
4
- import time
5
- from langchain.text_splitter import RecursiveCharacterTextSplitter
6
- from langchain_openai import OpenAIEmbeddings, ChatOpenAI
7
  from langchain_chroma import Chroma
8
- from langchain.chains import RetrievalQA
9
- from langchain.memory import ConversationBufferMemory
10
  from langchain.prompts import PromptTemplate
 
11
  from langchain.schema import AIMessage, HumanMessage
12
 
13
- # Set OpenAI API Key
14
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
15
-
16
- # Initialize OpenAI Chat Model
17
- llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.3, openai_api_key=OPENAI_API_KEY)
18
-
19
- # Initialize ChromaDB Vector Store
20
- vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
21
-
22
- # Define Prompt Template
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  prompt_template = PromptTemplate(
24
- input_variables=["language", "description"],
25
  template="""
26
- You are an **expert AI coding assistant** proficient in **ALL web development languages**, including:
27
- - HTML, CSS, JavaScript, React, Vue.js, Angular, Svelte
28
- - Node.js, Express.js, Django, Flask, Ruby on Rails
29
- - PHP, Laravel, ASP.NET, .NET Core, C#
30
- - Python, TypeScript, GraphQL, SQL, NoSQL
31
- - WebSockets, REST APIs, WebAssembly, Web3.js
32
 
33
- **Task:**
34
- Generate a **complete, structured, and functional** code snippet based on the user request.
35
 
36
  **Language:** {language}
37
  **Description:** {description}
38
 
39
- - Ensure the code follows best practices and industry standards.
40
  - Include comments explaining key sections.
41
  - If applicable, provide installation/setup instructions.
42
  """
43
  )
44
 
45
- # Streamlit UI
46
- st.set_page_config(page_title="Intelligent Coding Agent", layout="wide")
47
- st.title("🧠💻 Intelligent Coding Agent")
48
- st.markdown("### Generate and Run Code in Any Web Development Language!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
 
50
- # Language selection
51
  languages = ["Python", "JavaScript", "PHP", "C#", "HTML/CSS/JS"]
52
  language = st.selectbox("Select a programming language:", languages)
53
-
54
- # User input for code description
55
  description = st.text_area("Describe the code you need:", placeholder="E.g., Build a login system in PHP")
56
 
57
- # Generate button
58
- if st.button("Generate Code"):
 
 
 
 
 
 
 
 
59
  if not description.strip():
60
  st.warning("Please enter a code description!")
61
- else:
62
- # Show Progress Bar
63
- progress_bar = st.progress(0)
64
- status_text = st.empty()
65
-
66
- # Progress Simulation
67
- for i in range(50):
68
- progress_bar.progress(i + 1)
69
- status_text.text(f"🔄 Generating code... ({i+1}%)")
70
- time.sleep(0.03)
71
-
72
- # Generate Code
73
- prompt = prompt_template.format(language=language, description=description)
74
- response = llm.invoke([HumanMessage(content=prompt)])
75
-
76
- # Progress Complete
77
- for i in range(50, 100):
78
- progress_bar.progress(i + 1)
79
- status_text.text(f" Code Generation Complete ({i+1}%)")
80
- time.sleep(0.01)
81
-
82
- # Show Generated Code
83
- st.subheader("Generated Code:")
84
- st.code(response.content, language=language.lower())
85
-
86
- # Save to ChromaDB
87
- vectorstore.add_texts([response.content])
88
-
89
- # Run Code Section
90
- st.subheader("Run Code (For Python, JS, PHP, C#)")
91
- if st.button("Run Code"):
92
- if language in ["Python", "JavaScript", "PHP", "C#"]:
93
- # Save code to temp file
94
- filename = f"temp_code.{language.lower()}"
95
- with open(filename, "w") as f:
96
- f.write(response.content)
97
-
98
- # Determine execution command
99
- if language == "Python":
100
- command = ["python", filename]
101
- elif language == "JavaScript":
102
- command = ["node", filename]
103
- elif language == "PHP":
104
- command = ["php", filename]
105
- elif language == "C#":
106
- command = ["dotnet-script", filename]
107
-
108
- # Run the code using subprocess
109
- try:
110
- output = subprocess.run(command, capture_output=True, text=True, timeout=10)
111
- st.subheader("Output:")
112
- st.text(output.stdout if output.stdout else "No output")
113
- if output.stderr:
114
- st.subheader("Errors:")
115
- st.text(output.stderr)
116
- except Exception as e:
117
- st.error(f"Error executing code: {str(e)}")
118
-
119
- # Render HTML/CSS/JS
120
- if language == "HTML/CSS/JS":
121
- st.subheader("Live Preview:")
122
- st.components.v1.html(response.content, height=500)
123
-
124
- # Download Option
125
- if st.button("Download Code"):
126
- with open("generated_code.txt", "w") as f:
127
- f.write(response.content)
128
- st.download_button("Download Generated Code", "generated_code.txt", "text/plain")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
 
130
  st.markdown("---")
131
- st.markdown("🔹 **Built with LangChain (Latest), OpenAI GPT, and ChromaDB** 🔹")
 
1
  import streamlit as st
 
2
  import os
3
+ import asyncio
4
+ from langchain_openai import ChatOpenAI, OpenAIEmbeddings
 
5
  from langchain_chroma import Chroma
 
 
6
  from langchain.prompts import PromptTemplate
7
+ from langchain.memory import ConversationBufferMemory
8
  from langchain.schema import AIMessage, HumanMessage
9
 
10
# ------------------------------- CONFIG -------------------------------
# Page config must be the first Streamlit call in the script.
st.set_page_config(page_title="Intelligent Coding Agent", layout="wide")
# NOTE(review): this variable is read but never passed to the clients below;
# langchain_openai reads the OPENAI_API_KEY env var itself — confirm intended.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # Ensure API key is set
13
+
14
+ # ------------------------------- CACHING -------------------------------
15
@st.cache_resource
def get_openai_model():
    """Build the shared ChatOpenAI client; cached so it is constructed once per process."""
    chat_model = ChatOpenAI(model_name="gpt-4-turbo", temperature=0.7)
    return chat_model
19
+
20
@st.cache_resource
def get_vectorstore():
    """Load the ChromaDB vector store once (cached) for faster retrieval.

    The persist directory was previously hard-coded to an absolute path that
    only exists on one machine; it can now be overridden with the
    CHROMA_DB_DIR environment variable while keeping the old value as the
    backward-compatible default.
    """
    return Chroma(
        embedding_function=OpenAIEmbeddings(),
        persist_directory=os.getenv("CHROMA_DB_DIR", "/home/user/chroma_db"),
    )
27
+
28
@st.cache_resource
def get_memory():
    """Create the conversation memory buffer exactly once per session."""
    buffer = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    return buffer
32
+
33
# Initialize the cached singletons used throughout the app.
llm = get_openai_model()
vectorstore = get_vectorstore()
memory = get_memory()  # NOTE(review): created but not read anywhere visible below — confirm it is still needed
37
+
38
# ------------------------------- PROMPT TEMPLATES -------------------------------
# Initial generation prompt; {context} is the joined chat history so the model
# can stay consistent with earlier answers.
prompt_template = PromptTemplate(
    input_variables=["language", "description", "context"],
    template="""
    You are an **expert AI coding assistant** with memory.

    **Context from previous interactions:**
    {context}

    **Current Task:** Generate a structured and functional code snippet.

    **Language:** {language}
    **Description:** {description}

    - Ensure the code follows best practices.
    - Include comments explaining key sections.
    - If applicable, provide installation/setup instructions.
    """
)
57
 
58
# Prompt used by enhance_code(): asks the model to improve the existing snippet.
enhance_template = PromptTemplate(
    input_variables=["code", "context"],
    template="""
    Improve the given code based on context:

    **Previous Conversation Context:**
    {context}

    **Original Code:**
    {code}

    **Enhanced Code with better efficiency, security, and structure:**
    """
)
72
+
73
# Prompt used by modify_code(): applies a user-requested change to the snippet.
modify_template = PromptTemplate(
    input_variables=["code", "modification", "context"],
    template="""
    Modify the code as per user requirements while maintaining context.

    **Previous Conversation Context:**
    {context}

    **Original Code:**
    {code}

    **User Requested Modification:**
    {modification}

    **Modified Code:**
    """
)
90
+
91
# ------------------------------- STREAMLIT UI -------------------------------
st.title("🧠💻 Context-Aware Intelligent Coding Agent")
st.markdown("### Generate, Enhance, and Modify Code While Retaining Context!")

# Language choice drives both the prompt and the st.code() syntax highlighting.
languages = ["Python", "JavaScript", "PHP", "C#", "HTML/CSS/JS"]
language = st.selectbox("Select a programming language:", languages)

# Free-text request used by generate_code() below.
description = st.text_area("Describe the code you need:", placeholder="E.g., Build a login system in PHP")
98
 
99
# ------------------------------- SESSION STATE -------------------------------
# Lazily initialise the keys this app relies on across Streamlit reruns;
# setdefault only writes when the key is absent, same as the explicit checks.
st.session_state.setdefault("generated_code", "")
st.session_state.setdefault("chat_history", [])
105
+
106
# ------------------------------- ASYNC FUNCTIONS -------------------------------
async def generate_code():
    """Generate a snippet for the current request, feeding prior turns in as context."""
    # Guard: nothing to do without a description.
    if not description.strip():
        st.warning("Please enter a code description!")
        return

    st.info("⏳ Generating your code...")

    # Fold the running conversation into one context string.
    history_blob = "\n".join(st.session_state.chat_history)
    rendered = prompt_template.format(
        language=language,
        description=description,
        context=history_blob,
    )

    # Run the blocking LLM call off the event loop.
    reply = await asyncio.to_thread(llm.invoke, [HumanMessage(content=rendered)])

    # Record the result and extend the conversation memory.
    st.session_state.generated_code = reply.content
    st.session_state.chat_history.append(
        f"User asked: {description}\nAI Response:\n{reply.content}"
    )

    st.subheader("Generated Code:")
    st.code(st.session_state.generated_code, language=language.lower())

    # Persist the snippet in the vector store for future reference.
    vectorstore.add_texts([st.session_state.generated_code])
134
+
135
async def enhance_code():
    """Rewrite the current snippet for quality, using the conversation history as context."""
    # Guard: there must be a snippet to enhance.
    if not st.session_state.generated_code:
        st.warning("Generate code first before enhancing!")
        return

    st.info(" Enhancing the code for better performance...")

    history_blob = "\n".join(st.session_state.chat_history)
    rendered = enhance_template.format(
        code=st.session_state.generated_code,
        context=history_blob,
    )
    # Blocking LLM call moved off the event loop.
    reply = await asyncio.to_thread(llm.invoke, [HumanMessage(content=rendered)])

    # Replace the stored snippet and note the enhancement in history.
    st.session_state.generated_code = reply.content
    st.session_state.chat_history.append(f"AI Enhanced Code:\n{reply.content}")

    st.subheader("Enhanced Code:")
    st.code(st.session_state.generated_code, language=language.lower())
153
+
154
async def modify_code():
    """Render the modification widgets and, on request, rewrite the stored snippet."""
    modification = st.text_area("Specify modifications (optional):", placeholder="E.g., Add validation for login form")

    if st.button("Apply Modifications"):
        # Both an existing snippet and a modification request are required.
        if not st.session_state.generated_code:
            st.warning("Generate code first before modifying!")
            return
        if not modification.strip():
            st.warning("Please enter modification details!")
            return

        st.info("⏳ Modifying the code as per request...")

        history_blob = "\n".join(st.session_state.chat_history)
        rendered = modify_template.format(
            code=st.session_state.generated_code,
            modification=modification,
            context=history_blob,
        )
        # Blocking LLM call moved off the event loop.
        reply = await asyncio.to_thread(llm.invoke, [HumanMessage(content=rendered)])

        # Replace the stored snippet and log the request/response pair.
        st.session_state.generated_code = reply.content
        st.session_state.chat_history.append(
            f"User Modification Request: {modification}\nAI Modified Code:\n{reply.content}"
        )

        st.subheader("Modified Code:")
        st.code(st.session_state.generated_code, language=language.lower())
178
+
179
# ------------------------------- BUTTONS -------------------------------
col1, col2, col3 = st.columns(3)

with col1:
    if st.button("Generate Code"):
        asyncio.run(generate_code())

with col2:
    if st.button("Enhance Code"):
        asyncio.run(enhance_code())

with col3:
    # BUG FIX: modify_code is an `async def`, so calling it bare produced an
    # un-awaited coroutine — its text area and button never rendered. Run it
    # through asyncio.run like the sibling handlers above.
    asyncio.run(modify_code())
192
+
193
# ------------------------------- DOWNLOAD OPTION -------------------------------
# Offer the latest snippet as a downloadable text file once one exists.
if st.session_state.generated_code:
    st.download_button("Download Code", st.session_state.generated_code, "generated_code.txt", "text/plain")

# ------------------------------- DISPLAY MEMORY -------------------------------
# Read-only view of the accumulated conversation history.
st.subheader("Chat History & Context")
st.text_area("Conversation Memory", value="\n".join(st.session_state.chat_history), height=200, disabled=True)

st.markdown("---")
st.markdown("🔹 **Built with LangChain (Optimized), OpenAI GPT, and ChromaDB** 🔹")