WaysAheadGlobal committed on
Commit
62f80e8
·
verified ·
1 Parent(s): 1cfcc0f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -70
app.py CHANGED
@@ -27,7 +27,7 @@ def get_vectorstore():
27
 
28
  @st.cache_resource
29
  def get_memory():
30
- """Initialize memory to store previous interactions."""
31
  return ConversationBufferMemory(memory_key="chat_history", return_messages=True)
32
 
33
  # Initialize models and memory
@@ -35,53 +35,52 @@ llm = get_openai_model()
35
  vectorstore = get_vectorstore()
36
  memory = get_memory()
37
 
38
- # ------------------------------- PROMPT TEMPLATES -------------------------------
39
- prompt_template = PromptTemplate(
40
- input_variables=["language", "description", "context"],
41
  template="""
42
- You are an **expert AI coding assistant** with memory.
43
 
44
- **Context from previous interactions:**
45
- {context}
46
-
47
- **Current Task:** Generate a structured and functional code snippet.
48
 
49
  **Language:** {language}
50
- **Description:** {description}
51
 
52
- - Ensure the code follows best practices.
53
- - Include comments explaining key sections.
54
- - If applicable, provide installation/setup instructions.
 
 
55
  """
56
  )
57
 
58
- enhance_template = PromptTemplate(
59
- input_variables=["code", "context"],
60
  template="""
61
- Improve the given code based on context:
62
-
63
- **Previous Conversation Context:**
64
- {context}
65
-
66
  **Original Code:**
67
  {code}
68
 
69
- **Enhanced Code with better efficiency, security, and structure:**
 
 
 
 
 
 
70
  """
71
  )
72
 
73
- modify_template = PromptTemplate(
74
- input_variables=["code", "modification", "context"],
75
  template="""
76
- Modify the code as per user requirements while maintaining context.
77
-
78
- **Previous Conversation Context:**
79
- {context}
80
 
81
  **Original Code:**
82
  {code}
83
 
84
- **User Requested Modification:**
85
  {modification}
86
 
87
  **Modified Code:**
@@ -89,73 +88,65 @@ modify_template = PromptTemplate(
89
  )
90
 
91
  # ------------------------------- STREAMLIT UI -------------------------------
92
- st.title("🧠💻 Context-Aware Intelligent Coding Agent")
93
- st.markdown("### Generate, Enhance, and Modify Code While Retaining Context!")
94
 
95
- languages = ["Python", "JavaScript", "PHP", "C#", "HTML/CSS/JS"]
 
 
 
96
  language = st.selectbox("Select a programming language:", languages)
97
- description = st.text_area("Describe the code you need:", placeholder="E.g., Build a login system in PHP")
98
 
99
  # ------------------------------- SESSION STATE -------------------------------
100
  if "generated_code" not in st.session_state:
101
  st.session_state.generated_code = ""
102
 
103
- if "chat_history" not in st.session_state:
104
- st.session_state.chat_history = []
105
-
106
  # ------------------------------- ASYNC FUNCTIONS -------------------------------
107
  async def generate_code():
108
- """Generates code while retaining context."""
109
  if not description.strip():
110
- st.warning("Please enter a code description!")
111
  return
112
 
113
- st.info(" Generating your code...")
114
-
115
- # Retrieve past interactions for context
116
- previous_context = "\n".join(st.session_state.chat_history)
117
 
118
  # Format prompt
119
- prompt = prompt_template.format(language=language, description=description, context=previous_context)
120
 
121
  # Generate code asynchronously
122
  response = await asyncio.to_thread(llm.invoke, [HumanMessage(content=prompt)])
123
 
124
- # Store generated code and update chat history
125
  st.session_state.generated_code = response.content
126
- st.session_state.chat_history.append(f"User asked: {description}\nAI Response:\n{response.content}")
127
 
128
- # Display code
129
- st.subheader("Generated Code:")
130
  st.code(st.session_state.generated_code, language=language.lower())
131
 
132
- # Save to vectorstore for future reference
133
  vectorstore.add_texts([st.session_state.generated_code])
134
 
135
  async def enhance_code():
136
- """Enhances the generated code while considering past context."""
137
  if not st.session_state.generated_code:
138
  st.warning("Generate code first before enhancing!")
139
  return
140
 
141
- st.info(" Enhancing the code for better performance...")
142
-
143
- previous_context = "\n".join(st.session_state.chat_history)
144
 
145
- prompt = enhance_template.format(code=st.session_state.generated_code, context=previous_context)
146
  response = await asyncio.to_thread(llm.invoke, [HumanMessage(content=prompt)])
147
 
148
  st.session_state.generated_code = response.content
149
- st.session_state.chat_history.append(f"AI Enhanced Code:\n{response.content}")
150
-
151
- st.subheader("Enhanced Code:")
152
  st.code(st.session_state.generated_code, language=language.lower())
153
 
154
  async def modify_code():
155
- """Modifies existing code based on user input while remembering context."""
156
- modification = st.text_area("Specify modifications (optional):", placeholder="E.g., Add validation for login form")
157
 
158
- if st.button("Apply Modifications"):
159
  if not st.session_state.generated_code:
160
  st.warning("Generate code first before modifying!")
161
  return
@@ -163,17 +154,13 @@ async def modify_code():
163
  st.warning("Please enter modification details!")
164
  return
165
 
166
- st.info(" Modifying the code as per request...")
167
 
168
- previous_context = "\n".join(st.session_state.chat_history)
169
-
170
- prompt = modify_template.format(code=st.session_state.generated_code, modification=modification, context=previous_context)
171
  response = await asyncio.to_thread(llm.invoke, [HumanMessage(content=prompt)])
172
 
173
  st.session_state.generated_code = response.content
174
- st.session_state.chat_history.append(f"User Modification Request: {modification}\nAI Modified Code:\n{response.content}")
175
-
176
- st.subheader("Modified Code:")
177
  st.code(st.session_state.generated_code, language=language.lower())
178
 
179
  # ------------------------------- BUTTONS -------------------------------
@@ -194,9 +181,5 @@ with col3:
194
  if st.session_state.generated_code:
195
  st.download_button("Download Code", st.session_state.generated_code, "generated_code.txt", "text/plain")
196
 
197
- # ------------------------------- DISPLAY MEMORY -------------------------------
198
- st.subheader("Chat History & Context")
199
- st.text_area("Conversation Memory", value="\n".join(st.session_state.chat_history), height=200, disabled=True)
200
-
201
  st.markdown("---")
202
- st.markdown("🔹 **Built with LangChain (Optimized), OpenAI GPT, and ChromaDB** 🔹")
 
27
 
28
@st.cache_resource
def get_memory():
    """Initialize memory for conversational interactions.

    Returns:
        ConversationBufferMemory: buffer keyed as ``chat_history`` that
        returns message objects (``return_messages=True``).
    """
    # st.cache_resource makes Streamlit create this once and reuse the same
    # memory object across script reruns instead of resetting it each run.
    return ConversationBufferMemory(memory_key="chat_history", return_messages=True)
32
 
# Initialize models and memory
# NOTE(review): the `llm = get_openai_model()` line is elided in this diff view
# but appears in the hunk context header — confirm against the full file.
llm = get_openai_model()
vectorstore = get_vectorstore()
memory = get_memory()
37
 
38
# ------------------------------- STRONGER PROMPT TEMPLATES -------------------------------
# Template for first-pass code generation; filled with the UI's language
# selection and the user's free-text description.
code_prompt = PromptTemplate(
    input_variables=["language", "description"],
    template="""
You are an **expert AI coding assistant** specializing in **ALL programming languages**.

**Task:** Generate an optimized, structured, and functional code snippet.

**Language:** {language}
**User Request:** {description}

- Follow industry best practices and clean code principles.
- Include relevant comments for clarity.
- If applicable, provide setup instructions.
- Use efficient algorithms and optimize performance.
- Ensure security measures are implemented where needed.
""",
)
56
 
57
# Template for the "enhance" pass: takes only the previously generated code
# and asks the model to return an improved version.
enhance_prompt = PromptTemplate(
    input_variables=["code"],
    template="""
Improve the following code:

**Original Code:**
{code}

**Enhancements Required:**
- Optimize performance and efficiency.
- Improve security, error handling, and best practices.
- Add better structure and documentation.
- Ensure clean, readable formatting.

**Enhanced Code:**
""",
)
74
 
75
# Template for the "modify" pass: combines the current code with the user's
# requested change. (Closing triple-quote reconstructed — it was elided in
# the diff rendering this file was recovered from.)
modify_prompt = PromptTemplate(
    input_variables=["code", "modification"],
    template="""
Modify the following code based on user requests:

**Original Code:**
{code}

**Modification Request:**
{modification}

**Modified Code:**
""",
)
89
 
90
# ------------------------------- STREAMLIT UI -------------------------------
st.title("🧠💻 Intelligent Coding Agent")
st.markdown("### Generate, Improve, and Modify Code in Any Programming Language!")

# Languages offered in the selector; the choice is also passed to st.code()
# (lowercased) for syntax highlighting of the generated result.
languages = [
    "Python", "JavaScript", "TypeScript", "PHP", "C#", "Java", "C++", "Go", "Rust", "Swift",
    "Kotlin", "R", "SQL", "Bash", "HTML/CSS/JS",
]
language = st.selectbox("Select a programming language:", languages)
description = st.text_area("What do you need?", placeholder="E.g., Build an authentication system in Flask")
100
 
101
# ------------------------------- SESSION STATE -------------------------------
# Persist the last generated snippet across Streamlit reruns so the
# enhance/modify actions can operate on it.
if "generated_code" not in st.session_state:
    st.session_state.generated_code = ""
104
 
 
 
 
105
# ------------------------------- ASYNC FUNCTIONS -------------------------------
async def generate_code():
    """Generates optimized code while keeping a conversational flow.

    Reads the module-level `description` and `language` widget values,
    prompts the LLM, stores the result in session state, renders it, and
    indexes it into the vectorstore.
    """
    if not description.strip():
        st.warning("Please enter a description!")
        return

    st.info("🧑‍💻 Thinking and generating your code...")

    # Format prompt
    prompt = code_prompt.format(language=language, description=description)

    # Generate code asynchronously — llm.invoke is blocking, so run it in a
    # worker thread to keep the event loop responsive.
    response = await asyncio.to_thread(llm.invoke, [HumanMessage(content=prompt)])

    # Store generated code and maintain conversation flow
    st.session_state.generated_code = response.content

    # Display code in a properly formatted block
    st.subheader("Here’s your generated code:")
    st.code(st.session_state.generated_code, language=language.lower())

    # Save to vectorstore for reference
    vectorstore.add_texts([st.session_state.generated_code])
129
 
130
async def enhance_code():
    """Enhances existing code with better efficiency, structure, and readability.

    Requires a prior generate_code() run: operates on
    st.session_state.generated_code and overwrites it with the improved version.
    """
    if not st.session_state.generated_code:
        st.warning("Generate code first before enhancing!")
        return

    st.info("🔄 Improving your code...")

    prompt = enhance_prompt.format(code=st.session_state.generated_code)
    # Run the blocking LLM call off the event loop.
    response = await asyncio.to_thread(llm.invoke, [HumanMessage(content=prompt)])

    st.session_state.generated_code = response.content
    st.subheader("Here’s the improved version:")
    st.code(st.session_state.generated_code, language=language.lower())
144
 
145
async def modify_code():
    """Modifies code based on user feedback.

    Renders its own text area and "Apply Changes" button; only when the
    button is pressed does it prompt the LLM and replace the stored code.
    """
    modification = st.text_area("What changes do you want?", placeholder="E.g., Add input validation")

    if st.button("Apply Changes"):
        if not st.session_state.generated_code:
            st.warning("Generate code first before modifying!")
            return
        # NOTE(review): this guard's condition line was elided in the diff
        # this file was recovered from; reconstructed from the warning text —
        # confirm against the full file.
        if not modification.strip():
            st.warning("Please enter modification details!")
            return

        st.info("🔄 Making modifications...")

        prompt = modify_prompt.format(code=st.session_state.generated_code, modification=modification)
        # Run the blocking LLM call off the event loop.
        response = await asyncio.to_thread(llm.invoke, [HumanMessage(content=prompt)])

        st.session_state.generated_code = response.content
        st.subheader("Here’s the modified code:")
        st.code(st.session_state.generated_code, language=language.lower())
165
 
166
  # ------------------------------- BUTTONS -------------------------------
 
181
  if st.session_state.generated_code:
182
  st.download_button("Download Code", st.session_state.generated_code, "generated_code.txt", "text/plain")
183
 
 
 
 
 
184
# Page footer.
st.markdown("---")
st.markdown("🔹 **Powered by OpenAI GPT-4, LangChain, and ChromaDB** 🔹")