ositamiles committed on
Commit
fea9521
Β·
verified Β·
1 Parent(s): 7a7e572

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +232 -0
app.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library imports
import json
from typing import Optional, Tuple

# Third-party imports
import gradio as gr
import instructor
from groq import Groq
from pydantic import BaseModel, ValidationError

# Local imports
from config import (
    APP_HEADER,
    APP_TITLE,
    GROQ_API_KEY,
    LLM_MAX_TOKENS,
    LLM_MODEL,
    LLM_SYSTEM_PROMPT,
)
20
+
21
+
22
# Build the Groq client and immediately wrap it with Instructor so that
# chat completions can return validated Pydantic models (response_model=...).
client = instructor.patch(Groq(api_key=GROQ_API_KEY))
25
+
26
+
27
# Define response models for feedback and code using Pydantic
class CodeResponse(BaseModel):
    """Structured LLM reply: a plan, the full program, and commentary.

    Instructor uses this model as ``response_model`` so the raw completion
    is parsed and validated into these three string fields.
    """

    # The model's reasoning/plan before writing the code.
    planning: str
    # The complete Python program to show in the code editor.
    full_python_code: str
    # Human-readable notes rendered in the chat history.
    commentary: str
34
+
35
+
36
def _render_message(message: dict) -> str:
    """Return one markdown line for a non-system chat message.

    Assistant messages carry a JSON-serialized ``CodeResponse``; only the
    ``commentary`` field is shown. User messages have the appended code
    suffix stripped; an empty prompt means the user edited code directly.
    """
    role = message["role"].capitalize()
    try:
        # Assistant content is the model's JSON dump; surface the commentary.
        content = json.loads(message["content"])["commentary"]
    except (json.JSONDecodeError, KeyError, TypeError):
        # User content: drop the code that was appended for the LLM's benefit.
        content = message["content"].split(" And here is the existing code:")[0]
        if content == "":
            content = "_User edited the code directly_"
    emoji = "πŸ‘€" if role == "User" else "πŸ€–"
    return f"**{emoji} {role}:** {content}\n\n"


def get_llm_responses(
    user_input: str, conversation: list, current_code: Optional[str] = None
) -> Tuple[list, str, str]:
    """
    Generates feedback and code based on user input using the Instructor LLM.

    Args:
        user_input (str): The input text from the user.
        conversation (list): The conversation history (mutated in place).
        current_code (str, optional): Existing code, appended to the prompt
            so the model iterates instead of starting over.

    Returns:
        Tuple[list, str, str]: Updated conversation, generated Python code,
        and the markdown-formatted history (newest round first).

    Raises:
        gr.Error: If the structured response fails validation, or on any
            other failure while calling the LLM.
    """
    try:
        # Record the user's turn, including the current code when present.
        user_content = (
            user_input
            if current_code is None
            else f"{user_input} And here is the existing code: {current_code}"
        )
        conversation.append({"role": "user", "content": user_content})

        # Ask the model for a structured CodeResponse (validated by Pydantic
        # via Instructor's response_model).
        feedback_resp = client.chat.completions.create(
            model=LLM_MODEL,
            response_model=CodeResponse,
            max_tokens=LLM_MAX_TOKENS,
            messages=conversation,
        )

        code = feedback_resp.full_python_code

        # Record the assistant's turn as its full JSON dump so later rounds
        # (and the renderer above) can recover the commentary field.
        conversation.append(
            {
                "role": "assistant",
                "content": feedback_resp.model_dump_json(),
            }
        )

        # Format the history for display, skipping the system prompt.
        conversation_text = ""
        conversation_to_print = conversation[1:]

        # Each round is assumed to be one user + one assistant message.
        round_number = len(conversation_to_print) // 2

        # Show the latest round at the top.
        if len(conversation_to_print) >= 2:
            conversation_text += f"## Version {round_number}\n\n"
            for message in conversation_to_print[-2:]:
                if message["role"] != "system":
                    conversation_text += _render_message(message)

        # Then the older rounds, newest first, with a header per round.
        for i, message in enumerate(conversation_to_print[:-2]):
            if message["role"] != "system":
                if i % 2 == 0:
                    round_number = (len(conversation_to_print) - i) // 2
                    conversation_text += f"## Version {round_number-1}\n\n"
                conversation_text += _render_message(message)

        return conversation, code, conversation_text

    except ValidationError as ve:
        # The LLM output did not match the CodeResponse schema.
        error_msg = f"Response validation error: {ve}"
        raise gr.Error(error_msg)
    except Exception as e:
        # Surface any other failure (network, API, formatting) in the UI.
        error_msg = f"An error occurred: {e}"
        raise gr.Error(error_msg)
139
+
140
+
141
# Define the Gradio interface
with gr.Blocks(
    title=APP_TITLE, theme=gr.themes.Ocean(), fill_width=True, fill_height=True
) as demo:
    gr.HTML(APP_HEADER)

    with gr.Row():
        # Left column: rendered markdown chat history.
        with gr.Column(scale=1):
            conversation_output = gr.Markdown(label="Chat History", height=500)

        # Right column: editable generated code plus quick-action buttons.
        with gr.Column(scale=2):
            code_output = gr.Code(
                label="LLM Generated Code",
                interactive=True,
                language="python",
                lines=30,
            )
            with gr.Row():
                add_comments_btn = gr.Button("Add Comments πŸ’¬")
                refactor_btn = gr.Button("Refactor πŸ”¨")

    with gr.Row():
        with gr.Column(scale=9):
            user_input = gr.Textbox(
                label="Enter Your Request here",
                placeholder="Type something here...",
                lines=2,
            )
        with gr.Column(scale=1):
            submit_btn = gr.Button("Submit πŸš€")
            reset_btn = gr.Button("Reset πŸ”„")

    # Initialize conversation history with system prompt using Gradio State
    initial_conversation = [
        {
            "role": "system",
            "content": LLM_SYSTEM_PROMPT,
        }
    ]

    # Per-session conversation history; event handlers receive and return it.
    conversation_state = gr.State(initial_conversation)

    def on_submit(user_input, conversation, current_code):
        """Run one LLM round and clear the input textbox."""
        result = get_llm_responses(user_input, conversation, current_code)
        # First output clears the textbox; the rest update
        # state, code editor, and history display.
        return [""] + list(result)

    submit_btn.click(
        fn=on_submit,
        inputs=[user_input, conversation_state, code_output],
        outputs=[user_input, conversation_state, code_output, conversation_output],
    )

    def add_comments_fn(conversation, current_code):
        """Quick action: ask the LLM to document the current code."""
        return on_submit(
            "Please add more comments to the code. Make it production ready.",
            conversation,
            current_code,
        )

    add_comments_btn.click(
        fn=add_comments_fn,
        inputs=[conversation_state, code_output],
        outputs=[user_input, conversation_state, code_output, conversation_output],
    )

    def refactor_fn(conversation, current_code):
        """Quick action: ask the LLM to refactor the current code."""
        return on_submit(
            "Please refactor the code. Make it more efficient.",
            conversation,
            current_code,
        )

    refactor_btn.click(
        fn=refactor_fn,
        inputs=[conversation_state, code_output],
        outputs=[user_input, conversation_state, code_output, conversation_output],
    )

    def reset_fn():
        """Clear all widgets and restore a pristine conversation.

        BUG FIX: return fresh copies of the system message instead of the
        shared ``initial_conversation`` object itself. The submit handler
        appends to the state list in place, so handing out the template
        would let a post-reset submit pollute every subsequent reset.
        """
        return "", [dict(message) for message in initial_conversation], "", ""

    reset_btn.click(
        fn=reset_fn,
        outputs=[user_input, conversation_state, code_output, conversation_output],
    )

# Launch the Gradio app
if __name__ == "__main__":
    demo.launch()