Solarum Asteridion committed on
Commit
afe890a
·
verified ·
1 Parent(s): 7b5596a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -34
app.py CHANGED
@@ -18,12 +18,17 @@ logger = logging.getLogger(__name__)
18
  def setup_openai_auth():
19
  api_key = os.environ.get("LLM_API_KEY")
20
  if not api_key:
21
- raise Exception("LLM API authentication failed. Please set your API key.")
22
  client = OpenAI(api_key=api_key)
23
  return client
24
 
25
  # Initialize OpenAI client
26
- client = setup_openai_auth()
 
 
 
 
 
27
 
28
  class OpenAILLMHandler:
29
  def __init__(self):
@@ -40,6 +45,10 @@ class OpenAILLMHandler:
40
  logger.info("Returning cached response.")
41
  return cached_response
42
 
 
 
 
 
43
  try:
44
  with self.generation_lock:
45
  response = self.client.chat.completions.create(
@@ -99,9 +108,9 @@ def generate_response(user_message: str, conversation_history: list) -> str:
99
 
100
  prompt_parts = [generate_system_message(current_time, now)]
101
 
102
- for message in conversation_history:
103
- prefix = "User: " if message["role"] == "user" else "Assistant: "
104
- prompt_parts.append(f"{prefix}{message['content']}")
105
 
106
  prompt_parts.append(f"User: {user_message}\nAssistant:")
107
  prompt = "\n\n".join(prompt_parts)
@@ -113,12 +122,11 @@ def chatbot_interface(user_message: str, history: list) -> list:
113
  return history
114
 
115
  if not llm_handler.model:
116
- history.append({"role": "system", "content": "Error: Please call the Human first."})
117
  return history
118
 
119
  ai_response = generate_response(user_message, history)
120
- history.append({"role": "user", "content": user_message})
121
- history.append({"role": "assistant", "content": ai_response})
122
  return history
123
 
124
  # Enhanced Gradio UI with improved CSS and layout
@@ -151,13 +159,11 @@ body, .gradio-container {
151
  .user-message {
152
  background-color: #d1e7dd;
153
  align-self: flex-end;
154
- margin-left: auto;
155
  }
156
 
157
  .bot-message {
158
  background-color: #f8d7da;
159
  align-self: flex-start;
160
- margin-right: auto;
161
  }
162
 
163
  #textbox {
@@ -167,7 +173,7 @@ body, .gradio-container {
167
  border-radius: 5px;
168
  }
169
 
170
- #send-button, #load-button {
171
  background-color: #0d6efd;
172
  color: white;
173
  border: none;
@@ -177,7 +183,7 @@ body, .gradio-container {
177
  margin-left: 10px;
178
  }
179
 
180
- #send-button:hover, #load-button:hover {
181
  background-color: #0b5ed7;
182
  }
183
 
@@ -185,16 +191,20 @@ body, .gradio-container {
185
  background-color: #6c757d !important;
186
  cursor: not-allowed;
187
  }
 
 
 
 
188
  """
189
 
190
  with gr.Blocks(css=custom_css) as demo:
191
  gr.Markdown("<h1 style='text-align: center; color: #0d6efd;'>Human.</h1>")
192
 
193
  with gr.Row():
194
- load_button = gr.Button("Call Human", variant="primary", elem_id="load-button")
195
  model_status = gr.Textbox(
196
  label="Human Arrival Status",
197
- value="Human Not Listening.",
198
  interactive=False,
199
  elem_id="model-status"
200
  )
@@ -216,35 +226,17 @@ with gr.Blocks(css=custom_css) as demo:
216
  )
217
  send = gr.Button("➤", elem_id="send-button")
218
 
219
- def load_model_click():
220
- if llm_handler.model:
221
- return "Human Already Listening."
222
- try:
223
- # Reload the model name from environment if needed
224
- llm_handler.model = os.environ.get("MODEL_NAME", "gpt-4")
225
- if not llm_handler.model:
226
- return "Failed to load model. Please set the MODEL_NAME environment variable."
227
- return "Human Called Successfully."
228
- except Exception as e:
229
- logger.error(f"Error loading model: {e}")
230
- return f"Error loading model: {str(e)}"
231
-
232
  def update_chat(user_message, history):
233
  if not user_message.strip():
234
  return history, gr.update(value="")
235
  if not llm_handler.model:
236
- history.append({"role": "system", "content": "Error: Please call the Human first."})
237
  return history, gr.update(value="")
238
 
239
  updated_history = chatbot_interface(user_message, history)
240
  return updated_history, gr.update(value="")
241
 
242
  # Event handlers
243
- load_button.click(
244
- load_model_click,
245
- outputs=[model_status]
246
- )
247
-
248
  send.click(
249
  update_chat,
250
  inputs=[msg, chatbot],
@@ -258,4 +250,7 @@ with gr.Blocks(css=custom_css) as demo:
258
  )
259
 
260
  if __name__ == "__main__":
261
- demo.launch(share=True)
 
 
 
 
18
  def setup_openai_auth():
19
  api_key = os.environ.get("LLM_API_KEY")
20
  if not api_key:
21
+ raise Exception("LLM API authentication failed. Please set your LLM_API_KEY environment variable.")
22
  client = OpenAI(api_key=api_key)
23
  return client
24
 
25
  # Initialize OpenAI client
26
+ try:
27
+ client = setup_openai_auth()
28
+ logger.info("OpenAI client initialized successfully.")
29
+ except Exception as e:
30
+ logger.error(f"Failed to initialize OpenAI client: {e}")
31
+ client = None # Handle gracefully in the application
32
 
33
  class OpenAILLMHandler:
34
  def __init__(self):
 
45
  logger.info("Returning cached response.")
46
  return cached_response
47
 
48
+ if not self.client:
49
+ logger.error("OpenAI client is not initialized.")
50
+ return "Error: AI service is unavailable."
51
+
52
  try:
53
  with self.generation_lock:
54
  response = self.client.chat.completions.create(
 
108
 
109
  prompt_parts = [generate_system_message(current_time, now)]
110
 
111
+ for user_msg, bot_msg in conversation_history:
112
+ prompt_parts.append(f"User: {user_msg}")
113
+ prompt_parts.append(f"Assistant: {bot_msg}")
114
 
115
  prompt_parts.append(f"User: {user_message}\nAssistant:")
116
  prompt = "\n\n".join(prompt_parts)
 
122
  return history
123
 
124
  if not llm_handler.model:
125
+ history.append(("System", "Error: AI service is unavailable."))
126
  return history
127
 
128
  ai_response = generate_response(user_message, history)
129
+ history.append((user_message, ai_response))
 
130
  return history
131
 
132
  # Enhanced Gradio UI with improved CSS and layout
 
159
  .user-message {
160
  background-color: #d1e7dd;
161
  align-self: flex-end;
 
162
  }
163
 
164
  .bot-message {
165
  background-color: #f8d7da;
166
  align-self: flex-start;
 
167
  }
168
 
169
  #textbox {
 
173
  border-radius: 5px;
174
  }
175
 
176
+ #send-button {
177
  background-color: #0d6efd;
178
  color: white;
179
  border: none;
 
183
  margin-left: 10px;
184
  }
185
 
186
+ #send-button:hover {
187
  background-color: #0b5ed7;
188
  }
189
 
 
191
  background-color: #6c757d !important;
192
  cursor: not-allowed;
193
  }
194
+
195
+ #model-status {
196
+ display: none; /* Hide the model status as "Call Human" is removed */
197
+ }
198
  """
199
 
200
  with gr.Blocks(css=custom_css) as demo:
201
  gr.Markdown("<h1 style='text-align: center; color: #0d6efd;'>Human.</h1>")
202
 
203
  with gr.Row():
204
+ # Removed the "Call Human" button
205
  model_status = gr.Textbox(
206
  label="Human Arrival Status",
207
+ value="", # Empty since the button is removed
208
  interactive=False,
209
  elem_id="model-status"
210
  )
 
226
  )
227
  send = gr.Button("➤", elem_id="send-button")
228
 
 
 
 
 
 
 
 
 
 
 
 
 
 
229
  def update_chat(user_message, history):
230
  if not user_message.strip():
231
  return history, gr.update(value="")
232
  if not llm_handler.model:
233
+ history.append(("System", "Error: AI service is unavailable."))
234
  return history, gr.update(value="")
235
 
236
  updated_history = chatbot_interface(user_message, history)
237
  return updated_history, gr.update(value="")
238
 
239
  # Event handlers
 
 
 
 
 
240
  send.click(
241
  update_chat,
242
  inputs=[msg, chatbot],
 
250
  )
251
 
252
  if __name__ == "__main__":
253
+ if client:
254
+ demo.launch(share=True)
255
+ else:
256
+ logger.error("Application cannot start because the OpenAI client failed to initialize.")