Lasdw committed on
Commit
b4bcd4c
·
1 Parent(s): 8e7137e

fixed input bug

Browse files
Files changed (1) hide show
  1. app.py +6 -20
app.py CHANGED
@@ -47,7 +47,7 @@ def chat_with_agent(question: str, file_uploads, history: list) -> tuple:
47
  Handle chat interaction with TurboNerd agent, now with file upload support.
48
  """
49
  if not question.strip() and not file_uploads:
50
- return history, "", "Remaining queries this hour: 5/5"
51
 
52
  try:
53
  # Use the history object's ID as a session identifier
@@ -65,17 +65,6 @@ def chat_with_agent(question: str, file_uploads, history: list) -> tuple:
65
  history.append({"role": "user", "content": question})
66
  session_histories[session_id].append({"role": "user", "content": question})
67
 
68
- # Check rate limit
69
- # if not query_limiter.is_allowed(session_id):
70
- # remaining_time = query_limiter.get_time_until_reset(session_id)
71
- # error_message = (
72
- # f"Rate limit exceeded. You can make {query_limiter.max_queries} queries per hour. Think of my bank account🙏. "
73
- # f"Please wait {int(remaining_time)} seconds before trying again."
74
- # )
75
- # history.append({"role": "assistant", "content": error_message})
76
- # session_histories[session_id].append({"role": "assistant", "content": error_message})
77
- # return history, "", f"Remaining queries this hour: 0/{query_limiter.max_queries}"
78
-
79
  # Initialize agent
80
  agent = TurboNerd()
81
 
@@ -101,7 +90,7 @@ def chat_with_agent(question: str, file_uploads, history: list) -> tuple:
101
 
102
  if file_info:
103
  if question.strip():
104
- question = question + file_info
105
  else:
106
  question = f"Please analyze these files: {file_info}"
107
 
@@ -149,14 +138,11 @@ def chat_with_agent(question: str, file_uploads, history: list) -> tuple:
149
  else:
150
  formatted_response = response
151
 
152
- # Add remaining queries info
153
- # remaining_queries = query_limiter.get_remaining_queries(session_id)
154
-
155
  # Add response to both histories
156
  history.append({"role": "assistant", "content": formatted_response})
157
  session_histories[session_id].append({"role": "assistant", "content": formatted_response})
158
 
159
- return history, "", "Remaining queries this hour: 5/5"
160
  except RecursionError as e:
161
  error_message = (
162
  "I apologize, but I've reached my thinking limit while trying to answer your question. "
@@ -167,7 +153,7 @@ def chat_with_agent(question: str, file_uploads, history: list) -> tuple:
167
  history.append({"role": "assistant", "content": error_message})
168
  if session_id in session_histories:
169
  session_histories[session_id].append({"role": "assistant", "content": error_message})
170
- return history, "", "Remaining queries this hour: 5/5"
171
  except Exception as e:
172
  error_str = str(e).lower()
173
  if "credit" in error_str or "quota" in error_str or "limit" in error_str or "exceeded" in error_str or "OPENAI_API_KEY" in error_str or "TAVILY_API_KEY" in error_str:
@@ -180,11 +166,11 @@ def chat_with_agent(question: str, file_uploads, history: list) -> tuple:
180
  history.append({"role": "assistant", "content": error_message})
181
  if session_id in session_histories:
182
  session_histories[session_id].append({"role": "assistant", "content": error_message})
183
- return history, "", "Remaining queries this hour: 5/5"
184
 
185
  def clear_chat():
186
  """Clear the chat history."""
187
- return [], "", None, "Remaining queries this hour: 5/5"
188
 
189
  # --- Evaluation Functions ---
190
  def run_and_submit_all(profile: gr.OAuthProfile | None):
 
47
  Handle chat interaction with TurboNerd agent, now with file upload support.
48
  """
49
  if not question.strip() and not file_uploads:
50
+ return history, ""
51
 
52
  try:
53
  # Use the history object's ID as a session identifier
 
65
  history.append({"role": "user", "content": question})
66
  session_histories[session_id].append({"role": "user", "content": question})
67
 
 
 
 
 
 
 
 
 
 
 
 
68
  # Initialize agent
69
  agent = TurboNerd()
70
 
 
90
 
91
  if file_info:
92
  if question.strip():
93
+ question = f"{question}\n{file_info}"
94
  else:
95
  question = f"Please analyze these files: {file_info}"
96
 
 
138
  else:
139
  formatted_response = response
140
 
 
 
 
141
  # Add response to both histories
142
  history.append({"role": "assistant", "content": formatted_response})
143
  session_histories[session_id].append({"role": "assistant", "content": formatted_response})
144
 
145
+ return history, ""
146
  except RecursionError as e:
147
  error_message = (
148
  "I apologize, but I've reached my thinking limit while trying to answer your question. "
 
153
  history.append({"role": "assistant", "content": error_message})
154
  if session_id in session_histories:
155
  session_histories[session_id].append({"role": "assistant", "content": error_message})
156
+ return history, ""
157
  except Exception as e:
158
  error_str = str(e).lower()
159
  if "credit" in error_str or "quota" in error_str or "limit" in error_str or "exceeded" in error_str or "OPENAI_API_KEY" in error_str or "TAVILY_API_KEY" in error_str:
 
166
  history.append({"role": "assistant", "content": error_message})
167
  if session_id in session_histories:
168
  session_histories[session_id].append({"role": "assistant", "content": error_message})
169
+ return history, ""
170
 
171
  def clear_chat():
172
  """Clear the chat history."""
173
+ return [], ""
174
 
175
  # --- Evaluation Functions ---
176
  def run_and_submit_all(profile: gr.OAuthProfile | None):