Solarum Asteridion committed
Commit 9fb0ac8 · verified · 1 Parent(s): 5a1081c

Update app.py

Files changed (1)
  1. app.py +6 -3
app.py CHANGED
@@ -22,7 +22,7 @@ class MemoryTracker:
 def clear_memory():
     gc.collect()
     torch.cuda.empty_cache() if torch.cuda.is_available() else None
-
+
 logging.basicConfig(
     level=logging.INFO,
     format='%(asctime)s - %(levelname)s - %(message)s'
@@ -257,7 +257,10 @@ def generate_response(user_message: str, conversation_history: List[Dict[str, st
     prompt_parts.append(f"User: {user_message}\nAssistant:")
     prompt = "\n\n".join(prompt_parts)
 
-    return llm_handler.generate_response(prompt)
+    # Increase max_length to accommodate longer inputs
+    max_length = 512 # You can adjust this value as needed
+
+    return llm_handler.generate_response(prompt, max_length)
 
 def chatbot_interface(user_message: str, history: Optional[List[Dict[str, str]]] = None):
     if history is None:
@@ -365,4 +368,4 @@ with gr.Blocks(css=custom_css) as demo:
     )
 
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)
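
Note on the second hunk: passing max_length through as an argument implies llm_handler.generate_response now accepts a length budget. The LLMHandler class itself is not part of this diff, so the sketch below is an assumption about how such a handler might consume the new parameter, using a standard transformers-based setup (the model name is a placeholder):

# Hypothetical sketch only: LLMHandler is not shown in this commit; this
# transformers-based body is an assumption for illustration.
from transformers import AutoModelForCausalLM, AutoTokenizer

class LLMHandler:
    def __init__(self, model_name: str = "gpt2"):  # placeholder model name
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)

    def generate_response(self, prompt: str, max_length: int = 512) -> str:
        inputs = self.tokenizer(prompt, return_tensors="pt")
        # max_length caps the total sequence (prompt + generated tokens),
        # which is why raising it accommodates longer inputs.
        outputs = self.model.generate(
            **inputs,
            max_length=max_length,
            pad_token_id=self.tokenizer.eos_token_id,
        )
        # Return only the newly generated tokens, not the echoed prompt.
        new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True)

Threading the budget through from the call site, rather than hard-coding it inside the handler, keeps the 512 default adjustable, which matches the inline comment added in the diff.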