ai-tomoni committed on
Commit
b0d0d98
·
verified ·
1 Parent(s): 4e53a87

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -0
app.py CHANGED
@@ -12,6 +12,9 @@ client = InferenceClient(
12
  token=HF_TOKEN
13
  )
14
 
 
 
 
15
  def query_leolm(messages, max_tokens=200, temperature=1.0, top_p=0.9):
16
  """Query LeoLM-13B model via Hugging Face InferenceClient"""
17
  try:
@@ -54,6 +57,8 @@ def query_leolm(messages, max_tokens=200, temperature=1.0, top_p=0.9):
54
  raise Exception(f"Both methods failed - Chat: {str(chat_error)}, Text: {str(text_error)}")
55
 
56
  def enhanced_chat_response(user_input, max_tokens, temperature, top_p):
 
 
57
  if not user_input.strip():
58
  return "", "*Bitte gib eine Nachricht ein.*", ""
59
 
 
12
  token=HF_TOKEN
13
  )
14
 
15
+ # Initialize conversation history as global variable
16
+ conversation_history = []
17
+
18
  def query_leolm(messages, max_tokens=200, temperature=1.0, top_p=0.9):
19
  """Query LeoLM-13B model via Hugging Face InferenceClient"""
20
  try:
 
57
  raise Exception(f"Both methods failed - Chat: {str(chat_error)}, Text: {str(text_error)}")
58
 
59
  def enhanced_chat_response(user_input, max_tokens, temperature, top_p):
60
+ global conversation_history # Declare global variable
61
+
62
  if not user_input.strip():
63
  return "", "*Bitte gib eine Nachricht ein.*", ""
64