ai-tomoni committed on
Commit
2a03338
·
verified ·
1 Parent(s): 029884b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -10,6 +10,7 @@ client = InferenceClient(model=current_model, token=HF_TOKEN)
10
  conversation_history = []
11
 
12
  def enhanced_chat_response(user_input, max_tokens, temperature, top_p):
 
13
  if not user_input.strip():
14
  return "", "Bitte gib eine Nachricht ein.", "", ""
15
 
@@ -21,6 +22,7 @@ def enhanced_chat_response(user_input, max_tokens, temperature, top_p):
21
 
22
 
23
  messages = [system_prompt, {"role": "user", "content": user_input}]
 
24
  # Hier printen wir die messages vor dem API-Aufruf
25
  print("Messages sent to API:", messages)
26
 
@@ -56,7 +58,7 @@ def enhanced_chat_response(user_input, max_tokens, temperature, top_p):
56
  for message in client.chat_completion(
57
  messages=messages,
58
  max_tokens=min(max_tokens, 100),
59
- stream=True,
60
  temperature=temperature,
61
  top_p=top_p
62
  ):
 
10
  conversation_history = []
11
 
12
  def enhanced_chat_response(user_input, max_tokens, temperature, top_p):
13
+ print("inside enhanced_chat_response")
14
  if not user_input.strip():
15
  return "", "Bitte gib eine Nachricht ein.", "", ""
16
 
 
22
 
23
 
24
  messages = [system_prompt, {"role": "user", "content": user_input}]
25
+
26
  # Hier printen wir die messages vor dem API-Aufruf
27
  print("Messages sent to API:", messages)
28
 
 
58
  for message in client.chat_completion(
59
  messages=messages,
60
  max_tokens=min(max_tokens, 100),
61
+ stream=False,
62
  temperature=temperature,
63
  top_p=top_p
64
  ):