PLAZMAstudios committed on
Commit
9f63a22
·
verified ·
1 Parent(s): 3bfb256
Files changed (1) hide show
  1. app.py +10 -9
app.py CHANGED
@@ -14,28 +14,29 @@ def respond(message, history, max_new_tokens, temperature):
14
  """
15
  history: önceki mesajlar listesi
16
  """
 
 
17
  # Mesaj geçmişini chat formatına çevir
18
  chat = []
19
- for h in history or []:
20
- chat.append({"role": "user", "content": h})
 
21
  chat.append({"role": "user", "content": message})
22
-
23
  chat_text = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
24
  input_tokens = tokenizer(chat_text, return_tensors="pt").to(device)
25
 
26
  # Yanıt üretimi
27
  output_tokens = model.generate(
28
  **input_tokens,
29
- max_new_tokens=max_new_tokens,
30
- temperature=temperature,
31
  )
32
  output_text = tokenizer.batch_decode(output_tokens)[0]
33
 
34
  # History güncelle
35
- history = history or []
36
- history.append(message)
37
- history.append(output_text)
38
-
39
  return output_text, history
40
 
41
  # Gradio chat interface
 
14
  """
15
  history: önceki mesajlar listesi
16
  """
17
+ history = history or []
18
+
19
  # Mesaj geçmişini chat formatına çevir
20
  chat = []
21
+ for h in history:
22
+ if h["role"] == "user":
23
+ chat.append({"role": "user", "content": h["content"]})
24
  chat.append({"role": "user", "content": message})
25
+
26
  chat_text = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
27
  input_tokens = tokenizer(chat_text, return_tensors="pt").to(device)
28
 
29
  # Yanıt üretimi
30
  output_tokens = model.generate(
31
  **input_tokens,
32
+ max_new_tokens=max_new_tokens
 
33
  )
34
  output_text = tokenizer.batch_decode(output_tokens)[0]
35
 
36
  # History güncelle
37
+ history.append({"role": "user", "content": message})
38
+ history.append({"role": "assistant", "content": output_text})
39
+
 
40
  return output_text, history
41
 
42
  # Gradio chat interface