haifasyn committed on
Commit
569f565
·
verified ·
1 Parent(s): 5f541a9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -12
app.py CHANGED
@@ -34,23 +34,35 @@ def predict(message, history):
34
  # Memasukkan history agar chatbot punya ingatan
35
  messages = [{"role": "system", "content": system_prompt}]
36
 
37
- for entry in history:
38
- # Menangani format history Gradio yang bervariasi (list atau dict)
39
- if isinstance(entry, dict):
40
- messages.append(entry)
41
- elif isinstance(entry, (list, tuple)):
42
- messages.append({"role": "user", "content": str(entry[0])})
43
- messages.append({"role": "assistant", "content": str(entry[1])})
44
-
45
- messages.append({"role": "user", "content": str(message)})
46
-
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  text_prompt = tokenizer.apply_chat_template(
48
  messages,
49
  tokenize=False,
50
  add_generation_prompt=True
51
  )
52
-
53
- inputs = tokenizer(text_prompt, return_tensors="pt").to("cpu")
54
 
55
  with torch.no_grad():
56
  outputs = model.generate(
 
34
  # Memasukkan history agar chatbot punya ingatan
35
  messages = [{"role": "system", "content": system_prompt}]
36
 
37
+ if history:
38
+ for interaction in history:
39
+ if isinstance(interaction, dict):
40
+ # Format Gradio 4+ (Dictionary)
41
+ role = interaction.get("role", "user")
42
+ content = interaction.get("content", "")
43
+ # Jika content adalah list (multimodal), ambil elemen pertama
44
+ if isinstance(content, list): content = str(content[0])
45
+ messages.append({"role": role, "content": str(content)})
46
+ elif isinstance(interaction, (list, tuple)):
47
+ # Format Gradio Legacy [user, assistant]
48
+ u_msg = interaction[0] if len(interaction) > 0 else ""
49
+ a_msg = interaction[1] if len(interaction) > 1 else ""
50
+ # Paksa jadi string
51
+ if isinstance(u_msg, list): u_msg = str(u_msg[0])
52
+ if isinstance(a_msg, list): a_msg = str(a_msg[0])
53
+ messages.append({"role": "user", "content": str(u_msg)})
54
+ messages.append({"role": "assistant", "content": str(a_msg)})
55
+
56
+ user_input = message.get("text", "") if isinstance(message, dict) else str(message)
57
+ messages.append({"role": "user", "content": user_input})
58
+
59
+ # 4. Terapkan Template & Tokenisasi
60
  text_prompt = tokenizer.apply_chat_template(
61
  messages,
62
  tokenize=False,
63
  add_generation_prompt=True
64
  )
65
+ inputs = tokenizer(text_prompt, return_tensors="pt").to("cpu")
 
66
 
67
  with torch.no_grad():
68
  outputs = model.generate(