KS00Max committed on
Commit
73fa6e6
·
1 Parent(s): 79b07f9
Files changed (1) hide show
  1. app.py +54 -18
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import inspect
2
  import logging
3
  import uuid
4
- from typing import List, Tuple
5
 
6
  import gradio as gr
7
 
@@ -31,7 +31,7 @@ def _normalize_choice(choice_value: str | None) -> str | None:
31
 
32
 
33
  def _extract_text(item) -> str:
34
- """Extract plain text from any Gradio message format."""
35
  if item is None:
36
  return ""
37
  if isinstance(item, str):
@@ -54,29 +54,61 @@ def _extract_text(item) -> str:
54
  return str(item)
55
 
56
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  def respond(
58
  user_message: str,
59
- chat_history: List[Tuple[str | None, str | None]],
60
  app_state: dict,
61
  choice_value: str | None,
62
  ):
63
- """Main response handler using tuples format: [(user_msg, bot_msg), ...]"""
64
  state = app_state or init_state()
65
  session_id = state.get("session_id") or init_state()["session_id"]
66
  state["session_id"] = session_id
67
 
68
- # Ensure history is a clean list of tuples with plain strings
69
- history: List[Tuple[str | None, str | None]] = []
70
- if chat_history:
71
- for item in chat_history:
72
- if isinstance(item, (list, tuple)) and len(item) == 2:
73
- user_text = _extract_text(item[0])
74
- bot_text = _extract_text(item[1])
75
- history.append((user_text or None, bot_text or None))
76
 
77
  if engine is None:
78
  reply = f"εˆζœŸεŒ–γ‚¨γƒ©γƒΌ: {engine_error}. OPENAI_API_KEY γ‚’η’Ίθͺγ—てください。"
79
- history.append((user_message or "(ε…₯εŠ›γͺし)", reply))
 
80
  return history, state, "", gr.update(choices=[], value=None, visible=False), [], {}
81
 
82
  pending = state.get("pending", False)
@@ -93,7 +125,8 @@ def respond(
93
  else:
94
  if not user_message:
95
  warn = "θ³ͺ問をε…₯εŠ›γ—γ¦γγ γ•γ„γ€‚"
96
- history.append(("(ε…₯εŠ›γͺし)", warn))
 
97
  return history, state, "", gr.update(choices=[], value=None, visible=False), citations, trace
98
  result = engine.handle_user_message(session_id, user_message)
99
  user_bubble = user_message
@@ -102,7 +135,8 @@ def respond(
102
  state["pending"] = True
103
  options = [f"{c.id}: {c.text}" for c in result["question"].choices]
104
  trace = result.get("trace", {})
105
- history.append((user_bubble, result["reply"]))
 
106
  return (
107
  history,
108
  state,
@@ -116,7 +150,8 @@ def respond(
116
  state["pending"] = False
117
  citations = result.get("citations", [])
118
  trace = result.get("trace", {})
119
- history.append((user_bubble, result["reply"]))
 
120
  return (
121
  history,
122
  state,
@@ -127,7 +162,8 @@ def respond(
127
  )
128
 
129
  # Error fallback
130
- history.append((user_bubble, result.get("reply", "γ‚¨γƒ©γƒΌγŒη™Ίη”Ÿγ—γΎγ—γŸγ€‚")))
 
131
  state["pending"] = False
132
  trace = result.get("trace", {})
133
  return history, state, "", gr.update(choices=[], value=None, visible=False), citations, trace
@@ -156,7 +192,7 @@ with gr.Blocks() as demo:
156
  )
157
  with gr.Row():
158
  with gr.Column(scale=3):
159
- # Tuples format: [(user_msg, bot_msg), ...]
160
  chatbot = gr.Chatbot(label="ε―Ύθ©±", height=520)
161
  user_input = gr.Textbox(
162
  label="θ³ͺ問をε…₯εŠ›",
 
1
  import inspect
2
  import logging
3
  import uuid
4
+ from typing import List, Dict
5
 
6
  import gradio as gr
7
 
 
31
 
32
 
33
  def _extract_text(item) -> str:
34
+ """Recursively extract plain text from any Gradio message format."""
35
  if item is None:
36
  return ""
37
  if isinstance(item, str):
 
54
  return str(item)
55
 
56
 
57
+ def _ensure_messages_format(history: List) -> List[Dict[str, str]]:
58
+ """Convert any history format to messages format: [{"role": "user", "content": "..."}, ...]"""
59
+ if not history:
60
+ return []
61
+
62
+ messages: List[Dict[str, str]] = []
63
+ for item in history:
64
+ if isinstance(item, dict) and "role" in item and "content" in item:
65
+ # Already messages format - extract clean text
66
+ content = _extract_text(item.get("content"))
67
+ if content:
68
+ messages.append({"role": str(item["role"]), "content": content})
69
+ elif hasattr(item, "role") and hasattr(item, "content"):
70
+ # ChatMessage object
71
+ content = _extract_text(getattr(item, "content"))
72
+ if content:
73
+ messages.append({"role": str(getattr(item, "role")), "content": content})
74
+ elif isinstance(item, (list, tuple)) and len(item) == 2:
75
+ # Tuple format (user, assistant)
76
+ user_text = _extract_text(item[0])
77
+ bot_text = _extract_text(item[1])
78
+ if user_text:
79
+ messages.append({"role": "user", "content": user_text})
80
+ if bot_text:
81
+ messages.append({"role": "assistant", "content": bot_text})
82
+
83
+ return messages
84
+
85
+
86
+ def _append_message(history: List[Dict], role: str, content: str) -> List[Dict]:
87
+ """Append a message in messages format."""
88
+ new_history = list(history)
89
+ if content:
90
+ new_history.append({"role": role, "content": content})
91
+ return new_history
92
+
93
+
94
  def respond(
95
  user_message: str,
96
+ chat_history: List,
97
  app_state: dict,
98
  choice_value: str | None,
99
  ):
100
+ """Main response handler using messages format."""
101
  state = app_state or init_state()
102
  session_id = state.get("session_id") or init_state()["session_id"]
103
  state["session_id"] = session_id
104
 
105
+ # Normalize history to clean messages format
106
+ history = _ensure_messages_format(chat_history)
 
 
 
 
 
 
107
 
108
  if engine is None:
109
  reply = f"εˆζœŸεŒ–γ‚¨γƒ©γƒΌ: {engine_error}. OPENAI_API_KEY γ‚’η’Ίθͺγ—てください。"
110
+ history = _append_message(history, "user", user_message or "(ε…₯εŠ›γͺし)")
111
+ history = _append_message(history, "assistant", reply)
112
  return history, state, "", gr.update(choices=[], value=None, visible=False), [], {}
113
 
114
  pending = state.get("pending", False)
 
125
  else:
126
  if not user_message:
127
  warn = "θ³ͺ問をε…₯εŠ›γ—γ¦γγ γ•γ„γ€‚"
128
+ history = _append_message(history, "user", "(ε…₯εŠ›γͺし)")
129
+ history = _append_message(history, "assistant", warn)
130
  return history, state, "", gr.update(choices=[], value=None, visible=False), citations, trace
131
  result = engine.handle_user_message(session_id, user_message)
132
  user_bubble = user_message
 
135
  state["pending"] = True
136
  options = [f"{c.id}: {c.text}" for c in result["question"].choices]
137
  trace = result.get("trace", {})
138
+ history = _append_message(history, "user", user_bubble)
139
+ history = _append_message(history, "assistant", result["reply"])
140
  return (
141
  history,
142
  state,
 
150
  state["pending"] = False
151
  citations = result.get("citations", [])
152
  trace = result.get("trace", {})
153
+ history = _append_message(history, "user", user_bubble)
154
+ history = _append_message(history, "assistant", result["reply"])
155
  return (
156
  history,
157
  state,
 
162
  )
163
 
164
  # Error fallback
165
+ history = _append_message(history, "user", user_bubble)
166
+ history = _append_message(history, "assistant", result.get("reply", "γ‚¨γƒ©γƒΌγŒη™Ίη”Ÿγ—γΎγ—γŸγ€‚"))
167
  state["pending"] = False
168
  trace = result.get("trace", {})
169
  return history, state, "", gr.update(choices=[], value=None, visible=False), citations, trace
 
192
  )
193
  with gr.Row():
194
  with gr.Column(scale=3):
195
+ # Messages format for Gradio 4.44+
196
  chatbot = gr.Chatbot(label="ε―Ύθ©±", height=520)
197
  user_input = gr.Textbox(
198
  label="θ³ͺ問をε…₯εŠ›",