Julia Ostheimer committed on
Commit
7c247f1
·
1 Parent(s): 4afc1e8

Assign each chat a unique chat_hash (instead of taking session_hash)

Browse files
Files changed (1) hide show
  1. app.py +10 -4
app.py CHANGED
@@ -10,6 +10,7 @@ from langgraph.graph import MessagesState, StateGraph, END
10
  from langgraph.prebuilt import ToolNode, tools_condition
11
 
12
  import structlog
 
13
 
14
  from qdrant_client import QdrantClient
15
  from qdrant_client.http.models import Distance, VectorParams, SparseVectorParams
@@ -89,6 +90,7 @@ graph = graph_builder.compile(checkpointer=memory)
89
  # -----
90
 
91
  def bot(message, history, source_history, request: gr.Request) -> list[Any]:
 
92
  """Generate bot response and history from message.
93
 
94
  With multi-modal inputs text and each file is treated as separate message.
@@ -107,10 +109,9 @@ def bot(message, history, source_history, request: gr.Request) -> list[Any]:
107
 
108
  # create text response
109
  user_input_prompt = message.get("text")
110
- current_session_id = request.session_hash
111
- logger.info("This is the current session id", session_id=current_session_id)
112
 
113
- config = {"configurable": {"thread_id": current_session_id}, "callbacks": [langfuse_handler], "stream": False}
114
 
115
  response = graph.invoke(
116
  {"messages": [{"role": "user", "content": user_input_prompt}]},
@@ -122,11 +123,16 @@ def bot(message, history, source_history, request: gr.Request) -> list[Any]:
122
  # Append source details of the response to source history
123
  source_history = build_source_history_object(response, source_history, user_input_prompt)
124
 
125
- return [response["messages"][-1].content], source_history
 
 
 
 
126
 
127
  with gr.Blocks() as demo:
128
  # Initialize source history as session state variable
129
  source_history = gr.State([])
 
130
 
131
  gr.Markdown("### Dies ist der aktuelle Prototyp der DEval AI4Kontextanalysen, mit dem Fokus auf dem Testen der Zitationsfunktion und der grundlegenden Konfiguration.")
132
  with gr.Tab("Chat"):
 
10
  from langgraph.prebuilt import ToolNode, tools_condition
11
 
12
  import structlog
13
+ import uuid
14
 
15
  from qdrant_client import QdrantClient
16
  from qdrant_client.http.models import Distance, VectorParams, SparseVectorParams
 
90
  # -----
91
 
92
  def bot(message, history, source_history, request: gr.Request) -> list[Any]:
93
+ def bot(message, history, source_history, chat_hash) -> list[Any]:
94
  """Generate bot response and history from message.
95
 
96
  With multi-modal inputs text and each file is treated as separate message.
 
109
 
110
  # create text response
111
  user_input_prompt = message.get("text")
112
+ logger.info("This is the current chat_hash", chat_hash=chat_hash)
 
113
 
114
+ config = {"configurable": {"thread_id": chat_hash}, "callbacks": [langfuse_handler], "stream": False}
115
 
116
  response = graph.invoke(
117
  {"messages": [{"role": "user", "content": user_input_prompt}]},
 
123
  # Append source details of the response to source history
124
  source_history = build_source_history_object(response, source_history, user_input_prompt)
125
 
126
+ return [response["messages"][-1].content], source_history, chat_hash
127
+ def create_chat_hash():
128
+ chat_hash = str(uuid.uuid4())
129
+ logger.info("This is the chat_hash when it is called", chat_hash=chat_hash)
130
+ return chat_hash
131
 
132
  with gr.Blocks() as demo:
133
  # Initialize source history as session state variable
134
  source_history = gr.State([])
135
+ chat_hash = gr.State(create_chat_hash)
136
 
137
  gr.Markdown("### Dies ist der aktuelle Prototyp der DEval AI4Kontextanalysen, mit dem Fokus auf dem Testen der Zitationsfunktion und der grundlegenden Konfiguration.")
138
  with gr.Tab("Chat"):