Update utils.py
Browse files
utils.py
CHANGED
|
@@ -44,7 +44,7 @@ from langchain_community.tools import DuckDuckGoSearchRun
|
|
| 44 |
from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever
|
| 45 |
from typing import Any, Dict, TypedDict
|
| 46 |
from langchain_core.messages import BaseMessage
|
| 47 |
-
from
|
| 48 |
from langchain.prompts import PromptTemplate
|
| 49 |
|
| 50 |
|
|
@@ -293,16 +293,6 @@ def grade_documents_direct(prompt, documents):
|
|
| 293 |
# LLM
|
| 294 |
model = ChatOpenAI(temperature=0.3, model="gpt-3.5-turbo-1106", streaming=True)
|
| 295 |
|
| 296 |
-
"""
|
| 297 |
-
xxx = Du bist ein Bewerter, der die Relevanz von einem erhaltenen Dokument zu einer Nutzeranfrage bewerten soll. \n
|
| 298 |
-
Hier ist das erhaltene Dokument: \n\n {context} \n\n
|
| 299 |
-
Hier ist die Nutzeranfrage: {question} \n
|
| 300 |
-
Wenn das erhaltene Dokument Keywörter oder semantische Bedeutung in Bezug auf die Nutzeranfrage hat, bewerte es als relevant. \n
|
| 301 |
-
Gib eine binäre Bewertung von 'ja' oder 'nein' Bewertung, um anzuzeigen ob das Dokuemnt relevant ist zur Nutzeranfrage oder nicht.
|
| 302 |
-
#grade_prompt = PromptTemplate(input_variables = ["context", "question"],
|
| 303 |
-
template = xxx)
|
| 304 |
-
|
| 305 |
-
"""
|
| 306 |
# Tool
|
| 307 |
grade_tool_oai = convert_to_openai_tool(grade)
|
| 308 |
|
|
@@ -327,8 +317,7 @@ def grade_documents_direct(prompt, documents):
|
|
| 327 |
|
| 328 |
# Chain
|
| 329 |
chain = prompt_gesamt | llm_with_tool | parser_tool
|
| 330 |
-
|
| 331 |
-
print(prompt)
|
| 332 |
# Score
|
| 333 |
filtered_docs = []
|
| 334 |
for d in documents:
|
|
@@ -408,54 +397,6 @@ def rag_chain(llm, prompt, retriever):
|
|
| 408 |
|
| 409 |
|
| 410 |
|
| 411 |
-
"""
|
| 412 |
-
workflow = StateGraph(GraphState)
|
| 413 |
-
|
| 414 |
-
# Define the nodes
|
| 415 |
-
workflow.add_node("retrieve", retrieve) # retrieve
|
| 416 |
-
workflow.add_node("grade_documents", grade_documents) # grade documents
|
| 417 |
-
workflow.add_node("generate", generate) # generate
|
| 418 |
-
#workflow.add_node("generate_ohne", generate) # generate ohne dokumente anzuhängen
|
| 419 |
-
workflow.add_node("transform_query", transform_query) # transform_query
|
| 420 |
-
#momentan nicht genutzt
|
| 421 |
-
#workflow.add_node("web_search", web_search) # web search
|
| 422 |
-
###
|
| 423 |
-
# Fügen Sie einen Zwischenknoten hinzu, um von transform_query zu retrieve zurückzukehren
|
| 424 |
-
workflow.add_node("retrieve_redirect", retrieve) # Dies könnte eine Wrapper-Funktion sein, die retrieve aufruft
|
| 425 |
-
|
| 426 |
-
# Build graph
|
| 427 |
-
workflow.set_entry_point("retrieve")
|
| 428 |
-
workflow.add_edge("retrieve", "grade_documents")
|
| 429 |
-
workflow.add_conditional_edges(
|
| 430 |
-
"grade_documents",
|
| 431 |
-
decide_to_generate,
|
| 432 |
-
{
|
| 433 |
-
"transform_query": "transform_query",
|
| 434 |
-
#"generate_ohne": "generate_ohne",
|
| 435 |
-
"generate": "generate",
|
| 436 |
-
},
|
| 437 |
-
)
|
| 438 |
-
workflow.add_edge("transform_query", "retrieve_redirect")
|
| 439 |
-
workflow.add_edge("retrieve_redirect", "retrieve")
|
| 440 |
-
|
| 441 |
-
#workflow.add_edge("generate_ohne", "generate")
|
| 442 |
-
workflow.add_edge("generate", END)
|
| 443 |
-
# Compile
|
| 444 |
-
app = workflow.compile()
|
| 445 |
-
#Dokumente suchen
|
| 446 |
-
inputs = {"keys": {"question": prompt}}
|
| 447 |
-
for output in app.stream(inputs):
|
| 448 |
-
for key, value in output.items():
|
| 449 |
-
# Node
|
| 450 |
-
pprint.pprint(f"Node '{key}':")
|
| 451 |
-
# Optional: print full state at each node
|
| 452 |
-
# pprint.pprint(value["keys"], indent=2, width=80, depth=None)
|
| 453 |
-
pprint.pprint("\n---\n")
|
| 454 |
-
|
| 455 |
-
# Final generation
|
| 456 |
-
return value['keys']['generation']
|
| 457 |
-
"""
|
| 458 |
-
|
| 459 |
############################################
|
| 460 |
# rag_chain Alternative für RAg mit Bild-Upload, da hier das llm so nicht genutzt werden kann und der prompt mit den RAG Erweiterungen anders übergeben wird
|
| 461 |
#langchain nutzen, um prompt an llm zu leiten, aber vorher in der VektorDB suchen, um passende splits zum Prompt hinzuzufügen
|
|
@@ -1083,312 +1024,3 @@ class CustomDocTemplate(SimpleDocTemplate):
|
|
| 1083 |
self.canv.restoreState()
|
| 1084 |
|
| 1085 |
|
| 1086 |
-
######################################################################
|
| 1087 |
-
#Zustandsgraph für Langgraph, um RAG zu implementieren mit verschiedenen Zuständen
|
| 1088 |
-
#die durchlaufen werden:
|
| 1089 |
-
#1. Dokumente aus vektorstore bekommen
|
| 1090 |
-
#2. die Relevanz der Dokumente einschätzen
|
| 1091 |
-
#3. wenn zu wenig relevante infos: Frage neu formulieren
|
| 1092 |
-
#4. nochmal 1. und 2.
|
| 1093 |
-
#5. wenn nun genug relevante Dokumente: Anfrage an Modell mit den Doks
|
| 1094 |
-
#6. wenn nicht genug Dokumente relevant: Anfrage an Modell ohne Doks
|
| 1095 |
-
#####################################################################
|
| 1096 |
-
|
| 1097 |
-
# Zustandsgraph als Datenstruktur zum Umsetzen
|
| 1098 |
-
class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        keys: A dictionary where each key is a string.
    """

    # Single bag of state values passed between the graph nodes.
    # BUG FIX: the annotation used the builtin function ``any`` instead of
    # ``typing.Any``; ``any`` is a callable, so it slipped past runtime
    # checks, but it is not a type.
    keys: Dict[str, Any]
|
| 1106 |
-
|
| 1107 |
-
|
| 1108 |
-
#Methoden, um den Graph und die Zustände umzusetzen
|
| 1109 |
-
### Nodes ###
|
| 1110 |
-
# die Knoten des Graphen definieren, die der Reihe noch (bzw. je nach Outcome des Vorgänger Knotens) durchlaufen werden
|
| 1111 |
-
def retrieve(state, retriever):
    """
    Retrieve documents from the vector store for the current question.

    Args:
        state (dict): The current graph state; ``state["keys"]`` must hold
            "question" and may hold "second_trial" from a previous pass.
        retriever: Object exposing ``get_relevant_documents(question)``.

    Returns:
        dict: New keys added to state: "documents" (the retrieved documents)
        and "second_trial" ("ja" on a repeated attempt, "nein" otherwise).
    """
    print("---RETRIEVE ---")
    state_dict = state["keys"]
    question = state_dict["question"]
    documents = retriever.get_relevant_documents(question)
    # BUG FIX: the original assigned the misspelled name ``second_trail`` in
    # this branch; the result was only correct by accident because the
    # variable had been pre-initialized to "ja" before the branch.
    if 'second_trial' in state_dict:
        print("second time")
        second_trial = "ja"
    else:
        print("first time")
        second_trial = "nein"
    return {"keys": {"documents": documents, "second_trial": second_trial, "question": question}}
|
| 1131 |
-
|
| 1132 |
-
|
| 1133 |
-
def retrieve_redirect(state):
    """
    Pass-through node used after transform_query to re-enter retrieval.

    Marks the state as a second attempt so the grading step can decide
    differently the next time around.

    Args:
        state (dict): The current graph state.

    Returns:
        dict: Same documents and question, with "second_trial" set to "ja".
    """
    print("---RETRIEVE REDIRECT---")
    keys = state["keys"]
    return {
        "keys": {
            "documents": keys["documents"],
            "second_trial": "ja",
            "question": keys["question"],
        }
    }
|
| 1147 |
-
|
| 1148 |
-
|
| 1149 |
-
|
| 1150 |
-
def generate(state):
    """
    Generate an answer from the question and the (filtered) documents.

    Args:
        state (dict): The current graph state.

    Returns:
        dict: State extended with "generation" holding the LLM output.
    """
    print("---GENERATE---")
    keys = state["keys"]
    question = keys["question"]
    documents = keys["documents"]

    # Standard RAG prompt pulled from the LangChain hub.
    prompt = hub.pull("rlm/rag-prompt")

    # Model used for the final answer.
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.3, streaming=True)

    # Prompt -> model -> plain-string output.
    rag_chain = prompt | llm | StrOutputParser()

    generation = rag_chain.invoke({"context": documents, "question": question})
    return {
        "keys": {"documents": documents, "question": question, "generation": generation}
    }
|
| 1181 |
-
|
| 1182 |
-
def generate_ohne(state):
    """
    Generate an answer without attaching any retrieved documents.

    Used when not enough relevant documents were found.

    Args:
        state (dict): The current graph state.

    Returns:
        dict: State extended with "generation" holding the LLM output.
    """
    print("---GENERATE OHNE---")
    keys = state["keys"]
    question = keys["question"]

    # Question-only prompt (answers in German unless asked otherwise).
    prompt = PromptTemplate(
        template="""\Antworte in deutsch, wenn es nicht explizit anders gefordert wird. Wenn du die Antwort nicht kennst, antworte direkt, dass du es nicht weißt.
Versuche nicht es zu umschreiben. Versuche nicht, die Antwort zu erfinden oder aufzumocken. Halte die Antwort kurz aber ausführlich genug und exakt. \n\n
Hier ist die Useranfrage: {question} """,
        input_variables=["question"])

    # Model used for the answer.
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.3, streaming=True)

    # Prompt -> model -> plain-string output.
    llm_chain = prompt | llm | StrOutputParser()

    generation = llm_chain.invoke({"question": question})
    return {
        "keys": {"question": question, "generation": generation}
    }
|
| 1217 |
-
|
| 1218 |
-
|
| 1219 |
-
def grade_documents(state):
    """
    Determine which retrieved documents are relevant to the question.

    Args:
        state (dict): The current graph state.

    Returns:
        dict: Updated state with the filtered documents, a "search_again"
        flag ("ja" when the question should be reformulated) and the
        unchanged "second_trial" marker.
    """
    print("---CHECK RELEVANCE---")
    state_dict = state["keys"]
    question = state_dict["question"]
    documents = state_dict["documents"]
    second_trial = state_dict["second_trial"]

    # Data model for the structured grading result.
    class grade(BaseModel):
        """Binary score for relevance check."""
        binary_score: str = Field(description="Relevanz Bewertung 'ja' oder 'nein'")

    # LLM
    model = ChatOpenAI(temperature=0.3, model="gpt-4-0125-preview", streaming=True)

    # Tool schema derived from the data model.
    grade_tool_oai = convert_to_openai_tool(grade)

    # LLM with tool and enforced invocation.
    # BUG FIX: ``grade_tool_oai`` is already an OpenAI tool dict; the
    # original converted it a second time with convert_to_openai_tool.
    llm_with_tool = model.bind(
        tools=[grade_tool_oai],
        tool_choice={"type": "function", "function": {"name": "grade"}},
    )

    # Parser for the tool output.
    parser_tool = PydanticToolsParser(tools=[grade])

    # Grading prompt (German).
    prompt = PromptTemplate(
        template="""Du bist ein Bewerter, der die Relevanz von einem erhaltenen Dokument zu einer Nutzeranfrage bewerten soll. \n
Hier ist das erhaltene Dokument: \n\n {context} \n\n
Hier ist die Nutzeranfrage: {question} \n
Wenn das erhaltene Dokument Keywörter oder semantische Bedeutung in Bezug auf die Nutzeranfrage hat, bewerte es als relevant. \n
Gib eine binäre Bewertung von 'ja' oder 'nein' Bewertung, um anzuzeigen ob das Dokuemnt relevant ist zur Nutzeranfrage oder nicht.""",
        input_variables=["context", "question"],
    )

    # Chain
    chain = prompt | llm_with_tool | parser_tool

    # Score every document and keep the relevant ones.
    filtered_docs = []
    anzahl_relevant = 0
    search = "nein"  # default: do not opt for re-questioning
    for d in documents:
        score = chain.invoke({"question": question, "context": d.page_content})
        # Renamed from ``grade`` to avoid shadowing the data-model class above.
        binary_score = score[0].binary_score
        if binary_score == "ja":
            print("---Bewertung: Dokument ist relevant---")
            anzahl_relevant = anzahl_relevant + 1
            filtered_docs.append(d)
        else:
            print("---Bewertung: Dokument irrelevant---")
            search = "ja"  # at least one irrelevant document -> reformulate
            continue
    # If the majority of the documents is relevant, generate with them.
    if (anzahl_relevant >= len(documents) / 2):
        search = "nein"
    print("second trial grade_docs:.....................")
    print(second_trial)
    return {
        "keys": {
            "documents": filtered_docs,
            "question": question,
            "search_again": search,
            "second_trial": second_trial
        }
    }
|
| 1296 |
-
|
| 1297 |
-
|
| 1298 |
-
def transform_query(state):
    """
    Re-phrase the question to produce a better retrieval query.

    Args:
        state (dict): The current graph state.

    Returns:
        dict: State with the question replaced by the improved one and
        "second_trial" set to "ja".
    """
    print("---TRANSFORM QUERY---")
    keys = state["keys"]
    question = keys["question"]
    documents = keys["documents"]

    # Prompt asking the model for a retrieval-optimized reformulation.
    prompt = PromptTemplate(
        template="""Du generierst Fragen, die optimiert sind für das Retrieval von Dokumenten. \n
Schaue auf den input und versuche die zugrundeliegende Absicht / Bedeutung zu bewerten. \n
Hier ist die ursprüngliche Frage:
\n ------- \n
{question}
\n ------- \n
Formuliere eine verbesserte Frage: """,
        input_variables=["question"],
    )

    # Model used for the reformulation.
    model = ChatOpenAI(temperature=0, model="gpt-4-0125-preview", streaming=True)

    # Prompt -> model -> plain-string output.
    better_question = (prompt | model | StrOutputParser()).invoke({"question": question})

    return {"keys": {"documents": documents, "question": better_question, "second_trial": "ja"}}
|
| 1333 |
-
|
| 1334 |
-
#websuche zur Zeit nicht in gebrauch
|
| 1335 |
-
def web_search(state):
    """
    Web search based on the re-phrased question using the Tavily API.

    Currently not wired into the graph.

    Args:
        state (dict): The current graph state.

    Returns:
        dict: State with the merged web results appended to the documents.
    """
    print("---WEB Suche---")
    keys = state["keys"]
    question = keys["question"]
    documents = keys["documents"]

    # Run the search and merge all result snippets into one Document.
    searcher = TavilySearchResults()
    hits = searcher.invoke({"query": question})
    merged = Document(page_content="\n".join(hit["content"] for hit in hits))
    documents.append(merged)

    return {"keys": {"documents": documents, "question": question}}
|
| 1356 |
-
|
| 1357 |
-
|
| 1358 |
-
### Edges
|
| 1359 |
-
|
| 1360 |
-
|
| 1361 |
-
def decide_to_generate(state):
    """
    Decide which node to run next after grading.

    Determines whether to generate an answer, re-generate the question for
    a new retrieval attempt, or generate without documents attached.

    Args:
        state (dict): The current state of the agent, including all keys.

    Returns:
        str: Next node to call ("transform_query" or "generate").
    """
    print("---ENTSCHEIDE ZU GENERIEREN---")
    print("current state")
    print(state["keys"])
    print("-------------------------------")
    keys = state["keys"]
    # Looked up for parity with the rest of the graph nodes (raises KeyError
    # on malformed state); not used in the decision itself.
    question = keys["question"]
    filtered_documents = keys["documents"]
    search_again = keys["search_again"]
    second_trial = keys["second_trial"]

    if search_again != "ja":
        # Enough relevant documents -> generate with them.
        print("---ENTSCHEIDUNG: GENERIERE---")
        return "generate"
    if second_trial == "ja":
        # Already retried once -> generate without attaching documents.
        print("---ENTSCHEIDUNG: Generiere ohne Dokumente---")
        return "generate"
    # Too few relevant documents on the first attempt -> reformulate.
    print("---ENTSCHEIDUNG: VERÄNDERE DIE FRAGE ---")
    return "transform_query"
|
|
|
|
| 44 |
from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever
|
| 45 |
from typing import Dict, TypedDict
|
| 46 |
from langchain_core.messages import BaseMessage
|
| 47 |
+
from langchain_community.embeddings.openai import OpenAIEmbeddings
|
| 48 |
from langchain.prompts import PromptTemplate
|
| 49 |
|
| 50 |
|
|
|
|
| 293 |
# LLM
|
| 294 |
model = ChatOpenAI(temperature=0.3, model="gpt-3.5-turbo-1106", streaming=True)
|
| 295 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 296 |
# Tool
|
| 297 |
grade_tool_oai = convert_to_openai_tool(grade)
|
| 298 |
|
|
|
|
| 317 |
|
| 318 |
# Chain
|
| 319 |
chain = prompt_gesamt | llm_with_tool | parser_tool
|
| 320 |
+
|
|
|
|
| 321 |
# Score
|
| 322 |
filtered_docs = []
|
| 323 |
for d in documents:
|
|
|
|
| 397 |
|
| 398 |
|
| 399 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 400 |
############################################
|
| 401 |
# rag_chain Alternative für RAg mit Bild-Upload, da hier das llm so nicht genutzt werden kann und der prompt mit den RAG Erweiterungen anders übergeben wird
|
| 402 |
#langchain nutzen, um prompt an llm zu leiten, aber vorher in der VektorDB suchen, um passende splits zum Prompt hinzuzufügen
|
|
|
|
| 1024 |
self.canv.restoreState()
|
| 1025 |
|
| 1026 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|