Update app.py
app.py
CHANGED
@@ -1,8 +1,10 @@
+import os
 import gradio as gr
-from langchain.chains import RagChain
 from langchain.vectorstores import Chroma
 from transformers import RagTokenizer, RagSequenceForGeneration
 from sentence_transformers import SentenceTransformer
+from langchain.chains.question_answering import load_qa_chain
+from langchain.llms import HuggingFaceLLM

 #Constants
 ANTI_BOT_PW = os.getenv("CORRECT_VALIDATE")
@@ -20,11 +22,16 @@ model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use
 # Connect to the Chroma DB and load the documents
 chroma_db = Chroma(embedding_model=embedding_model, persist_directory = PATH_WORK + CHROMA_DIR)

+# Create a HuggingFaceLLM model
+llm = HuggingFaceLLM(model=model, tokenizer=tokenizer)
+
+
+
 # Create a custom retriever with the Chroma DB and the embeddings
-retriever = chroma_db.as_retriever()
+#retriever = chroma_db.as_retriever()

 # Build the RAG chain with the custom retriever
-rag_chain = RagChain(model=model, retriever=retriever, tokenizer=tokenizer, vectorstore=chroma_db)
+#rag_chain = RagChain(model=model, retriever=retriever, tokenizer=tokenizer, vectorstore=chroma_db)
 #############################################


@@ -45,23 +52,25 @@ def document_retrieval_chroma2():



-def get_rag_response(
+def get_rag_response(question):
     global rag_chain
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    # Query the relevant documents from the Chroma DB
+    docs = chroma_db.search(question, top_k=5)
+    passages = [doc['text'] for doc in docs]
+    links = [doc.get('url', 'No URL available') for doc in docs]
+
+    # Generate the answer
+    answer = llm(question, docs)
+
+    # Assemble the output
+    response = {
+        "answer": answer,
+        "documents": [{"link": link, "passage": passage} for link, passage in zip(links, passages)]
+    }
+
+    return response
+


 def chatbot_response (user_input, chat_history=[]):
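
The new line `llm = HuggingFaceLLM(model=model, tokenizer=tokenizer)` relies on a class name that does not match the stock local-model wrapper in classic `langchain.llms`, which is `HuggingFacePipeline`. A minimal sketch of that wiring, assuming a plain seq2seq generator (the hypothetical stand-in google/flan-t5-base) rather than the RAG-specific `RagSequenceForGeneration`, which the standard transformers pipelines do not cover:

# Sketch only: wrap a local Hugging Face pipeline as a LangChain LLM.
# Assumptions: HuggingFacePipeline stands in for the HuggingFaceLLM used in the
# commit, and google/flan-t5-base is a hypothetical stand-in generator.
from transformers import pipeline
from langchain.llms import HuggingFacePipeline

generator = pipeline("text2text-generation", model="google/flan-t5-base")
llm = HuggingFacePipeline(pipeline=generator)

# The wrapper can then be called with a plain prompt string:
print(llm("What is retrieval-augmented generation?"))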
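
Inside `get_rag_response`, `chroma_db.search(question, top_k=5)` with dict-style access (`doc['text']`, `doc.get('url', ...)`) appears not to match the langchain `Chroma` API, which returns `Document` objects from `similarity_search(query, k=...)`, and `answer = llm(question, docs)` passes the raw documents into the LLM call, which is what the newly imported but still unused `load_qa_chain` is normally used for. A hedged sketch of the same function on top of those APIs, assuming `chroma_db` and `llm` are the objects defined earlier in app.py and that the documents were ingested with a "url" field in their metadata:

# Sketch only: get_rag_response built on the APIs the commit already imports.
from langchain.chains.question_answering import load_qa_chain

def get_rag_response(question):
    # similarity_search returns Document objects (page_content + metadata),
    # not dicts, so passages and links are read from those fields
    docs = chroma_db.similarity_search(question, k=5)
    passages = [doc.page_content for doc in docs]
    links = [doc.metadata.get("url", "No URL available") for doc in docs]

    # load_qa_chain "stuff"s the retrieved passages into a single prompt
    qa_chain = load_qa_chain(llm, chain_type="stuff")
    answer = qa_chain.run(input_documents=docs, question=question)

    return {
        "answer": answer,
        "documents": [
            {"link": link, "passage": passage}
            for link, passage in zip(links, passages)
        ],
    }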
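
The commented-out `retriever` / `rag_chain` path could also be kept by using langchain's built-in `RetrievalQA` chain in place of the removed `RagChain` import. A sketch under the same assumption that `llm` and `chroma_db` come from the surrounding app.py:

# Sketch only: retriever-based alternative to the commented-out rag_chain.
from langchain.chains import RetrievalQA

retriever = chroma_db.as_retriever(search_kwargs={"k": 5})
rag_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,  # keep the retrieved passages for the UI
)

result = rag_chain({"query": "How does the document search work?"})  # example question
answer = result["result"]
sources = result["source_documents"]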