from flask import Flask, render_template, request
from src.helper import download_hugging_face_embeddings
from langchain.vectorstores import FAISS
from langchain.schema import Document
from langchain.llms import CTransformers
from langchain.chains import RetrievalQA
from dotenv import load_dotenv
import os

app = Flask(__name__)

load_dotenv()
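# Load the HuggingFace embedding model used to embed both the corpus and user queries.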
embeddings = download_hugging_face_embeddings()
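# Placeholder document so the index can be built; in the real app these would
# presumably be text chunks from the indexed medical corpus.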
documents = [Document(page_content="dummy")]
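# Build an in-memory FAISS vector index; embeddings are computed by the model above.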
vector_store = FAISS.from_documents(documents, embeddings)
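# Load the quantized Llama 2 chat model (GGML) for local inference via CTransformers.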
llm = CTransformers(
    model="E:\\project\\Medical-Chatbot\\llama-2-7b-chat.ggmlv3.q4_0.bin",
    model_type="llama",
    config={'max_new_tokens': 512, 'temperature': 0.8},
)
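# RetrievalQA "stuff" chain: the top-k retrieved chunks are packed into a single prompt.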
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vector_store.as_retriever(search_kwargs={'k': 2}),
    return_source_documents=True,
)


@app.route("/")
def index():
    return render_template('chat.html')


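# Chat endpoint: run the posted message through the RetrievalQA chain and return the answer.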
@app.route("/get", methods=["GET", "POST"])
def chat():
    msg = request.form["msg"]
    print(msg)
    result = qa({"query": msg})
    print("Response : ", result["result"])
    return str(result["result"])


if __name__ == '__main__':
    app.run(host="0.0.0.0", port=8080, debug=True)