Nahiyan14 committed on
Commit
b7d7e4b
·
verified ·
1 Parent(s): 3458ca8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -0
app.py CHANGED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from flask import Flask, render_template, jsonify, request
from src.helper import download_hugging_face_embeddings
from langchain_pinecone import PineconeVectorStore
from langchain_openai import OpenAI
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from dotenv import load_dotenv
from src.prompt import *
import os

app = Flask(__name__)

# Load environment variables - these will be set in Hugging Face Space secrets
load_dotenv() # Still useful for local development

PINECONE_API_KEY = os.environ.get('PINECONE_API_KEY')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')

# Fail fast at import time: the app is useless without both keys.
if not PINECONE_API_KEY or not OPENAI_API_KEY:
    raise ValueError("Missing PINECONE_API_KEY or OPENAI_API_KEY")

# Re-export into the process environment so the langchain Pinecone/OpenAI
# clients can pick the keys up implicitly (they read os.environ).
os.environ["PINECONE_API_KEY"] = PINECONE_API_KEY
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY

# Initialize embeddings and chain at startup
# (both are populated by initialize_chain(); None means "not ready yet")
embeddings = None
rag_chain = None
30
def initialize_chain():
    """Build the embeddings, Pinecone retriever, and RAG chain.

    Stores the results in the module-level ``embeddings`` and ``rag_chain``
    globals. Returns True on success, False on any failure; errors are
    printed rather than raised so the web app can still start (and retry
    later) even when the chain is unavailable.
    """
    global embeddings, rag_chain
    try:
        embeddings = download_hugging_face_embeddings()

        # Attach to the pre-populated Pinecone index rather than
        # re-embedding documents here.
        vector_store = PineconeVectorStore.from_existing_index(
            index_name="medprep",
            embedding=embeddings,
        )
        retriever = vector_store.as_retriever(
            search_type="similarity",
            search_kwargs={"k": 3},
        )

        llm = OpenAI(temperature=0.4, max_tokens=500)
        chat_prompt = ChatPromptTemplate.from_messages(
            [("system", system_prompt), ("human", "{input}")]
        )

        qa_chain = create_stuff_documents_chain(llm, chat_prompt)
        rag_chain = create_retrieval_chain(retriever, qa_chain)
        print("RAG chain initialized successfully")
        return True
    except Exception as e:
        # Best-effort startup: report and signal failure to the caller.
        print(f"Failed to initialize RAG chain: {e}")
        return False
59
+
60
# Initialize the chain when the application starts
# (best-effort: initialize_chain() returns False on failure instead of
# raising, so the routes are still registered and /get retries later)
initialize_chain()
62
+
63
@app.route("/")
def index():
    """Serve the chat front-end page."""
    template_name = 'chat.html'
    return render_template(template_name)
66
+
67
@app.route("/get", methods=["GET", "POST"])
def chat():
    """Answer a user message via the RAG chain.

    Accepts the message as the ``msg`` field in POST form data or as a
    query-string parameter on GET. Returns the chain's answer as plain
    text, or an "Error: ..." string on failure.
    """
    global rag_chain

    # Make sure chain is initialized (retry if startup init failed).
    if rag_chain is None:
        if not initialize_chain():
            return "Error: System not initialized properly. Please check the logs."

    # Bug fix: the route advertises GET, but request.form is only populated
    # on POST, so request.form["msg"] turned every GET into an HTTP 400.
    # request.values covers both form data and query-string args.
    msg = request.values.get("msg", "").strip()
    if not msg:
        return "Error: No message provided."

    try:
        response = rag_chain.invoke({"input": msg})
        return str(response["answer"])
    except Exception as e:
        print(f"Error processing request: {e}")
        return f"Error: {str(e)}"
83
+
84
# Health check endpoint for monitoring
@app.route("/health")
def health_check():
    """Liveness probe: always reports the service as healthy."""
    status_payload = {"status": "healthy"}
    return jsonify(status_payload)
88
+
89
if __name__ == '__main__':
    # Get the port from the environment variable - Hugging Face uses 7860
    listen_port = int(os.getenv("PORT", 7860))
    app.run(host="0.0.0.0", port=listen_port, debug=False)