prernajeet01 commited on
Commit
8eb9de0
·
verified ·
1 Parent(s): b542e4e

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +99 -0
app.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ from langchain.document_loaders import PyPDFLoader
4
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
5
+ from langchain.embeddings import OpenAIEmbeddings
6
+ from langchain.vectorstores import FAISS
7
+ from langchain.chains import ConversationalRetrievalChain
8
+ from langchain.memory import ConversationBufferMemory
9
+ from langchain.chat_models import ChatOpenAI
10
+
11
class AuditCopilot:
    """Conversational Q&A assistant over a pre-loaded PDF of audit guidelines.

    At construction time it builds a FAISS vector index from the bundled PDF
    and wires up a ConversationalRetrievalChain backed by GPT-3.5-turbo;
    get_response() then answers questions against that index.
    """

    def __init__(self):
        # SECURITY: read the key from the environment instead of committing
        # it to source. The original placeholder is kept as the fallback so
        # existing behavior (edit-in-place) still works.
        self.openai_api_key = os.environ.get("OPENAI_API_KEY", "your-api-key-here")
        self.vector_store = None  # FAISS index, built by initialize_system()
        self.chain = None         # ConversationalRetrievalChain, built by initialize_system()
        self.chat_history = []    # list of (question, answer) tuples
        # Build the index and chain immediately with the pre-loaded PDF.
        self.initialize_system()

    def initialize_system(self):
        """Load the PDF, build the FAISS vector store, and create the chain.

        Failures are printed rather than raised so the Gradio UI can still
        launch; get_response() then reports that the system is uninitialized.
        """
        try:
            # PDF is expected to sit next to this script.
            pdf_path = "guidelines.pdf"  # Replace with your PDF filename

            # Load the document and split it into overlapping chunks so
            # retrieved passages keep enough surrounding context.
            loader = PyPDFLoader(pdf_path)
            documents = loader.load()
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=1000,
                chunk_overlap=200,
            )
            splits = text_splitter.split_documents(documents)

            # Embed the chunks and index them for similarity search.
            embeddings = OpenAIEmbeddings(openai_api_key=self.openai_api_key)
            self.vector_store = FAISS.from_documents(splits, embeddings)

            # Deterministic (temperature=0) chat model plus conversation
            # memory so follow-up questions are answered in context.
            llm = ChatOpenAI(
                model_name="gpt-3.5-turbo",
                temperature=0,
                openai_api_key=self.openai_api_key,
            )
            memory = ConversationBufferMemory(
                memory_key="chat_history",
                return_messages=True,
            )
            self.chain = ConversationalRetrievalChain.from_llm(
                llm=llm,
                retriever=self.vector_store.as_retriever(),
                memory=memory,
            )

            print("System initialized successfully!")

        except Exception as e:
            print(f"Error initializing system: {str(e)}")

    def get_response(self, question):
        """Return the chain's answer to *question*, or an error string.

        Side effect: appends the (question, answer) pair to self.chat_history.
        """
        # initialize_system() swallows its errors, so the chain may never
        # have been built; fail with a clear message instead of letting the
        # broad except below report a cryptic NoneType error.
        if self.chain is None:
            return "Error generating response: system is not initialized"
        try:
            response = self.chain({"question": question})
            self.chat_history.append((question, response['answer']))
            return response['answer']
        except Exception as e:
            return f"Error generating response: {str(e)}"
69
+
70
def create_gradio_interface():
    """Build and return the Gradio Blocks UI for the audit copilot."""
    assistant = AuditCopilot()

    with gr.Blocks() as demo:
        gr.Markdown("# Audit Copilot")
        gr.Markdown("Ask questions about the audit guidelines!")

        # Chat widgets: transcript view, question input, and a reset button.
        conversation = gr.Chatbot(label="Conversation")
        question_box = gr.Textbox(label="Ask a question")
        clear_button = gr.Button("Clear")

        def respond(message, chat_history):
            # Ask the copilot, record the exchange, and empty the textbox.
            answer = assistant.get_response(message)
            chat_history.append((message, answer))
            return "", chat_history

        # Enter in the textbox sends the question; Clear wipes the transcript.
        question_box.submit(
            respond, [question_box, conversation], [question_box, conversation]
        )
        clear_button.click(lambda: None, None, conversation, queue=False)

    return demo
93
+
94
if __name__ == "__main__":
    # Build the UI and start the local Gradio server; report startup
    # failures as a message rather than an unhandled traceback.
    try:
        app = create_gradio_interface()
        app.launch()
    except Exception as exc:
        print(f"Error launching application: {str(exc)}")