hema05core committed on
Commit
dd5caaa
·
verified ·
1 Parent(s): 3fc8a5c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -15
app.py CHANGED
@@ -1,31 +1,40 @@
 
1
  import gradio as gr
2
 
3
- # ✅ Use langchain_community imports
4
- from langchain.text_splitter import CharacterTextSplitter # stays in main langchain
5
  from langchain_community.embeddings import HuggingFaceEmbeddings
6
  from langchain_community.vectorstores import FAISS
7
- from langchain.chains import ConversationalRetrievalChain # stays in main langchain
8
  from langchain_community.llms import HuggingFaceHub
9
  from langchain_community.document_loaders import PyPDFLoader
10
 
11
- # 1️⃣ Load your PDF
12
- # Make sure chimera.pdf is uploaded in the Files tab of Hugging Face Space
13
- loader = PyPDFLoader("chimera.pdf")
 
14
  documents = loader.load()
15
 
16
- # 2️⃣ Split into chunks
17
  text_splitter = CharacterTextSplitter(chunk_size=800, chunk_overlap=100)
18
  texts = text_splitter.split_documents(documents)
19
 
20
- # 3️⃣ Create embeddings + vector DB
21
  embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
22
  db = FAISS.from_documents(texts, embeddings)
23
 
24
- # 4️⃣ Build retriever-based chatbot
25
  retriever = db.as_retriever(search_kwargs={"k": 3})
26
 
 
 
 
 
 
 
 
27
  qa = ConversationalRetrievalChain.from_llm(
28
- HuggingFaceHub(repo_id="google/flan-t5-large", model_kwargs={"temperature":0}),
29
  retriever=retriever
30
  )
31
 
@@ -33,13 +42,54 @@ chat_history = []
33
 
34
  def respond(message, history):
35
  global chat_history
 
36
  result = qa({"question": message, "chat_history": chat_history})
37
  chat_history.append((message, result["answer"]))
38
- return result["answer"]
39
 
40
- # 5️⃣ Simple Gradio UI
41
- chatbot = gr.ChatInterface(respond)
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
- if __name__ == "__main__":
44
- chatbot.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
  import gradio as gr
3
 
4
+ # ✅ LangChain imports
5
+ from langchain.text_splitter import CharacterTextSplitter
6
  from langchain_community.embeddings import HuggingFaceEmbeddings
7
  from langchain_community.vectorstores import FAISS
8
+ from langchain.chains import ConversationalRetrievalChain
9
  from langchain_community.llms import HuggingFaceHub
10
  from langchain_community.document_loaders import PyPDFLoader
11
 
12
+ # --- 1️⃣ Load your PDF ---
13
+ current_dir = os.path.dirname(__file__)
14
+ pdf_path = os.path.join(current_dir, "chimera.pdf")
15
+ loader = PyPDFLoader(pdf_path)
16
  documents = loader.load()
17
 
18
+ # --- 2️⃣ Split into chunks ---
19
  text_splitter = CharacterTextSplitter(chunk_size=800, chunk_overlap=100)
20
  texts = text_splitter.split_documents(documents)
21
 
22
+ # --- 3️⃣ Create embeddings + FAISS vector store ---
23
  embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
24
  db = FAISS.from_documents(texts, embeddings)
25
 
26
+ # --- 4️⃣ Build retriever-based chatbot ---
27
  retriever = db.as_retriever(search_kwargs={"k": 3})
28
 
29
+ # --- Hugging Face Hub LLM setup with secret token ---
30
+ llm = HuggingFaceHub(
31
+ repo_id="google/flan-t5-base", # smaller for faster response
32
+ model_kwargs={"temperature":0},
33
+ huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN")
34
+ )
35
+
36
  qa = ConversationalRetrievalChain.from_llm(
37
+ llm,
38
  retriever=retriever
39
  )
40
 
 
42
 
43
def respond(message, history):
    """Answer one user message against the indexed Chimera PDF.

    Parameters
    ----------
    message : str
        The user's question.
    history
        Gradio-supplied chat history; unused — the module-level
        ``chat_history`` list is the source of truth fed to the chain.

    Returns
    -------
    tuple
        ``(answer, chat_history)``: the model's answer string and the
        accumulated list of ``(question, answer)`` pairs.
    """
    global chat_history
    result = qa({"question": message, "chat_history": chat_history})
    chat_history.append((message, result["answer"]))
    # Trim AFTER appending so the persisted list never exceeds the cap
    # (trimming first, as before, let it grow to 7 entries between calls).
    chat_history = chat_history[-6:]  # keep the last 6 exchanges
    return result["answer"], chat_history
49
 
50
# --- 5️⃣ Gradio Blocks UI with Entry Warning ---
with gr.Blocks() as demo:
    with gr.Column():
        # Warning message shown before the user enters the simulation.
        warning_text = gr.HTML(
            """
            <div style="background-color:black;color:white;padding:20px;font-family:monospace;font-size:18px;">
            ⚠ WARNING — INVESTIGATIVE SIMULATION ⚠<br><br>
            You are about to enter <b>The Chimera Case</b>, a high-stakes investigation into Innovate Future Labs (IFL) and Project Chimera.<br>
            The scenario contains allegations, leaked files, and disputed testimonies. Treat every claim as unverified until verified by evidence.<br>
            Your decisions and observations will guide your understanding of the case.<br><br>
            Are you ready to proceed?
            </div>
            """
        )

        # Buttons
        enter_btn = gr.Button("Enter the Case")
        exit_btn = gr.Button("Exit")

        # Chatbot (hidden initially)
        chatbot = gr.Chatbot(visible=False)
        user_input = gr.Textbox(placeholder="Type your message here...", visible=False)
        submit_btn = gr.Button("Send", visible=False)

        # Every component toggled by enter/exit, in the order the
        # handlers return updates for them.
        _toggled = [warning_text, chatbot, user_input, submit_btn, enter_btn, exit_btn]

        def enter_case():
            """Hide the warning and entry buttons; reveal the chat widgets.

            Visibility must be changed by RETURNING gr.update(...) values
            wired through `outputs=` — assigning `.visible` inside a
            handler has no effect on a rendered component.
            """
            return (
                gr.update(value=""),        # clear warning
                gr.update(visible=True),    # chatbot
                gr.update(visible=True),    # user_input
                gr.update(visible=True),    # submit_btn
                gr.update(visible=False),   # enter_btn
                gr.update(visible=False),   # exit_btn
            )

        def exit_case():
            """Replace the warning with an end-of-session notice and hide everything else."""
            return (
                gr.update(value="<h2>Session ended. You exited the simulation.</h2>"),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(visible=False),
            )

        # Wire the handlers' returned updates to the components they
        # target; without `outputs=` the return values are discarded.
        enter_btn.click(enter_case, outputs=_toggled)
        exit_btn.click(exit_case, outputs=_toggled)

        def _on_submit(message, history):
            """Adapter: respond() yields (answer, pair_list); clear the
            textbox and push the full pair list into the Chatbot."""
            _answer, pairs = respond(message, history)
            return "", pairs

        # One output per distinct component (a component may appear only
        # once in `outputs`).
        submit_btn.click(_on_submit, inputs=[user_input, chatbot],
                         outputs=[user_input, chatbot])

# --- 6️⃣ Launch ---
if __name__ == "__main__":
    # Queueing is enabled via demo.queue(); launch(enable_queue=...) was
    # removed from current Gradio releases.
    demo.queue()
    demo.launch(share=True)