eabybabu committed on
Commit
67462b2
·
verified ·
1 Parent(s): e5aa01d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -14
app.py CHANGED
@@ -7,21 +7,26 @@ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
7
  from langchain.document_loaders import PyPDFLoader
8
  import time
9
 
10
- # Define paths for cybersecurity documents (Add your PDFs here)
11
- PDF_FILES = ["NIST_CSWP_04162018.pdf", "ISOIEC 27001_2ef522.pdf", "MITRE ATLAS Overview Combined_v1.pdf", "ISO-IEC-27005-2022.pdf"]
 
 
 
 
 
12
 
13
- # Choose LLM Model (Switch between OpenAI and Hugging Face)
14
- USE_OPENAI = False # Change to True if you prefer OpenAI API
15
 
16
  def load_data():
17
  """Loads multiple PDFs and stores embeddings in ChromaDB"""
18
  all_docs = []
19
  for pdf in PDF_FILES:
20
- if os.path.exists(pdf):
21
  loader = PyPDFLoader(pdf)
22
  all_docs.extend(loader.load())
23
 
24
- # Use OpenAI or Hugging Face embeddings
25
  if USE_OPENAI:
26
  embeddings = OpenAIEmbeddings()
27
  else:
@@ -29,19 +34,19 @@ def load_data():
29
 
30
  return Chroma.from_documents(all_docs, embeddings)
31
 
32
- # Load Vector Database
33
  vector_db = load_data()
34
 
35
- # Select LLM model (Online: OpenAI | Offline: Hugging Face)
36
  if USE_OPENAI:
37
  llm = OpenAI()
38
  else:
39
  llm = HuggingFaceHub(repo_id="google/flan-t5-large", model_kwargs={"temperature": 0.5, "max_length": 512})
40
 
41
- # Create Retrieval QA chain
42
  qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vector_db.as_retriever())
43
 
44
- # Function to simulate futuristic typing effect
45
  def chatbot_response(question):
46
  """Handles chatbot queries with a typing effect"""
47
  response = qa_chain.run(question)
@@ -51,7 +56,7 @@ def chatbot_response(question):
51
  time.sleep(0.02) # Simulate typing delay
52
  yield displayed_response
53
 
54
- # Custom futuristic CSS style
55
  custom_css = """
56
  body {background-color: #0f172a; color: #0ff; font-family: 'Orbitron', sans-serif;}
57
  #chatbot-container {border: 2px solid #00ffff; background: rgba(0, 0, 0, 0.8); padding: 20px; border-radius: 15px;}
@@ -96,13 +101,13 @@ three_js_html = """
96
  </div>
97
  """
98
 
99
- # Create Gradio Interface with Custom Styling and 3D Avatar
100
  iface = gr.Interface(
101
  fn=chatbot_response,
102
  inputs="text",
103
  outputs="text",
104
- title="🤖 Cybernetic AI: Your Cybersecurity Assistant",
105
- description="Ask me about NIST, ISO/IEC 27001, MITRE ATT&CK, and ISO/IEC 27005. Now with a 3D Avatar!",
106
  theme="default",
107
  css=custom_css,
108
  live=True, # Enables real-time updates for typing effect
 
7
  from langchain.document_loaders import PyPDFLoader
8
  import time
9
 
10
+ # Define paths for cybersecurity training PDFs
11
+ PDF_FILES = [
12
+ "ISOIEC 27001_2ef522.pdf",
13
+ "ISO-IEC-27005-2022.pdf",
14
+ "MITRE ATLAS Overview Combined_v1.pdf",
15
+ "NIST_CSWP_04162018.pdf"
16
+ ]
17
 
18
+ # Choose whether to use OpenAI API (Online) or Hugging Face (Offline)
19
+ USE_OPENAI = False # Set to True if using OpenAI API for better responses
20
 
21
  def load_data():
22
  """Loads multiple PDFs and stores embeddings in ChromaDB"""
23
  all_docs = []
24
  for pdf in PDF_FILES:
25
+ if os.path.exists(pdf): # Ensure the PDF exists in the Hugging Face Space
26
  loader = PyPDFLoader(pdf)
27
  all_docs.extend(loader.load())
28
 
29
+ # Use OpenAI embeddings (Online) or Hugging Face embeddings (Offline)
30
  if USE_OPENAI:
31
  embeddings = OpenAIEmbeddings()
32
  else:
 
34
 
35
  return Chroma.from_documents(all_docs, embeddings)
36
 
37
+ # Load the knowledge base from the uploaded PDFs
38
  vector_db = load_data()
39
 
40
+ # Select the LLM model (Online: OpenAI | Offline: Hugging Face)
41
  if USE_OPENAI:
42
  llm = OpenAI()
43
  else:
44
  llm = HuggingFaceHub(repo_id="google/flan-t5-large", model_kwargs={"temperature": 0.5, "max_length": 512})
45
 
46
+ # Create Retrieval QA chain for document-based responses
47
  qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vector_db.as_retriever())
48
 
49
+ # Function to generate chatbot responses with a typing effect
50
  def chatbot_response(question):
51
  """Handles chatbot queries with a typing effect"""
52
  response = qa_chain.run(question)
 
56
  time.sleep(0.02) # Simulate typing delay
57
  yield displayed_response
58
 
59
+ # Custom futuristic CSS styling
60
  custom_css = """
61
  body {background-color: #0f172a; color: #0ff; font-family: 'Orbitron', sans-serif;}
62
  #chatbot-container {border: 2px solid #00ffff; background: rgba(0, 0, 0, 0.8); padding: 20px; border-radius: 15px;}
 
101
  </div>
102
  """
103
 
104
+ # Create Gradio Chatbot Interface with Custom UI & 3D Avatar
105
  iface = gr.Interface(
106
  fn=chatbot_response,
107
  inputs="text",
108
  outputs="text",
109
+ title="🤖 Cybersecurity AI Assistant",
110
+ description="Ask me about NIST, ISO/IEC 27001, MITRE ATLAS, and ISO/IEC 27005. Powered by AI and real-time 3D visualization.",
111
  theme="default",
112
  css=custom_css,
113
  live=True, # Enables real-time updates for typing effect