eabybabu commited on
Commit
3cb032f
·
verified ·
1 Parent(s): 1b54cf5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -74
app.py CHANGED
@@ -1,26 +1,13 @@
1
  import os
2
  import gradio as gr
3
  from langchain.chains import RetrievalQA
4
- from langchain_community.vectorstores import Chroma # Use langchain_community for vector stores
5
- from langchain.llms import OpenAI, HuggingFaceHub
6
- from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
7
- from langchain.document_loaders import PyPDFLoader
 
 
8
  import time
9
- from langchain_huggingface import HuggingFaceEndpoint
10
-
11
-
12
- # Fetch API token securely from environment variables
13
- HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACEHUB_API_TOKEN")
14
-
15
- if HUGGINGFACE_API_KEY is None:
16
- raise ValueError("Hugging Face API token is missing! Set it as a secret in Hugging Face Spaces.")
17
-
18
- # Load LLM securely
19
- llm = HuggingFaceEndpoint(
20
- repo_id="google/flan-t5-large",
21
- model_kwargs={"temperature": 0.5, "max_length": 512},
22
- huggingfacehub_api_token=HUGGINGFACE_API_KEY # ✅ Secure API key usage
23
- )
24
 
25
  # Define paths for cybersecurity training PDFs
26
  PDF_FILES = [
@@ -30,9 +17,12 @@ PDF_FILES = [
30
  "NIST_CSWP_04162018.pdf"
31
  ]
32
 
33
- # Choose whether to use OpenAI API (Online) or Hugging Face (Offline)
34
- USE_OPENAI = False # Set to True if using OpenAI API for better responses
 
 
35
 
 
36
  def load_data():
37
  """Loads multiple PDFs and stores embeddings in ChromaDB"""
38
  all_docs = []
@@ -41,27 +31,25 @@ def load_data():
41
  loader = PyPDFLoader(pdf)
42
  all_docs.extend(loader.load())
43
 
44
- # Use OpenAI embeddings (Online) or Hugging Face embeddings (Offline)
45
- if USE_OPENAI:
46
- embeddings = OpenAIEmbeddings()
47
- else:
48
- embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
49
 
50
  return Chroma.from_documents(all_docs, embeddings)
51
 
52
- # Load the knowledge base from the uploaded PDFs
53
  vector_db = load_data()
54
 
55
- # Select the LLM model (Online: OpenAI | Offline: Hugging Face)
56
- if USE_OPENAI:
57
- llm = OpenAI()
58
- else:
59
- llm = HuggingFaceHub(repo_id="google/flan-t5-large", model_kwargs={"temperature": 0.5, "max_length": 512})
 
 
60
 
61
- # Create Retrieval QA chain for document-based responses
62
  qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vector_db.as_retriever())
63
 
64
- # Function to generate chatbot responses with a typing effect
65
  def chatbot_response(question):
66
  """Handles chatbot queries with a typing effect"""
67
  response = qa_chain.run(question)
@@ -74,59 +62,23 @@ def chatbot_response(question):
74
  # Custom futuristic CSS styling
75
  custom_css = """
76
  body {background-color: #0f172a; color: #0ff; font-family: 'Orbitron', sans-serif;}
77
- #chatbot-container {border: 2px solid #00ffff; background: rgba(0, 0, 0, 0.8); padding: 20px; border-radius: 15px;}
78
  .gradio-container {background: linear-gradient(to bottom, #020c1b, #001f3f);}
79
  textarea {background: #011627; color: #0ff; font-size: 18px;}
80
  button {background: #0088ff; color: white; font-size: 20px; border-radius: 5px; border: none; padding: 10px;}
81
  button:hover {background: #00ffff; color: #000;}
82
  """
83
 
84
- # 3D Avatar using Three.js
85
- three_js_html = """
86
- <div id="avatar-container">
87
- <script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
88
- <script>
89
- function create3DAvatar() {
90
- var scene = new THREE.Scene();
91
- var camera = new THREE.PerspectiveCamera(75, 1, 0.1, 1000);
92
- var renderer = new THREE.WebGLRenderer({ alpha: true });
93
- renderer.setSize(300, 300);
94
- document.getElementById('avatar-container').appendChild(renderer.domElement);
95
-
96
- var geometry = new THREE.SphereGeometry(1, 32, 32);
97
- var material = new THREE.MeshStandardMaterial({ color: 0x00ffff, wireframe: true });
98
- var avatar = new THREE.Mesh(geometry, material);
99
- scene.add(avatar);
100
-
101
- var light = new THREE.PointLight(0x00ffff, 1, 100);
102
- light.position.set(2, 2, 5);
103
- scene.add(light);
104
-
105
- camera.position.z = 3;
106
-
107
- function animate() {
108
- requestAnimationFrame(animate);
109
- avatar.rotation.y += 0.01;
110
- renderer.render(scene, camera);
111
- }
112
- animate();
113
- }
114
- window.onload = create3DAvatar;
115
- </script>
116
- </div>
117
- """
118
-
119
- # Create Gradio Chatbot Interface with Custom UI & 3D Avatar
120
  iface = gr.Interface(
121
  fn=chatbot_response,
122
  inputs="text",
123
  outputs="text",
124
  title="🤖 Cybersecurity AI Assistant",
125
- description="Ask me about NIST, ISO/IEC 27001, MITRE ATLAS, and ISO/IEC 27005. Powered by AI and real-time 3D visualization.",
126
  theme="default",
127
  css=custom_css,
128
  live=True, # Enables real-time updates for typing effect
129
  )
130
 
131
- # Embed 3D Avatar into the interface
132
- iface.launch(share=True, custom_js=three_js_html)
 
1
  import os
2
  import gradio as gr
3
  from langchain.chains import RetrievalQA
4
+ from langchain_community.vectorstores import Chroma # ✅ Fixed Import
5
+ from langchain.llms import OpenAI
6
+ from langchain_huggingface import HuggingFaceEndpoint # ✅ Corrected Import
7
+ from langchain.embeddings import OpenAIEmbeddings
8
+ from langchain_community.embeddings import HuggingFaceEmbeddings # ✅ Corrected Import
9
+ from langchain_community.document_loaders import PyPDFLoader # ✅ Corrected Import
10
  import time
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  # Define paths for cybersecurity training PDFs
13
  PDF_FILES = [
 
17
  "NIST_CSWP_04162018.pdf"
18
  ]
19
 
20
+ # Fetch Hugging Face API token securely from environment variables
21
+ HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACEHUB_API_TOKEN")
22
+ if HUGGINGFACE_API_KEY is None:
23
+ raise ValueError("❌ Hugging Face API token is missing! Set it in Hugging Face Spaces Secrets.")
24
 
25
+ # Load PDFs into ChromaDB
26
  def load_data():
27
  """Loads multiple PDFs and stores embeddings in ChromaDB"""
28
  all_docs = []
 
31
  loader = PyPDFLoader(pdf)
32
  all_docs.extend(loader.load())
33
 
34
+ embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2") # ✅ Use updated embeddings
 
 
 
 
35
 
36
  return Chroma.from_documents(all_docs, embeddings)
37
 
38
+ # Load the knowledge base
39
  vector_db = load_data()
40
 
41
+ # Load LLM from Hugging Face securely
42
+ llm = HuggingFaceEndpoint(
43
+ repo_id="google/flan-t5-large",
44
+ temperature=0.5, # ✅ Explicitly set temperature
45
+ max_length=512, # ✅ Explicitly set max_length
46
+ huggingfacehub_api_token=HUGGINGFACE_API_KEY
47
+ )
48
 
49
+ # Create Retrieval QA chain
50
  qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vector_db.as_retriever())
51
 
52
+ # Function to simulate futuristic typing effect
53
  def chatbot_response(question):
54
  """Handles chatbot queries with a typing effect"""
55
  response = qa_chain.run(question)
 
62
  # Custom futuristic CSS styling
63
  custom_css = """
64
  body {background-color: #0f172a; color: #0ff; font-family: 'Orbitron', sans-serif;}
 
65
  .gradio-container {background: linear-gradient(to bottom, #020c1b, #001f3f);}
66
  textarea {background: #011627; color: #0ff; font-size: 18px;}
67
  button {background: #0088ff; color: white; font-size: 20px; border-radius: 5px; border: none; padding: 10px;}
68
  button:hover {background: #00ffff; color: #000;}
69
  """
70
 
71
+ # Create Gradio Chatbot Interface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  iface = gr.Interface(
73
  fn=chatbot_response,
74
  inputs="text",
75
  outputs="text",
76
  title="🤖 Cybersecurity AI Assistant",
77
+ description="Ask me about NIST, ISO/IEC 27001, MITRE ATLAS, and ISO/IEC 27005. Powered by AI.",
78
  theme="default",
79
  css=custom_css,
80
  live=True, # Enables real-time updates for typing effect
81
  )
82
 
83
+ # Launch chatbot
84
+ iface.launch()