# app.py — Cybersecurity AI Assistant: a Gradio chatbot that answers questions
# about NIST / ISO-IEC / MITRE ATLAS documents via LangChain RetrievalQA.
import os
import gradio as gr
from langchain.chains import RetrievalQA
from langchain.vectorstores import Chroma
from langchain.llms import OpenAI, HuggingFaceHub
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
from langchain.document_loaders import PyPDFLoader
import time
# Cybersecurity reference documents bundled with the Space. Every file that
# exists on disk is loaded and indexed at start-up; missing files are skipped.
PDF_FILES = [
    "ISOIEC 27001_2ef522.pdf",
    "ISO-IEC-27005-2022.pdf",
    "MITRE ATLAS Overview Combined_v1.pdf",
    "NIST_CSWP_04162018.pdf"
]

# Backend switch for both embeddings and the LLM:
#   True  -> OpenAI API (online, requires an API key)
#   False -> Hugging Face models (free, no key needed)
USE_OPENAI = False # Set to True if using OpenAI API for better responses
def load_data():
    """Load the reference PDFs and index them in a Chroma vector store.

    Iterates over ``PDF_FILES``, loads every PDF that exists on disk page by
    page, embeds the pages, and builds an in-memory Chroma store.

    Returns:
        Chroma: vector store built from all loaded document pages.

    Raises:
        FileNotFoundError: if none of the configured PDFs are present.
            Without this guard ``Chroma.from_documents`` would fail later
            with an opaque error on an empty document list.
    """
    all_docs = []
    for pdf in PDF_FILES:
        # Skip quietly if a PDF was not uploaded to the Hugging Face Space.
        if os.path.exists(pdf):
            loader = PyPDFLoader(pdf)
            all_docs.extend(loader.load())

    if not all_docs:
        raise FileNotFoundError(
            "None of the configured PDFs were found: " + ", ".join(PDF_FILES)
        )

    # Online (OpenAI) vs offline (sentence-transformers) embeddings must match
    # the USE_OPENAI choice made for the LLM below.
    if USE_OPENAI:
        embeddings = OpenAIEmbeddings()
    else:
        embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-MiniLM-L6-v2"
        )
    return Chroma.from_documents(all_docs, embeddings)
# Build the PDF knowledge base once at module import.
vector_db = load_data()

# Select the LLM backend to match USE_OPENAI: the OpenAI completion API when
# online, otherwise a hosted Flan-T5 model via the Hugging Face Hub.
llm = (
    OpenAI()
    if USE_OPENAI
    else HuggingFaceHub(
        repo_id="google/flan-t5-large",
        model_kwargs={"temperature": 0.5, "max_length": 512},
    )
)

# Retrieval-augmented QA chain grounded in the indexed documents.
qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vector_db.as_retriever())
# Generator that streams the answer so the UI shows a "typing" animation.
def chatbot_response(question):
    """Yield the QA chain's answer one character at a time.

    Each yielded value is a progressively longer prefix of the full answer;
    the 20 ms pause per character simulates live typing in the Gradio UI.
    """
    answer = qa_chain.run(question)
    for end in range(1, len(answer) + 1):
        time.sleep(0.02)  # Simulate typing delay
        yield answer[:end]
# Futuristic neon theme injected into the Gradio page: dark navy gradient
# background, cyan text/borders, and Orbitron font for the sci-fi look.
custom_css = """
body {background-color: #0f172a; color: #0ff; font-family: 'Orbitron', sans-serif;}
#chatbot-container {border: 2px solid #00ffff; background: rgba(0, 0, 0, 0.8); padding: 20px; border-radius: 15px;}
.gradio-container {background: linear-gradient(to bottom, #020c1b, #001f3f);}
textarea {background: #011627; color: #0ff; font-size: 18px;}
button {background: #0088ff; color: white; font-size: 20px; border-radius: 5px; border: none; padding: 10px;}
button:hover {background: #00ffff; color: #000;}
"""
# HTML/JS snippet for a 3D avatar: loads Three.js from a CDN and, on window
# load, renders a spinning cyan wireframe sphere with a point light into the
# #avatar-container div. Intended to be embedded in the Gradio page.
three_js_html = """
<div id="avatar-container">
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
<script>
function create3DAvatar() {
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(75, 1, 0.1, 1000);
var renderer = new THREE.WebGLRenderer({ alpha: true });
renderer.setSize(300, 300);
document.getElementById('avatar-container').appendChild(renderer.domElement);
var geometry = new THREE.SphereGeometry(1, 32, 32);
var material = new THREE.MeshStandardMaterial({ color: 0x00ffff, wireframe: true });
var avatar = new THREE.Mesh(geometry, material);
scene.add(avatar);
var light = new THREE.PointLight(0x00ffff, 1, 100);
light.position.set(2, 2, 5);
scene.add(light);
camera.position.z = 3;
function animate() {
requestAnimationFrame(animate);
avatar.rotation.y += 0.01;
renderer.render(scene, camera);
}
animate();
}
window.onload = create3DAvatar;
</script>
</div>
"""
# Build the Gradio chatbot interface with the custom neon styling.
# NOTE(fix): the avatar HTML is passed as `article`, which Gradio renders as
# HTML beneath the interface. `launch()` has no `custom_js` parameter, so the
# original `iface.launch(share=True, custom_js=...)` raised a TypeError and
# the app never started. The title's robot emoji was also mojibake
# ("πŸ€–", UTF-8 bytes decoded as cp1252) and is restored to "🤖".
iface = gr.Interface(
    fn=chatbot_response,
    inputs="text",
    outputs="text",
    title="🤖 Cybersecurity AI Assistant",
    description=(
        "Ask me about NIST, ISO/IEC 27001, MITRE ATLAS, and ISO/IEC 27005. "
        "Powered by AI and real-time 3D visualization."
    ),
    article=three_js_html,  # 3D Three.js avatar rendered below the chat UI
    theme="default",
    css=custom_css,
    live=True,  # Enables real-time updates for typing effect
)

# share=True exposes a temporary public URL in addition to the local server.
iface.launch(share=True)