# app.py — Cybersecurity AI Assistant (Hugging Face Space)
# Gradio chatbot backed by a LangChain RetrievalQA chain over ChromaDB.
import os
import gradio as gr
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma # βœ… Fixed Import
from langchain.llms import OpenAI
from langchain_huggingface import HuggingFaceEndpoint # βœ… Corrected Import
from langchain.embeddings import OpenAIEmbeddings
from langchain_community.embeddings import HuggingFaceEmbeddings # βœ… Corrected Import
from langchain_community.document_loaders import PyPDFLoader # βœ… Corrected Import
import time
# Cybersecurity reference PDFs that seed the knowledge base. Each file is
# expected to sit at the root of the Hugging Face Space repository.
PDF_FILES = [
    "ISOIEC 27001_2ef522.pdf",
    "ISO-IEC-27005-2022.pdf",
    "MITRE ATLAS Overview Combined_v1.pdf",
    "NIST_CSWP_04162018.pdf",
]

# The Hugging Face token must be supplied through the Space's secret store;
# fail fast at startup if it was never configured.
HUGGINGFACE_API_KEY = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
if HUGGINGFACE_API_KEY is None:
    raise ValueError("❌ Hugging Face API token is missing! Set it in Hugging Face Spaces Secrets.")
# Load PDFs into ChromaDB
def load_data():
    """Load the configured cybersecurity PDFs and index them in ChromaDB.

    Every PDF in ``PDF_FILES`` that exists on disk is split into per-page
    documents by ``PyPDFLoader`` and embedded with the MiniLM sentence
    transformer.

    Returns:
        Chroma: an in-memory vector store over all loaded pages.

    Raises:
        FileNotFoundError: if none of the configured PDFs are present —
            ``Chroma.from_documents`` cannot build a store from an empty
            document list, so we fail fast with a clear message instead.
    """
    all_docs = []
    for pdf in PDF_FILES:
        if os.path.exists(pdf):  # Ensure the PDF exists in the Hugging Face Space
            loader = PyPDFLoader(pdf)
            all_docs.extend(loader.load())
        else:
            # Surface missing files in the Space logs rather than silently
            # shrinking the knowledge base.
            print(f"⚠️ Skipping missing PDF: {pdf}")
    if not all_docs:
        raise FileNotFoundError(
            "None of the configured PDF files were found; cannot build the knowledge base."
        )
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    return Chroma.from_documents(all_docs, embeddings)
# Build the vector store once at startup so every request reuses the same
# index (load_data reads the PDFs and embeds them into ChromaDB).
vector_db = load_data()
# Remote LLM served through the Hugging Face Inference API.
# NOTE(review): google/flan-t5-large is a seq2seq model — confirm the
# endpoint's default task is compatible, and that this version of
# langchain_huggingface accepts `max_length` (newer releases expect
# `max_new_tokens`).
llm = HuggingFaceEndpoint(
repo_id="google/flan-t5-large",
temperature=0.5, # sampling temperature for the endpoint
max_length=512, # cap on generated output length
huggingfacehub_api_token=HUGGINGFACE_API_KEY
)
# Retrieval-augmented QA chain: the retriever pulls relevant chunks from
# ChromaDB and the LLM composes the final answer.
qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vector_db.as_retriever())
# Streams the answer character-by-character to simulate live typing.
def chatbot_response(question):
    """Answer a user question via the retrieval-QA chain.

    Yields successively longer prefixes of the answer, pausing briefly
    between characters so the UI shows a typing effect.
    """
    answer = qa_chain.run(question)
    for end in range(1, len(answer) + 1):
        time.sleep(0.02)  # short pause per character for the typing effect
        yield answer[:end]
# Custom futuristic CSS styling injected into the Gradio page:
# neon cyan text on a dark navy gradient, with hover feedback on buttons.
custom_css = """
body {background-color: #0f172a; color: #0ff; font-family: 'Orbitron', sans-serif;}
.gradio-container {background: linear-gradient(to bottom, #020c1b, #001f3f);}
textarea {background: #011627; color: #0ff; font-size: 18px;}
button {background: #0088ff; color: white; font-size: 20px; border-radius: 5px; border: none; padding: 10px;}
button:hover {background: #00ffff; color: #000;}
"""
# Create Gradio Chatbot Interface wired to the streaming responder.
# NOTE(review): chatbot_response is a generator, so Gradio streams each
# yielded prefix; `live=True` re-runs the fn on every input change —
# confirm that is the intended UX rather than submit-on-enter only.
iface = gr.Interface(
fn=chatbot_response,
inputs="text",
outputs="text",
title="πŸ€– Cybersecurity AI Assistant",
description="Ask me about NIST, ISO/IEC 27001, MITRE ATLAS, and ISO/IEC 27005. Powered by AI.",
theme="default",
css=custom_css,
live=True, # Enables real-time updates for typing effect
)
# Start the web app (blocks until the Space is shut down).
iface.launch()