# Cybersecurity RAG chatbot — Gradio UI + LangChain RetrievalQA over standards PDFs
# (Hugging Face Space app; requires HUGGINGFACEHUB_API_TOKEN in Space secrets)
# Standard library
import os
import time

# Third-party
import gradio as gr
from langchain.chains import RetrievalQA
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_huggingface import HuggingFaceEndpoint
# Cybersecurity training corpus: standards/framework PDFs bundled with the Space.
PDF_FILES = [
    "ISOIEC 27001_2ef522.pdf",               # ISO/IEC 27001 (ISMS requirements)
    "ISO-IEC-27005-2022.pdf",                # ISO/IEC 27005 (risk management)
    "MITRE ATLAS Overview Combined_v1.pdf",  # MITRE ATLAS (adversarial ML threats)
    "NIST_CSWP_04162018.pdf",                # NIST Cybersecurity Framework
]
# Fetch the Hugging Face API token securely from environment variables
# (set via "Secrets" in the Space settings — never hard-code it).
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if HUGGINGFACE_API_KEY is None:
    # Fail fast at startup with an actionable message rather than a cryptic
    # authentication error later when the LLM endpoint is first called.
    raise ValueError("Hugging Face API token is missing! Set it in Hugging Face Spaces Secrets.")
# Load PDFs into ChromaDB
def load_data():
    """Load the bundled PDFs and index them in a Chroma vector store.

    Iterates over ``PDF_FILES``, silently skipping any file that is not
    present in the Space, and embeds every loaded page with a local
    sentence-transformers model (no API calls needed for embedding).

    Returns:
        Chroma: vector store built from all successfully loaded pages.

    Raises:
        RuntimeError: if none of the configured PDFs could be loaded —
            ``Chroma.from_documents`` cannot build a store from an empty list.
    """
    all_docs = []
    for pdf in PDF_FILES:
        if os.path.exists(pdf):  # ensure the PDF exists in the Hugging Face Space
            loader = PyPDFLoader(pdf)
            all_docs.extend(loader.load())
    if not all_docs:
        # Surface a clear error instead of letting Chroma fail obscurely.
        raise RuntimeError("No PDF documents were found; cannot build the knowledge base.")
    # Lightweight embedding model that runs locally inside the Space.
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    return Chroma.from_documents(all_docs, embeddings)
# Build the knowledge base once at startup (module import time).
vector_db = load_data()

# Remote LLM served through the Hugging Face Inference API, authenticated
# with the token read from the Space secrets above.
llm = HuggingFaceEndpoint(
    repo_id="google/flan-t5-large",
    temperature=0.5,  # moderate sampling randomness
    max_length=512,   # cap on generated output length
    huggingfacehub_api_token=HUGGINGFACE_API_KEY,
)

# Retrieval-augmented QA chain: retrieve relevant chunks from the vector
# store, then let the LLM answer grounded in those chunks.
qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vector_db.as_retriever())
# Function to simulate futuristic typing effect
def chatbot_response(question):
    """Answer a question via the QA chain, streamed with a typing effect.

    Yields successively longer prefixes of the answer, one character at a
    time, so the Gradio UI renders it as if it were being typed live.
    """
    answer = qa_chain.run(question)
    # Reveal one more character per step, pausing briefly between steps.
    for end in range(1, len(answer) + 1):
        time.sleep(0.02)  # simulate typing delay
        yield answer[:end]
# Custom futuristic CSS styling
# Neon cyan-on-navy theme injected into the Gradio app: dark gradient page
# background, glowing input box, and a bright hover state on buttons.
custom_css = """
body {background-color: #0f172a; color: #0ff; font-family: 'Orbitron', sans-serif;}
.gradio-container {background: linear-gradient(to bottom, #020c1b, #001f3f);}
textarea {background: #011627; color: #0ff; font-size: 18px;}
button {background: #0088ff; color: white; font-size: 20px; border-radius: 5px; border: none; padding: 10px;}
button:hover {background: #00ffff; color: #000;}
"""
# Create the Gradio chatbot interface. Because `chatbot_response` is a
# generator, Gradio streams each yielded prefix to the output textbox,
# producing the typing effect.
iface = gr.Interface(
    fn=chatbot_response,
    inputs="text",
    outputs="text",
    title="🤖 Cybersecurity AI Assistant",  # repaired mojibake ("π€") in title
    description="Ask me about NIST, ISO/IEC 27001, MITRE ATLAS, and ISO/IEC 27005. Powered by AI.",
    theme="default",
    css=custom_css,
    live=True,  # enables real-time updates for the typing effect
)

# Launch the chatbot server (Spaces runs this file as a script).
iface.launch()