# Cybersecurity AI Assistant — Hugging Face Spaces app.
import os
import time

import gradio as gr
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma
from langchain_community.llms import OpenAI  # community import path (post langchain 0.1 split)
from langchain_huggingface import HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.document_loaders import PyPDFLoader

# HACK: installing dependencies at runtime from inside the app; these should be
# pinned in the Space's requirements.txt instead. Kept to preserve behavior.
os.system("pip install -U huggingface_hub langchain_huggingface langchain_core langchain")
# Paths of the cybersecurity training PDFs bundled with the Space.
PDF_FILES = [
    "ISOIEC 27001_2ef522.pdf",
    "ISO-IEC-27005-2022.pdf",
    "MITRE ATLAS Overview Combined_v1.pdf",
    "NIST_CSWP_04162018.pdf",
]

# Fetch the Hugging Face API token from the environment (Spaces "Secrets").
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if HUGGINGFACE_API_KEY is None:
    # Fail fast at startup: every downstream Hugging Face call needs this token.
    # (Original message started with a mis-encoded emoji byte; removed.)
    raise ValueError(
        "Hugging Face API token is missing! Set it in Hugging Face Spaces Secrets."
    )
# Load PDFs into ChromaDB
def load_data():
    """Load the cybersecurity PDFs and index them in a Chroma vector store.

    Returns:
        Chroma: a vector store built from every PDF in ``PDF_FILES`` that
        exists on disk, embedded with all-MiniLM-L6-v2.

    Raises:
        FileNotFoundError: if none of the configured PDFs are present —
        ``Chroma.from_documents`` cannot build a store from zero documents,
        so we fail with a clear message instead of an opaque downstream error.
    """
    all_docs = []
    for pdf in PDF_FILES:
        if os.path.exists(pdf):  # skip PDFs missing from the Space
            loader = PyPDFLoader(pdf)
            all_docs.extend(loader.load())
    if not all_docs:
        raise FileNotFoundError(
            "None of the configured PDF files were found: " + ", ".join(PDF_FILES)
        )
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    return Chroma.from_documents(all_docs, embeddings)
# Load the knowledge base at startup (one-time embedding of the PDFs).
vector_db = load_data()

# Load the LLM from Hugging Face; the token comes from Space secrets, never hard-coded.
# (Original had comment fragments wrapped onto bare lines inside this call,
# which was a syntax error — rejoined as inline comments.)
llm = HuggingFaceEndpoint(
    repo_id="google/flan-t5-large",
    temperature=0.5,      # explicit sampling temperature
    max_new_tokens=250,   # stay within the Inference API output limit
    huggingfacehub_api_token=HUGGINGFACE_API_KEY,
)

# Create the Retrieval QA chain over the PDF knowledge base.
qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vector_db.as_retriever())
# Function to simulate futuristic typing effect
def chatbot_response(question):
    """Answer *question* via the retrieval chain, streaming it char by char.

    Yields progressively longer prefixes of the answer so Gradio renders a
    typing effect (the generator pairs with ``live=True`` in the interface).
    """
    # RetrievalQA.invoke takes {"query": ...} and returns a dict such as
    # {"query": ..., "result": ...}. The original iterated the raw return
    # value, which for a dict yields its KEYS, not the answer text.
    result = qa_chain.invoke({"query": question})
    answer = result["result"] if isinstance(result, dict) else str(result)
    displayed_response = ""
    for char in answer:
        displayed_response += char
        time.sleep(0.02)  # simulate typing delay
        yield displayed_response
# Custom futuristic CSS styling: neon cyan text on a dark navy gradient,
# injected into the Gradio page through the Interface's ``css`` argument.
custom_css = """
body {background-color: #0f172a; color: #0ff; font-family: 'Orbitron', sans-serif;}
.gradio-container {background: linear-gradient(to bottom, #020c1b, #001f3f);}
textarea {background: #011627; color: #0ff; font-size: 18px;}
button {background: #0088ff; color: white; font-size: 20px; border-radius: 5px; border: none; padding: 10px;}
button:hover {background: #00ffff; color: #000;}
"""
# Create the Gradio chatbot interface.
# The title's leading characters were mojibake ("π€") from the same encoding
# damage seen elsewhere in this file; restored to the robot emoji.
iface = gr.Interface(
    fn=chatbot_response,  # generator function -> Gradio streams partial outputs
    inputs="text",
    outputs="text",
    title="🤖 Cybersecurity AI Assistant",
    description="Ask me about NIST, ISO/IEC 27001, MITRE ATLAS, and ISO/IEC 27005. Powered by AI.",
    theme="default",
    css=custom_css,  # futuristic styling defined above
    live=True,  # Enables real-time updates for typing effect
)

# Launch chatbot (blocks and serves the web UI).
iface.launch()