# Cybersecurity RAG chatbot app for Hugging Face Spaces.
import os
import gradio as gr
import time
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma
from langchain_community.document_loaders import PyPDFLoader
from langchain_huggingface import HuggingFaceEmbeddings  # fixed import (moved out of langchain_community)
from huggingface_hub import InferenceClient  # new method for querying the Hugging Face LLM
# Install required dependencies (ensure latest versions)
# NOTE(review): this runs *after* the imports above have already executed, so it
# cannot fix a missing/outdated package for the current process — at best it
# affects the next restart. On Hugging Face Spaces these pins belong in
# requirements.txt instead of a runtime os.system call.
os.system("pip install -U huggingface_hub langchain_huggingface langchain_core langchain gradio")
# Define paths for the cybersecurity training PDFs indexed by the chatbot.
PDF_FILES = [
    "ISOIEC 27001_2ef522.pdf",
    "ISO-IEC-27005-2022.pdf",
    "MITRE ATLAS Overview Combined_v1.pdf",
    "NIST_CSWP_04162018.pdf",
]

# Fetch the Hugging Face API token securely from environment variables
# (set via Hugging Face Spaces "Secrets"; never hard-code it).
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if HUGGINGFACE_API_KEY is None:
    # Fail fast at startup: every downstream API call requires this token.
    # (Fixed: the original message contained a mojibake'd emoji character.)
    raise ValueError(
        "Hugging Face API token is missing! Set it in Hugging Face Spaces Secrets."
    )
# Load PDFs into ChromaDB
def load_data():
    """Load the training PDFs and index their pages in a Chroma vector store.

    Missing files are skipped with a warning instead of crashing, but if *no*
    PDF is found we raise a clear error rather than letting
    ``Chroma.from_documents`` fail on an empty document list.

    Returns:
        Chroma: vector store built from all pages of the PDFs in ``PDF_FILES``
        that exist on disk, embedded with all-MiniLM-L6-v2.

    Raises:
        FileNotFoundError: if none of the files in ``PDF_FILES`` is present.
    """
    all_docs = []
    for pdf in PDF_FILES:
        if os.path.exists(pdf):
            loader = PyPDFLoader(pdf)
            all_docs.extend(loader.load())
        else:
            print(f"Warning: PDF not found, skipping: {pdf}")
    if not all_docs:
        # Fail with an actionable message instead of a cryptic Chroma error.
        raise FileNotFoundError("None of the PDFs listed in PDF_FILES were found.")
    # Use the updated (langchain_huggingface) embedding model wrapper.
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    return Chroma.from_documents(all_docs, embeddings)
# Load the knowledge base (builds the Chroma index at startup).
vector_db = load_data()
# Initialize Hugging Face Inference Client (new recommended method)
# NOTE(review): google/flan-t5-large is a text2text-generation model; the
# chat_completion call made in query_llm below may not be supported for it by
# the serverless Inference API — confirm the model exposes the chat-completion
# task, or switch to a conversational/chat model.
client = InferenceClient(
"https://api-inference.huggingface.co/models/google/flan-t5-large",
token=HUGGINGFACE_API_KEY
)
# Function to interact with the Hugging Face model
def query_llm(prompt):
    """Send a single-turn chat query to the Hugging Face Inference API.

    Args:
        prompt: the user message to send to the remote model.

    Returns:
        str: the assistant's reply text from the first completion choice.
    """
    response = client.chat_completion(messages=[{"role": "user", "content": prompt}])
    # Use the documented attribute access on ChatCompletionOutput rather than
    # legacy dict-style indexing.
    return response.choices[0].message.content
# Create Retrieval QA chain
# NOTE(review): RetrievalQA.from_chain_type expects a LangChain language-model
# object (BaseLanguageModel / Runnable), not a plain Python function — passing
# query_llm here will most likely fail validation and is a probable cause of
# the Space's runtime error. Consider wrapping the endpoint with
# langchain_huggingface's HuggingFaceEndpoint/ChatHuggingFace instead.
qa_chain = RetrievalQA.from_chain_type(llm=query_llm, retriever=vector_db.as_retriever())
# Function to simulate a futuristic typing effect while answering.
def chatbot_response(question):
    """Answer *question* via the QA chain, streaming it character by character.

    Args:
        question: the user's question for the retrieval QA chain.

    Yields:
        str: the answer text revealed progressively (one extra character per
        yield), which Gradio renders as a typing effect.
    """
    # `invoke` replaces the deprecated `run`. RetrievalQA.invoke returns a
    # dict (with the answer under "result"); iterating the raw return value
    # would iterate the dict's *keys*, so extract the answer text first.
    result = qa_chain.invoke(question)
    response = result["result"] if isinstance(result, dict) else result
    displayed_response = ""
    for char in response:
        displayed_response += char
        time.sleep(0.02)  # simulate typing delay
        yield displayed_response
# Custom futuristic CSS styling, injected into the Gradio app created below.
custom_css = """
body {background-color: #0f172a; color: #0ff; font-family: 'Orbitron', sans-serif;}
.gradio-container {background: linear-gradient(to bottom, #020c1b, #001f3f);}
textarea {background: #011627; color: #0ff; font-size: 18px;}
button {background: #0088ff; color: white; font-size: 20px; border-radius: 5px; border: none; padding: 10px;}
button:hover {background: #00ffff; color: #000;}
"""
# Create Gradio Chatbot Interface
# NOTE(review): chatbot_response is a generator, which Gradio streams
# natively; confirm that combining this with live=True (re-runs on every
# keystroke) is intended, since it will fire a QA-chain query per edit.
iface = gr.Interface(
fn=chatbot_response,
inputs="text",
outputs="text",
title="π€ Cybersecurity AI Assistant",
description="Ask me about NIST, ISO/IEC 27001, MITRE ATLAS, and ISO/IEC 27005. Powered by AI.",
theme="default",
css=custom_css,
live=True, # Enables real-time updates for typing effect
)
# Launch the chatbot. share=True produces a public link when run locally
# (on Hugging Face Spaces the app is already publicly hosted).
# Fixed: the original trailing comment had been split onto its own line,
# leaving a bare statement that was a syntax error.
iface.launch(share=True)