Hugging Face Spaces — status: Runtime error
import os
import time

import gradio as gr
from huggingface_hub import InferenceClient
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import Chroma
from langchain_core.runnables import RunnableLambda
from langchain_huggingface import HuggingFaceEmbeddings
# NOTE: the runtime `os.system("pip install -U ...")` call was removed. It ran
# *after* the imports above had already executed, so it could never repair a
# missing dependency for this process (a missing package would have crashed the
# imports first, and already-imported modules are unaffected by a later install).
# Declare huggingface_hub, langchain, langchain_huggingface, langchain_core and
# gradio in requirements.txt instead — Hugging Face Spaces installs that at build time.
# Cybersecurity reference documents to be ingested into the knowledge base.
PDF_FILES = [
    "ISOIEC 27001_2ef522.pdf",
    "ISO-IEC-27005-2022.pdf",
    "MITRE ATLAS Overview Combined_v1.pdf",
    "NIST_CSWP_04162018.pdf",
]

# The Hugging Face token must come from the environment (Spaces Secrets);
# fail fast at startup if it was never configured.
HUGGINGFACE_API_KEY = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
if HUGGINGFACE_API_KEY is None:
    raise ValueError("β Hugging Face API token is missing! Set it in Hugging Face Spaces Secrets.")
| # Load PDFs into ChromaDB | |
def load_data():
    """Load the configured reference PDFs and index them in a Chroma store.

    Missing files are skipped with a warning. Raises FileNotFoundError when
    none of the configured PDFs exist, instead of letting
    ``Chroma.from_documents`` fail later on an empty document list.

    Returns:
        Chroma: vector store built over all pages of the found PDFs.
    """
    all_docs = []
    for pdf in PDF_FILES:
        if os.path.exists(pdf):
            loader = PyPDFLoader(pdf)
            all_docs.extend(loader.load())
        else:
            print(f"Warning: skipping missing PDF: {pdf}")
    if not all_docs:
        raise FileNotFoundError(
            "None of the configured PDFs were found; cannot build the vector store."
        )
    # Small, fast sentence-transformers model for the embeddings.
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    return Chroma.from_documents(all_docs, embeddings)
# Build the searchable knowledge base once, at startup.
vector_db = load_data()

# Client for the hosted Hugging Face Inference API endpoint.
client = InferenceClient(
    model="https://api-inference.huggingface.co/models/google/flan-t5-large",
    token=HUGGINGFACE_API_KEY,
)
def query_llm(prompt):
    """Send ``prompt`` to the Hugging Face chat-completion API; return the reply text.

    NOTE(review): google/flan-t5-large is a seq2seq model and may not be served
    under the chat-completion task — confirm the model choice if the API
    rejects the request.
    """
    response = client.chat_completion(messages=[{"role": "user", "content": prompt}])
    # chat_completion returns a ChatCompletionOutput dataclass; use attribute
    # access (the documented API) rather than dict-style indexing.
    return response.choices[0].message.content
# RetrievalQA validates `llm` as a LangChain Runnable/language model — a bare
# Python function fails that validation (a likely source of the Spaces runtime
# error), so wrap query_llm in RunnableLambda.
qa_chain = RetrievalQA.from_chain_type(
    llm=RunnableLambda(query_llm),
    retriever=vector_db.as_retriever(),
)
def chatbot_response(question):
    """Answer ``question`` via the QA chain, yielding text with a typing effect.

    ``RetrievalQA.invoke`` returns a dict like {"query": ..., "result": ...};
    the original code iterated over that dict (i.e. over its keys), so the
    answer text is extracted from the "result" field here before streaming.
    """
    raw = qa_chain.invoke(question)
    answer = raw.get("result", str(raw)) if isinstance(raw, dict) else str(raw)
    displayed_response = ""
    for char in answer:
        displayed_response += char
        time.sleep(0.02)  # simulated typing delay between characters
        yield displayed_response
# Neon "futuristic" theme overrides injected into the Gradio page.
# (Runtime string — passed verbatim to gr.Interface(css=...); do not reformat.)
custom_css = """
body {background-color: #0f172a; color: #0ff; font-family: 'Orbitron', sans-serif;}
.gradio-container {background: linear-gradient(to bottom, #020c1b, #001f3f);}
textarea {background: #011627; color: #0ff; font-size: 18px;}
button {background: #0088ff; color: white; font-size: 20px; border-radius: 5px; border: none; padding: 10px;}
button:hover {background: #00ffff; color: #000;}
"""
# Gradio UI. Because `chatbot_response` is a generator, Gradio streams each
# yielded prefix to the output automatically — that is what produces the
# typing effect. `live=True` was dropped: it is not needed for streaming and
# would re-trigger the slow, API-backed function on every keystroke.
iface = gr.Interface(
    fn=chatbot_response,
    inputs="text",
    outputs="text",
    title="π€ Cybersecurity AI Assistant",
    description="Ask me about NIST, ISO/IEC 27001, MITRE ATLAS, and ISO/IEC 27005. Powered by AI.",
    theme="default",
    css=custom_css,
)

# share=True is ignored inside Hugging Face Spaces (the Space itself is the
# public URL) but keeps the script usable when run locally.
iface.launch(share=True)