Mr-HASSAN committed on
Commit
2767c17
Β·
verified Β·
1 Parent(s): 65b8275

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +118 -0
  2. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from transformers import AutoModelForCausalLM, AutoTokenizer
4
+
5
# Hugging Face Hub ID of HuatuoGPT-7B, a medical-domain causal LM
# (loaded lazily by MedicalAI.load_model below).
model_name = "FreedomIntelligence/HuatuoGPT-7B"
7
+
8
+
9
class MedicalAI:
    """Lazy-loading wrapper around the HuatuoGPT-7B tokenizer and model.

    Nothing heavyweight happens at construction time; the multi-gigabyte
    download/initialization is deferred until the first ``load_model`` call
    so the app can start serving its UI immediately.
    """

    def __init__(self):
        # All state is populated lazily by load_model().
        self.model = None
        self.tokenizer = None
        self.loaded = False

    def load_model(self):
        """Load tokenizer and model exactly once.

        Returns:
            bool: True if the model is (now) ready, False if loading failed.
        """
        if self.loaded:
            # Already initialized — nothing to do.
            return True

        try:
            print("🔄 Loading HuatuoGPT-7B...")
            self.tokenizer = AutoTokenizer.from_pretrained(
                model_name, trust_remote_code=True
            )
            # fp16 + device_map="auto" lets accelerate place/shard the 7B
            # weights across whatever hardware is available.
            self.model = AutoModelForCausalLM.from_pretrained(
                model_name,
                torch_dtype=torch.float16,
                device_map="auto",
                trust_remote_code=True,
            )
        except Exception as e:
            # Best-effort: report and leave self.loaded False so a later
            # call can retry.
            print(f"❌ Error: {e}")
            return False

        self.loaded = True
        print("✅ HuatuoGPT Ready!")
        return True
34
+
35
+
36
# Module-level singleton shared by all Gradio requests; the model itself is
# loaded lazily on the first question.
medical_ai = MedicalAI()
37
+
38
+
39
def get_ai_thinking(message):
    """Run HuatuoGPT on *message* and return its generated reasoning text.

    Args:
        message: The user's medical question (plain text).

    Returns:
        str: The model's answer, or a user-facing status/error string.
    """
    if not medical_ai.loaded:
        success = medical_ai.load_model()
        if not success:
            return "🔄 Loading AI... Please wait."

    try:
        # Simple one-line prompt to see natural reasoning
        prompt = f"Question: {message}\nAnswer:"

        inputs = medical_ai.tokenizer(prompt, return_tensors="pt")

        # BUG FIX: with device_map="auto" the model may live on GPU while the
        # tokenizer output defaults to CPU; move every input tensor to the
        # model's device before generating.
        device = next(medical_ai.model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}

        # Generate longer response to see full reasoning.
        with torch.no_grad():
            outputs = medical_ai.model.generate(
                **inputs,  # also passes attention_mask (fixes pad/eos warning)
                max_new_tokens=400,  # More tokens to see reasoning
                temperature=0.7,
                do_sample=True,
                top_p=0.9,
                repetition_penalty=1.1,
                pad_token_id=medical_ai.tokenizer.eos_token_id,
            )

        # Slice off the prompt tokens and decode only what was generated —
        # more robust than string-splitting on "Answer:", which breaks if
        # the model repeats that marker in its output.
        prompt_len = inputs["input_ids"].shape[1]
        reasoning = medical_ai.tokenizer.decode(
            outputs[0][prompt_len:], skip_special_tokens=True
        ).strip()

        return reasoning

    except Exception as e:
        # Surface the failure to the UI instead of crashing the app.
        return f"❌ Error: {str(e)}"
77
+
78
+
79
# Clean interface focused on reasoning.
# NOTE: component creation order inside gr.Blocks defines the rendered page
# layout, so the statements below are intentionally ordered.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧠 HuatuoGPT Medical Reasoning")
    gr.Markdown("**See how the AI doctor thinks and reasons**")

    with gr.Row():
        # Left column: single-line question input plus submit button.
        with gr.Column(scale=1):
            input_box = gr.Textbox(
                label="Your Medical Question",
                placeholder="headache? fever? sleep problems?",
                max_lines=1,  # Single line only
                show_copy_button=True
            )
            ask_btn = gr.Button("🤔 Ask HuatuoGPT", variant="primary")

        # Right column: wider output area showing the model's answer.
        with gr.Column(scale=2):
            output_box = gr.Textbox(
                label="HuatuoGPT's Reasoning",
                placeholder="AI thinking will appear here...",
                lines=8,
                show_copy_button=True
            )

    clear_btn = gr.Button("🗑️ Clear")

    # Function to get AI reasoning.
    def show_reasoning(message):
        # Guard against empty/whitespace-only input before invoking the model.
        if not message.strip():
            return "Please enter a medical question."
        return get_ai_thinking(message)

    # Connect actions: pressing Enter in the textbox and clicking the button
    # both trigger generation; Clear resets both textboxes.
    input_box.submit(show_reasoning, inputs=input_box, outputs=output_box)
    ask_btn.click(show_reasoning, inputs=input_box, outputs=output_box)
    clear_btn.click(lambda: ("", ""), outputs=[input_box, output_box])

if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ torch>=2.1.0
2
+ transformers>=4.30.0
3
+ gradio>=4.0.0
4
+ accelerate
5
+ sentencepiece
6
+ protobuf