Mr-HASSAN committed on
Commit
a136b37
Β·
verified Β·
1 Parent(s): 2767c17

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +109 -117
app.py CHANGED
@@ -1,118 +1,110 @@
1
- import gradio as gr
2
- import torch
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
-
5
- # HuatuoGPT model
6
- model_name = "FreedomIntelligence/HuatuoGPT-7B"
7
-
8
-
9
- class MedicalAI:
10
- def __init__(self):
11
- self.model = None
12
- self.tokenizer = None
13
- self.loaded = False
14
-
15
- def load_model(self):
16
- if self.loaded:
17
- return True
18
-
19
- try:
20
- print("πŸ”„ Loading HuatuoGPT-7B...")
21
- self.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
22
- self.model = AutoModelForCausalLM.from_pretrained(
23
- model_name,
24
- torch_dtype=torch.float16,
25
- device_map="auto",
26
- trust_remote_code=True
27
- )
28
- self.loaded = True
29
- print("βœ… HuatuoGPT Ready!")
30
- return True
31
- except Exception as e:
32
- print(f"❌ Error: {e}")
33
- return False
34
-
35
-
36
- medical_ai = MedicalAI()
37
-
38
-
39
- def get_ai_thinking(message):
40
- """Get HuatuoGPT's complete reasoning process"""
41
- if not medical_ai.loaded:
42
- success = medical_ai.load_model()
43
- if not success:
44
- return "πŸ”„ Loading AI... Please wait."
45
-
46
- try:
47
- # Simple one-line prompt to see natural reasoning
48
- prompt = f"Question: {message}\nAnswer:"
49
-
50
- inputs = medical_ai.tokenizer(prompt, return_tensors="pt")
51
-
52
- # Generate longer response to see full reasoning
53
- with torch.no_grad():
54
- outputs = medical_ai.model.generate(
55
- inputs.input_ids,
56
- max_new_tokens=400, # More tokens to see reasoning
57
- temperature=0.7,
58
- do_sample=True,
59
- top_p=0.9,
60
- repetition_penalty=1.1,
61
- pad_token_id=medical_ai.tokenizer.eos_token_id
62
- )
63
-
64
- # Get full response including reasoning
65
- full_response = medical_ai.tokenizer.decode(outputs[0], skip_special_tokens=True)
66
-
67
- # Extract everything after "Answer:"
68
- if "Answer:" in full_response:
69
- reasoning = full_response.split("Answer:")[1].strip()
70
- else:
71
- reasoning = full_response.replace(prompt, "").strip()
72
-
73
- return reasoning
74
-
75
- except Exception as e:
76
- return f"❌ Error: {str(e)}"
77
-
78
-
79
- # Clean interface focused on reasoning
80
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
81
- gr.Markdown("# 🧠 HuatuoGPT Medical Reasoning")
82
- gr.Markdown("**See how the AI doctor thinks and reasons**")
83
-
84
- with gr.Row():
85
- with gr.Column(scale=1):
86
- input_box = gr.Textbox(
87
- label="Your Medical Question",
88
- placeholder="headache? fever? sleep problems?",
89
- max_lines=1, # Single line only
90
- show_copy_button=True
91
- )
92
- ask_btn = gr.Button("πŸ€” Ask HuatuoGPT", variant="primary")
93
-
94
- with gr.Column(scale=2):
95
- output_box = gr.Textbox(
96
- label="HuatuoGPT's Reasoning",
97
- placeholder="AI thinking will appear here...",
98
- lines=8,
99
- show_copy_button=True
100
- )
101
-
102
- clear_btn = gr.Button("πŸ—‘οΈ Clear")
103
-
104
-
105
- # Function to get AI reasoning
106
- def show_reasoning(message):
107
- if not message.strip():
108
- return "Please enter a medical question."
109
- return get_ai_thinking(message)
110
-
111
-
112
- # Connect actions
113
- input_box.submit(show_reasoning, inputs=input_box, outputs=output_box)
114
- ask_btn.click(show_reasoning, inputs=input_box, outputs=output_box)
115
- clear_btn.click(lambda: ("", ""), outputs=[input_box, output_box])
116
-
117
- if __name__ == "__main__":
118
  demo.launch()
 
1
+ import gradio as gr
2
+ import torch
3
+ from transformers import AutoModelForCausalLM, AutoTokenizer
4
+
5
+ # HuatuoGPT model
6
+ model_name = "FreedomIntelligence/HuatuoGPT-7B"
7
+
8
class MedicalAI:
    """Lazy-loading holder for the HuatuoGPT-7B model and its tokenizer."""

    def __init__(self):
        # Nothing is downloaded here; load_model() does the heavy lifting
        # on first use so app startup stays fast.
        self.model = None
        self.tokenizer = None
        self.loaded = False

    def load_model(self):
        """Load tokenizer and model once. Returns True on success, False on failure."""
        if self.loaded:
            return True

        try:
            print("πŸ”„ Loading HuatuoGPT-7B...")
            self.tokenizer = AutoTokenizer.from_pretrained(
                model_name, trust_remote_code=True
            )
            self.model = AutoModelForCausalLM.from_pretrained(
                model_name,
                torch_dtype=torch.float16,
                device_map="auto",
                trust_remote_code=True,
            )
        except Exception as exc:
            print(f"❌ Error: {exc}")
            return False

        self.loaded = True
        print("βœ… HuatuoGPT Ready!")
        return True
33
+
34
# Module-level singleton shared by all Gradio callbacks; model weights are
# loaded lazily on the first question (see MedicalAI.load_model).
medical_ai = MedicalAI()
35
+
36
def get_ai_thinking(message):
    """Generate HuatuoGPT's free-form answer/reasoning for a medical question.

    Lazily loads the model on first call. Returns the generated text, or a
    user-facing status/error string if loading or generation fails.
    """
    if not medical_ai.loaded:
        success = medical_ai.load_model()
        if not success:
            return "πŸ”„ Loading AI... Please wait."

    try:
        # Simple one-line prompt to see natural reasoning
        prompt = f"Question: {message}\nAnswer:"

        inputs = medical_ai.tokenizer(prompt, return_tensors="pt")

        # FIX: with device_map="auto" the model may live on GPU while the
        # tokenizer returns CPU tensors — move them to the model's device,
        # otherwise generate() raises a device-mismatch error.
        device = medical_ai.model.device
        input_ids = inputs.input_ids.to(device)
        attention_mask = inputs.attention_mask.to(device)

        # Generate a longer response to see the full reasoning.
        with torch.no_grad():
            outputs = medical_ai.model.generate(
                input_ids,
                # FIX: pass the mask explicitly since pad_token_id == eos makes
                # padding ambiguous without it.
                attention_mask=attention_mask,
                max_new_tokens=400,  # More tokens to see reasoning
                temperature=0.7,
                do_sample=True,
                top_p=0.9,
                repetition_penalty=1.1,
                pad_token_id=medical_ai.tokenizer.eos_token_id,
            )

        # Get the full response including reasoning.
        full_response = medical_ai.tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Extract everything after the FIRST "Answer:" — splitting once keeps
        # any later occurrences of "Answer:" inside the generated text intact.
        if "Answer:" in full_response:
            reasoning = full_response.split("Answer:", 1)[1].strip()
        else:
            reasoning = full_response.replace(prompt, "").strip()

        return reasoning

    except Exception as e:
        return f"❌ Error: {str(e)}"
74
+
75
# Build the UI (no theme — the default Gradio theme is used deliberately).
with gr.Blocks() as demo:
    gr.Markdown("# 🧠 HuatuoGPT Medical Reasoning")
    gr.Markdown("**See how the AI doctor thinks and reasons**")

    with gr.Row():
        with gr.Column(scale=1):
            input_box = gr.Textbox(
                label="Your Medical Question",
                placeholder="headache? fever? sleep problems?",
                max_lines=1,  # keep the question to a single line
            )
            ask_btn = gr.Button("πŸ€” Ask HuatuoGPT", variant="primary")

        with gr.Column(scale=2):
            output_box = gr.Textbox(
                label="HuatuoGPT's Reasoning",
                placeholder="AI thinking will appear here...",
                lines=8,
            )

    clear_btn = gr.Button("πŸ—‘οΈ Clear")

    def on_question(message):
        """Validate the question, then forward it to the model."""
        if not message.strip():
            return "Please enter a medical question."
        return get_ai_thinking(message)

    def on_clear():
        """Reset both the question and the answer boxes."""
        return "", ""

    # Wire events: Enter key and button both trigger generation.
    input_box.submit(on_question, inputs=input_box, outputs=output_box)
    ask_btn.click(on_question, inputs=input_box, outputs=output_box)
    clear_btn.click(on_clear, outputs=[input_box, output_box])

if __name__ == "__main__":
    demo.launch()