Manik Sheokand commited on
Commit
cf76e86
·
1 Parent(s): 7ee6c00

Fix: Add @spaces.GPU decorator and huggingface_hub dependency for GPU detection

Browse files
Files changed (6) hide show
  1. app.py +7 -1
  2. app.py.backup +226 -0
  3. app.py.old +226 -0
  4. requirements.txt +1 -0
  5. requirements.txt.backup +22 -0
  6. requirements.txt.old +22 -0
app.py CHANGED
@@ -3,6 +3,7 @@ import torch
3
  from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
4
  from PIL import Image
5
  import logging
 
6
 
7
  # Configure logging
8
  logging.basicConfig(level=logging.INFO)
@@ -214,7 +215,9 @@ def create_interface():
214
 
215
  return demo
216
 
217
- if __name__ == "__main__":
 
 
218
  # Create and launch the interface
219
  demo = create_interface()
220
  demo.launch(
@@ -224,3 +227,6 @@ if __name__ == "__main__":
224
  show_error=True
225
  )
226
 
 
 
 
 
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
from PIL import Image
import logging
# BUGFIX: the `@spaces.GPU` decorator lives in the standalone `spaces`
# package that Hugging Face pre-installs on Spaces hardware.
# `huggingface_hub` has no `spaces` submodule, so the previous
# `from huggingface_hub import spaces` raised ImportError at startup.
import spaces

# Configure logging
logging.basicConfig(level=logging.INFO)


@spaces.GPU
def main():
    """Main entry point with GPU decorator for Hugging Face Spaces.

    NOTE(review): `@spaces.GPU` is meant to mark the function whose body
    actually needs the GPU, so ZeroGPU can attach a device per call.
    Decorating the launcher (which blocks in `demo.launch`) preserves the
    committed behavior, but the decorator likely belongs on the inference
    function (`analyze_skin_condition`) instead — confirm intended usage.
    """
    # Create and launch the interface
    demo = create_interface()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True
    )


if __name__ == "__main__":
    main()
app.py.backup ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
4
+ from PIL import Image
5
+ import logging
6
+
7
+ # Configure logging
8
+ logging.basicConfig(level=logging.INFO)
9
+ logger = logging.getLogger(__name__)
10
+
11
+ # Global variables for model and processor
12
+ model = None
13
+ processor = None
14
+
15
def load_model():
    """Load the fine-tuned dermatology VLM into the module-level globals.

    Populates the global `model` and `processor` so that
    `analyze_skin_condition` can use them.

    Returns:
        bool: True on success, False if loading failed (the error is logged).
    """
    global model, processor

    try:
        # Load the merged model (replace with your actual model path)
        model_name = "ColdSlim/Dermatology-Qwen2.5-VL-3B"  # Update with your actual model name

        logger.info(f"Loading model: {model_name}")
        processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
        # NOTE(review): the repo name says "Qwen2.5-VL" but the class used is
        # Qwen2VLForConditionalGeneration (Qwen2-VL architecture) — confirm
        # the checkpoint actually matches this class.
        model = Qwen2VLForConditionalGeneration.from_pretrained(
            model_name,
            torch_dtype=torch.bfloat16,  # half-precision weights to cut memory
            device_map="auto",           # let accelerate place weights (GPU when available)
            trust_remote_code=True
        )

        logger.info("Model loaded successfully!")
        return True

    except Exception as e:
        # Broad catch is deliberate here: a load failure should surface as a
        # UI status message (see create_interface), not crash the app.
        logger.error(f"Error loading model: {e}")
        return False
38
+
39
def analyze_skin_condition(image, question="Describe this skin condition in detail."):
    """Analyze a skin condition from an uploaded image.

    Args:
        image: PIL image from the Gradio component, or None.
        question: Prompt text sent to the model alongside the image.

    Returns:
        str: The model's generated answer, or a user-facing error message
        (prefixed with the cross-mark emoji) when analysis cannot run.
    """
    global model, processor

    if model is None or processor is None:
        return "❌ Model not loaded. Please wait for the model to load or contact the administrator."

    if image is None:
        return "❌ Please upload an image first."

    try:
        # Build the chat-format request expected by Qwen2-VL processors.
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image},
                    {"type": "text", "text": question}
                ]
            }
        ]

        # Process the input
        text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        # NOTE(review): `process_vision_info` is normally provided by the
        # `qwen_vl_utils` package, not as a processor method — confirm this
        # attribute exists on the loaded processor.
        image_inputs, video_inputs = processor.process_vision_info(messages)

        inputs = processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt"
        )

        # Move tensors to the model's device.
        # BUGFIX: the original rebuilt `inputs` as a plain dict and then read
        # `inputs.input_ids`, which raises AttributeError on a dict — every
        # request fell into the except-branch and returned an error string.
        inputs = {k: v.to(model.device) if isinstance(v, torch.Tensor) else v
                  for k, v in inputs.items()}
        input_ids = inputs["input_ids"]

        # Generate response
        with torch.no_grad():
            generated_ids = model.generate(
                **inputs,
                max_new_tokens=512,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
                pad_token_id=processor.tokenizer.eos_token_id
            )

        # Strip the prompt tokens so only newly generated text is decoded.
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(input_ids, generated_ids)
        ]
        output_text = processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )[0]

        return output_text

    except Exception as e:
        logger.error(f"Error during inference: {e}")
        return f"❌ Error analyzing image: {str(e)}"
100
+
101
def create_interface():
    """Create the Gradio Blocks interface for the dermatology assistant.

    Loads the model eagerly (so the status banner can reflect the outcome),
    then wires an image + question input to `analyze_skin_condition`.

    Returns:
        gr.Blocks: the assembled (unlaunched) Gradio app.
    """

    # Load model on startup
    model_loaded = load_model()

    with gr.Blocks(
        title="Dermatology AI Assistant",
        theme=gr.themes.Soft(),
        css="""
        .gradio-container {
            max-width: 1200px !important;
            margin: auto !important;
        }
        .main-header {
            text-align: center;
            margin-bottom: 2rem;
        }
        .warning-box {
            background-color: #fff3cd;
            border: 1px solid #ffeaa7;
            border-radius: 8px;
            padding: 1rem;
            margin: 1rem 0;
        }
        """
    ) as demo:

        gr.HTML("""
        <div class="main-header">
            <h1>🩺 Dermatology AI Assistant</h1>
            <p>Powered by Qwen2.5-VL-3B fine-tuned for dermatology analysis</p>
        </div>
        """)

        # Warning message
        gr.HTML("""
        <div class="warning-box">
            <h3>⚠️ Medical Disclaimer</h3>
            <p>This AI assistant is for educational and research purposes only.
            It should not be used as a substitute for professional medical advice,
            diagnosis, or treatment. Always consult with a qualified healthcare
            provider for medical concerns.</p>
        </div>
        """)

        with gr.Row():
            with gr.Column(scale=1):
                # Image upload
                image_input = gr.Image(
                    label="Upload Skin Image",
                    type="pil",
                    height=400
                )

                # Question input
                question_input = gr.Textbox(
                    label="Question (Optional)",
                    placeholder="Describe this skin condition in detail.",
                    value="Describe this skin condition in detail.",
                    lines=3
                )

                # Analyze button
                analyze_btn = gr.Button(
                    "🔍 Analyze Skin Condition",
                    variant="primary",
                    size="lg"
                )

                # Example questions
                gr.HTML("""
                <h4>💡 Example Questions:</h4>
                <ul>
                    <li>What type of skin condition is this?</li>
                    <li>Describe the characteristics of this lesion.</li>
                    <li>What are the potential causes of this skin issue?</li>
                    <li>What should I know about this skin condition?</li>
                </ul>
                """)

            with gr.Column(scale=1):
                # Output
                output_text = gr.Textbox(
                    label="AI Analysis",
                    lines=15,
                    max_lines=20,
                    show_copy_button=True
                )

        # Examples
        # NOTE(review): each example row has two values but both map to the
        # same component (`question_input` listed twice), so the second value
        # is what ends up in the textbox — confirm this wiring is intended.
        gr.Examples(
            examples=[
                ["What type of skin condition is this?", "Describe this skin condition in detail."],
                ["What are the characteristics of this lesion?", "Describe this skin condition in detail."],
                ["What should I know about this skin issue?", "Describe this skin condition in detail."],
            ],
            inputs=[question_input, question_input],
            label="💡 Example Questions"
        )

        # Event handlers
        analyze_btn.click(
            fn=analyze_skin_condition,
            inputs=[image_input, question_input],
            outputs=output_text
        )

        # Model status
        if model_loaded:
            gr.HTML("<div style='text-align: center; color: green;'>✅ Model loaded successfully!</div>")
        else:
            gr.HTML("<div style='text-align: center; color: red;'>❌ Model loading failed. Please check the logs.</div>")

    return demo
216
+
217
if __name__ == "__main__":
    # Create and launch the interface
    demo = create_interface()
    demo.launch(
        server_name="0.0.0.0",  # bind all interfaces (required inside Spaces/containers)
        server_port=7860,       # standard Hugging Face Spaces port
        share=False,            # no public tunnel; Spaces provides the URL
        show_error=True         # surface tracebacks in the UI for debugging
    )
226
+
app.py.old ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
4
+ from PIL import Image
5
+ import logging
6
+
7
+ # Configure logging
8
+ logging.basicConfig(level=logging.INFO)
9
+ logger = logging.getLogger(__name__)
10
+
11
+ # Global variables for model and processor
12
+ model = None
13
+ processor = None
14
+
15
+ def load_model():
16
+ """Load the fine-tuned dermatology model"""
17
+ global model, processor
18
+
19
+ try:
20
+ # Load the merged model (replace with your actual model path)
21
+ model_name = "ColdSlim/Dermatology-Qwen2.5-VL-3B" # Update with your actual model name
22
+
23
+ logger.info(f"Loading model: {model_name}")
24
+ processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
25
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
26
+ model_name,
27
+ torch_dtype=torch.bfloat16,
28
+ device_map="auto",
29
+ trust_remote_code=True
30
+ )
31
+
32
+ logger.info("Model loaded successfully!")
33
+ return True
34
+
35
+ except Exception as e:
36
+ logger.error(f"Error loading model: {e}")
37
+ return False
38
+
39
+ def analyze_skin_condition(image, question="Describe this skin condition in detail."):
40
+ """Analyze skin condition from uploaded image"""
41
+ global model, processor
42
+
43
+ if model is None or processor is None:
44
+ return "❌ Model not loaded. Please wait for the model to load or contact the administrator."
45
+
46
+ if image is None:
47
+ return "❌ Please upload an image first."
48
+
49
+ try:
50
+ # Prepare the conversation
51
+ messages = [
52
+ {
53
+ "role": "user",
54
+ "content": [
55
+ {"type": "image", "image": image},
56
+ {"type": "text", "text": question}
57
+ ]
58
+ }
59
+ ]
60
+
61
+ # Process the input
62
+ text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
63
+ image_inputs, video_inputs = processor.process_vision_info(messages)
64
+
65
+ inputs = processor(
66
+ text=[text],
67
+ images=image_inputs,
68
+ videos=video_inputs,
69
+ padding=True,
70
+ return_tensors="pt"
71
+ )
72
+
73
+ # Move inputs to the same device as model
74
+ inputs = {k: v.to(model.device) if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
75
+
76
+ # Generate response
77
+ with torch.no_grad():
78
+ generated_ids = model.generate(
79
+ **inputs,
80
+ max_new_tokens=512,
81
+ do_sample=True,
82
+ temperature=0.7,
83
+ top_p=0.9,
84
+ pad_token_id=processor.tokenizer.eos_token_id
85
+ )
86
+
87
+ # Decode the response
88
+ generated_ids_trimmed = [
89
+ out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
90
+ ]
91
+ output_text = processor.batch_decode(
92
+ generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
93
+ )[0]
94
+
95
+ return output_text
96
+
97
+ except Exception as e:
98
+ logger.error(f"Error during inference: {e}")
99
+ return f"❌ Error analyzing image: {str(e)}"
100
+
101
+ def create_interface():
102
+ """Create the Gradio interface"""
103
+
104
+ # Load model on startup
105
+ model_loaded = load_model()
106
+
107
+ with gr.Blocks(
108
+ title="Dermatology AI Assistant",
109
+ theme=gr.themes.Soft(),
110
+ css="""
111
+ .gradio-container {
112
+ max-width: 1200px !important;
113
+ margin: auto !important;
114
+ }
115
+ .main-header {
116
+ text-align: center;
117
+ margin-bottom: 2rem;
118
+ }
119
+ .warning-box {
120
+ background-color: #fff3cd;
121
+ border: 1px solid #ffeaa7;
122
+ border-radius: 8px;
123
+ padding: 1rem;
124
+ margin: 1rem 0;
125
+ }
126
+ """
127
+ ) as demo:
128
+
129
+ gr.HTML("""
130
+ <div class="main-header">
131
+ <h1>🩺 Dermatology AI Assistant</h1>
132
+ <p>Powered by Qwen2.5-VL-3B fine-tuned for dermatology analysis</p>
133
+ </div>
134
+ """)
135
+
136
+ # Warning message
137
+ gr.HTML("""
138
+ <div class="warning-box">
139
+ <h3>⚠️ Medical Disclaimer</h3>
140
+ <p>This AI assistant is for educational and research purposes only.
141
+ It should not be used as a substitute for professional medical advice,
142
+ diagnosis, or treatment. Always consult with a qualified healthcare
143
+ provider for medical concerns.</p>
144
+ </div>
145
+ """)
146
+
147
+ with gr.Row():
148
+ with gr.Column(scale=1):
149
+ # Image upload
150
+ image_input = gr.Image(
151
+ label="Upload Skin Image",
152
+ type="pil",
153
+ height=400
154
+ )
155
+
156
+ # Question input
157
+ question_input = gr.Textbox(
158
+ label="Question (Optional)",
159
+ placeholder="Describe this skin condition in detail.",
160
+ value="Describe this skin condition in detail.",
161
+ lines=3
162
+ )
163
+
164
+ # Analyze button
165
+ analyze_btn = gr.Button(
166
+ "🔍 Analyze Skin Condition",
167
+ variant="primary",
168
+ size="lg"
169
+ )
170
+
171
+ # Example questions
172
+ gr.HTML("""
173
+ <h4>💡 Example Questions:</h4>
174
+ <ul>
175
+ <li>What type of skin condition is this?</li>
176
+ <li>Describe the characteristics of this lesion.</li>
177
+ <li>What are the potential causes of this skin issue?</li>
178
+ <li>What should I know about this skin condition?</li>
179
+ </ul>
180
+ """)
181
+
182
+ with gr.Column(scale=1):
183
+ # Output
184
+ output_text = gr.Textbox(
185
+ label="AI Analysis",
186
+ lines=15,
187
+ max_lines=20,
188
+ show_copy_button=True
189
+ )
190
+
191
+ # Examples
192
+ gr.Examples(
193
+ examples=[
194
+ ["What type of skin condition is this?", "Describe this skin condition in detail."],
195
+ ["What are the characteristics of this lesion?", "Describe this skin condition in detail."],
196
+ ["What should I know about this skin issue?", "Describe this skin condition in detail."],
197
+ ],
198
+ inputs=[question_input, question_input],
199
+ label="💡 Example Questions"
200
+ )
201
+
202
+ # Event handlers
203
+ analyze_btn.click(
204
+ fn=analyze_skin_condition,
205
+ inputs=[image_input, question_input],
206
+ outputs=output_text
207
+ )
208
+
209
+ # Model status
210
+ if model_loaded:
211
+ gr.HTML("<div style='text-align: center; color: green;'>✅ Model loaded successfully!</div>")
212
+ else:
213
+ gr.HTML("<div style='text-align: center; color: red;'>❌ Model loading failed. Please check the logs.</div>")
214
+
215
+ return demo
216
+
217
+ if __name__ == "__main__":
218
+ # Create and launch the interface
219
+ demo = create_interface()
220
+ demo.launch(
221
+ server_name="0.0.0.0",
222
+ server_port=7860,
223
+ share=False,
224
+ show_error=True
225
+ )
226
+
requirements.txt CHANGED
@@ -4,6 +4,7 @@ torch>=2.0.0
4
  transformers>=4.37.0
5
  accelerate>=0.20.0
6
  gradio>=4.0.0
 
7
 
8
  # Vision and image processing
9
  Pillow>=9.0.0
 
4
  transformers>=4.37.0
5
  accelerate>=0.20.0
6
  gradio>=4.0.0
7
+ huggingface_hub>=0.20.0  # NOTE(review): the @spaces.GPU decorator comes from the standalone `spaces` package (pre-installed on HF Spaces), not from huggingface_hub — verify this dependency actually enables GPU detection
8
 
9
  # Vision and image processing
10
  Pillow>=9.0.0
requirements.txt.backup ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Hugging Face Spaces Requirements for Dermatology AI Assistant
2
+ # Core dependencies
3
+ torch>=2.0.0
4
+ transformers>=4.37.0
5
+ accelerate>=0.20.0
6
+ gradio>=4.0.0
7
+
8
+ # Vision and image processing
9
+ Pillow>=9.0.0
10
+ opencv-python>=4.5.0
11
+
12
+ # Qwen2-VL specific
13
+ qwen-vl-utils>=0.0.1
14
+
15
+ # Optional: For better performance
16
+ flash-attn>=2.0.0
17
+ deepspeed>=0.10.0
18
+
19
+ # Utilities
20
+ numpy>=1.21.0
21
+ requests>=2.25.0
22
+
requirements.txt.old ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Hugging Face Spaces Requirements for Dermatology AI Assistant
2
+ # Core dependencies
3
+ torch>=2.0.0
4
+ transformers>=4.37.0
5
+ accelerate>=0.20.0
6
+ gradio>=4.0.0
7
+
8
+ # Vision and image processing
9
+ Pillow>=9.0.0
10
+ opencv-python>=4.5.0
11
+
12
+ # Qwen2-VL specific
13
+ qwen-vl-utils>=0.0.1
14
+
15
+ # Optional: For better performance
16
+ flash-attn>=2.0.0
17
+ deepspeed>=0.10.0
18
+
19
+ # Utilities
20
+ numpy>=1.21.0
21
+ requests>=2.25.0
22
+