ColdSlim committed on
Commit
6bd5d6c
·
verified ·
1 Parent(s): 23f21e6

Delete app.py.old

Browse files
Files changed (1) hide show
  1. app.py.old +0 -226
app.py.old DELETED
@@ -1,226 +0,0 @@
1
- import gradio as gr
2
- import torch
3
- from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
4
- from PIL import Image
5
- import logging
6
-
7
- # Configure logging
8
- logging.basicConfig(level=logging.INFO)
9
- logger = logging.getLogger(__name__)
10
-
11
- # Global variables for model and processor
12
- model = None
13
- processor = None
14
-
15
- def load_model():
16
- """Load the fine-tuned dermatology model"""
17
- global model, processor
18
-
19
- try:
20
- # Load the merged model (replace with your actual model path)
21
- model_name = "ColdSlim/Dermatology-Qwen2.5-VL-3B" # Update with your actual model name
22
-
23
- logger.info(f"Loading model: {model_name}")
24
- processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
25
- model = Qwen2VLForConditionalGeneration.from_pretrained(
26
- model_name,
27
- torch_dtype=torch.bfloat16,
28
- device_map="auto",
29
- trust_remote_code=True
30
- )
31
-
32
- logger.info("Model loaded successfully!")
33
- return True
34
-
35
- except Exception as e:
36
- logger.error(f"Error loading model: {e}")
37
- return False
38
-
39
- def analyze_skin_condition(image, question="Describe this skin condition in detail."):
40
- """Analyze skin condition from uploaded image"""
41
- global model, processor
42
-
43
- if model is None or processor is None:
44
- return "❌ Model not loaded. Please wait for the model to load or contact the administrator."
45
-
46
- if image is None:
47
- return "❌ Please upload an image first."
48
-
49
- try:
50
- # Prepare the conversation
51
- messages = [
52
- {
53
- "role": "user",
54
- "content": [
55
- {"type": "image", "image": image},
56
- {"type": "text", "text": question}
57
- ]
58
- }
59
- ]
60
-
61
- # Process the input
62
- text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
63
- image_inputs, video_inputs = processor.process_vision_info(messages)
64
-
65
- inputs = processor(
66
- text=[text],
67
- images=image_inputs,
68
- videos=video_inputs,
69
- padding=True,
70
- return_tensors="pt"
71
- )
72
-
73
- # Move inputs to the same device as model
74
- inputs = {k: v.to(model.device) if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
75
-
76
- # Generate response
77
- with torch.no_grad():
78
- generated_ids = model.generate(
79
- **inputs,
80
- max_new_tokens=512,
81
- do_sample=True,
82
- temperature=0.7,
83
- top_p=0.9,
84
- pad_token_id=processor.tokenizer.eos_token_id
85
- )
86
-
87
- # Decode the response
88
- generated_ids_trimmed = [
89
- out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
90
- ]
91
- output_text = processor.batch_decode(
92
- generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
93
- )[0]
94
-
95
- return output_text
96
-
97
- except Exception as e:
98
- logger.error(f"Error during inference: {e}")
99
- return f"❌ Error analyzing image: {str(e)}"
100
-
101
- def create_interface():
102
- """Create the Gradio interface"""
103
-
104
- # Load model on startup
105
- model_loaded = load_model()
106
-
107
- with gr.Blocks(
108
- title="Dermatology AI Assistant",
109
- theme=gr.themes.Soft(),
110
- css="""
111
- .gradio-container {
112
- max-width: 1200px !important;
113
- margin: auto !important;
114
- }
115
- .main-header {
116
- text-align: center;
117
- margin-bottom: 2rem;
118
- }
119
- .warning-box {
120
- background-color: #fff3cd;
121
- border: 1px solid #ffeaa7;
122
- border-radius: 8px;
123
- padding: 1rem;
124
- margin: 1rem 0;
125
- }
126
- """
127
- ) as demo:
128
-
129
- gr.HTML("""
130
- <div class="main-header">
131
- <h1>🩺 Dermatology AI Assistant</h1>
132
- <p>Powered by Qwen2.5-VL-3B fine-tuned for dermatology analysis</p>
133
- </div>
134
- """)
135
-
136
- # Warning message
137
- gr.HTML("""
138
- <div class="warning-box">
139
- <h3>⚠️ Medical Disclaimer</h3>
140
- <p>This AI assistant is for educational and research purposes only.
141
- It should not be used as a substitute for professional medical advice,
142
- diagnosis, or treatment. Always consult with a qualified healthcare
143
- provider for medical concerns.</p>
144
- </div>
145
- """)
146
-
147
- with gr.Row():
148
- with gr.Column(scale=1):
149
- # Image upload
150
- image_input = gr.Image(
151
- label="Upload Skin Image",
152
- type="pil",
153
- height=400
154
- )
155
-
156
- # Question input
157
- question_input = gr.Textbox(
158
- label="Question (Optional)",
159
- placeholder="Describe this skin condition in detail.",
160
- value="Describe this skin condition in detail.",
161
- lines=3
162
- )
163
-
164
- # Analyze button
165
- analyze_btn = gr.Button(
166
- "🔍 Analyze Skin Condition",
167
- variant="primary",
168
- size="lg"
169
- )
170
-
171
- # Example questions
172
- gr.HTML("""
173
- <h4>💡 Example Questions:</h4>
174
- <ul>
175
- <li>What type of skin condition is this?</li>
176
- <li>Describe the characteristics of this lesion.</li>
177
- <li>What are the potential causes of this skin issue?</li>
178
- <li>What should I know about this skin condition?</li>
179
- </ul>
180
- """)
181
-
182
- with gr.Column(scale=1):
183
- # Output
184
- output_text = gr.Textbox(
185
- label="AI Analysis",
186
- lines=15,
187
- max_lines=20,
188
- show_copy_button=True
189
- )
190
-
191
- # Examples
192
- gr.Examples(
193
- examples=[
194
- ["What type of skin condition is this?", "Describe this skin condition in detail."],
195
- ["What are the characteristics of this lesion?", "Describe this skin condition in detail."],
196
- ["What should I know about this skin issue?", "Describe this skin condition in detail."],
197
- ],
198
- inputs=[question_input, question_input],
199
- label="💡 Example Questions"
200
- )
201
-
202
- # Event handlers
203
- analyze_btn.click(
204
- fn=analyze_skin_condition,
205
- inputs=[image_input, question_input],
206
- outputs=output_text
207
- )
208
-
209
- # Model status
210
- if model_loaded:
211
- gr.HTML("<div style='text-align: center; color: green;'>✅ Model loaded successfully!</div>")
212
- else:
213
- gr.HTML("<div style='text-align: center; color: red;'>❌ Model loading failed. Please check the logs.</div>")
214
-
215
- return demo
216
-
217
- if __name__ == "__main__":
218
- # Create and launch the interface
219
- demo = create_interface()
220
- demo.launch(
221
- server_name="0.0.0.0",
222
- server_port=7860,
223
- share=False,
224
- show_error=True
225
- )
226
-