farrell236 committed
Commit d83e1a8 · verified · 1 Parent(s): 7c4baf4

Update app.py

Files changed (1): app.py (+189 −47)
app.py CHANGED
@@ -1,64 +1,206 @@
  import gradio as gr
- from huggingface_hub import InferenceClient

  """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
          top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )


  if __name__ == "__main__":
      demo.launch()
+ import os
+ import time
+ import torch
+ import requests
+
+ from PIL import Image
+ from collections.abc import Iterator
+ from threading import Thread
+
  import gradio as gr
+ from gradio import FileData

+ from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer
+ from qwen_vl_utils import process_vision_info
+
+ DESCRIPTION = """\
+ # Qwen2.5-VL-32B-Instruct
  """

+ MAX_MAX_NEW_TOKENS = 2048
+ DEFAULT_MAX_NEW_TOKENS = 1024
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

+ model_id = 'Qwen/Qwen2.5-VL-3B-Instruct'
+ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+     model_id,
+     # torch_dtype=torch.bfloat16,
+     # attn_implementation="flash_attention_2",
+     device_map="auto"
+ )
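+ # device_map="auto" lets Accelerate place the model on the available GPU(s),
+ # falling back to CPU when none is present.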
+ processor = AutoProcessor.from_pretrained(model_id)
 
+ import base64
+ from PIL import Image
+ import io

+ # Encode an image as a base64 JPEG string, resized by the given scale factor
+ def encode_image(image_path, scale=0.25):
+     with Image.open(image_path) as img:
+         # Resize the image by `scale` (default: a quarter of the original size)
+         new_size = (int(img.width * scale), int(img.height * scale))
+         img = img.resize(new_size)

+         # Save the resized image to a bytes buffer
+         buffer = io.BytesIO()
+         img.save(buffer, format="JPEG")  # Change format if needed (e.g., PNG)
+         buffer.seek(0)
+
+         # Encode to base64
+         return base64.b64encode(buffer.read()).decode('utf-8')
+
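+ # Note: the base64 string is wrapped into a "data:image/jpeg;base64,..." URI
+ # below; qwen_vl_utils.process_vision_info accepts images in that form.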
+ def generate(
+     message: dict,  # MultimodalTextbox value: {"text": str, "files": list}
+     history: list[dict],
+     max_new_tokens: int = 1024,
+     temperature: float = 0.6,
+     top_p: float = 0.9,
+     top_k: int = 50,
+     num_beams: int = 1,
+     repetition_penalty: float = 1.2,
+ ) -> Iterator[str]:
+
+     txt = message["text"]
+     ext_buffer = f"{txt}"
+
+     messages = []
+     images = []
+
+     # Rebuild the chat history: image turns arrive as (filepath,) tuples,
+     # text turns as plain strings
+     for i, msg in enumerate(history):
+         if isinstance(msg[0], tuple):
+             print('HIT2', msg[0])
+             messages.append({"role": "user", "content": [
+                 {"type": "text", "text": history[i+1][0]},
+                 {"type": "image", "image": f"data:image/jpeg;base64,{encode_image(msg[0][0])}"}
+             ]})
+             messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i+1][1]}]})
+         elif isinstance(history[i-1], tuple) and isinstance(msg[0], str):
+             # messages are already handled
+             pass
+         elif isinstance(history[i-1][0], str) and isinstance(msg[0], str):  # text-only turn
+             messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
+             messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})
+
+     # add the current message
+     if len(message["files"]) == 1:
+         if isinstance(message["files"][0], str):  # examples
+             base64_image = encode_image(message["files"][0])
+         else:  # regular input
+             base64_image = encode_image(message["files"][0]["path"])
+         messages.append({"role": "user", "content": [
+             {"type": "text", "text": txt},
+             {"type": "image", "image": f"data:image/jpeg;base64,{base64_image}"}]})
+     else:
+         messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
+
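+     # At this point `messages` follows the Qwen2.5-VL chat schema, e.g.:
+     # [{"role": "user", "content": [{"type": "text", "text": "..."},
+     #                               {"type": "image", "image": "data:image/jpeg;base64,..."}]}]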
+     texts = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     image_inputs, video_inputs = process_vision_info(messages)
+     inputs = processor(
+         text=[texts],
+         images=image_inputs,
+         videos=video_inputs,
+         padding=True,
+         return_tensors="pt",
+     ).to(model.device)  # follow the device chosen by device_map="auto"
+     streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
+
+     generation_kwargs = dict(
+         inputs,
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=True,
          top_p=top_p,
+         top_k=top_k,
+         temperature=temperature,
+         num_beams=num_beams,
+         # repetition_penalty=repetition_penalty,
+     )
+     generated_text = ""
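+     # model.generate blocks until completion, so it runs in a background
+     # thread while the TextIteratorStreamer yields decoded tokens here.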
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+     buffer = ""
+
+     for new_text in streamer:
+         buffer += new_text
+         generated_text_without_prompt = buffer
+         time.sleep(0.01)
+         yield buffer
+
+ demo = gr.ChatInterface(fn=generate, title="Multimodal Qwen", examples=[
+     [{"text": """\
+ You are a highly experienced ophthalmologist specializing in retinal diseases.
+ You will be shown a color fundus photograph of a patient's eye.
+ Your task is to identify key retinal features and return a structured response.
+ You must only respond in JSON format using the following fields:
+ - ADVAMD: 1 if advanced age-related macular degeneration is present, otherwise 0
+ - PIG: 1 if abnormal pigmentary change is present, otherwise 0
+ - DRUS: 0 if no drusen or small drusen, 1 if intermediate or medium drusen, 2 if large drusen
+ - RPD: 1 if reticular pseudodrusen are present, otherwise 0
+ - NVAMD: 1 if neovascular AMD is present, otherwise 0
+ - GA: 1 if geographic atrophy is present, otherwise 0
+
+ Do not include any explanation, just return the JSON object.
+
+ Please assess this fundus image and return your findings in the specified JSON format.""",
+     "files": ["./examples/ret-hem250-304.jpg"]},
+     1024],
+     ],
+     textbox=gr.MultimodalTextbox(),
+     additional_inputs=[
+         gr.Slider(
+             label="Max new tokens",
+             minimum=1,
+             maximum=MAX_MAX_NEW_TOKENS,
+             step=1,
+             value=DEFAULT_MAX_NEW_TOKENS,
+         ),
+         gr.Slider(
+             label="Temperature",
+             minimum=0.1,
+             maximum=4.0,
+             step=0.1,
+             value=0.6,
+         ),
+         gr.Slider(
+             label="Top-p (nucleus sampling)",
+             minimum=0.05,
+             maximum=1.0,
+             step=0.05,
+             value=0.9,
+         ),
+         gr.Slider(
+             label="Top-k",
+             minimum=1,
+             maximum=1000,
+             step=1,
+             value=50,
+         ),
+         gr.Slider(
+             label="Number of beams",
+             minimum=1,
+             maximum=1,
+             step=1,
+             value=1,
+         ),
+         gr.Slider(
+             label="Repetition penalty",
+             minimum=1.0,
+             maximum=2.0,
+             step=0.05,
+             value=1.2,
+         ),
+     ],
+     cache_examples=False,
+     description=DESCRIPTION,
+     stop_btn="Stop Generation",
+     fill_height=True,
+     multimodal=True)
+
  if __name__ == "__main__":
      demo.launch()
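A minimal sketch of how the updated endpoint could be exercised once the Space is running. The Space id and image path are placeholders (not taken from this commit), and the positional arguments mirror the additional_inputs order defined above:

    from gradio_client import Client, handle_file

    # Hypothetical Space id and local image; substitute real values.
    client = Client("farrell236/<space-name>")
    result = client.predict(
        {"text": "Assess this fundus image.", "files": [handle_file("fundus.jpg")]},
        1024,  # max new tokens
        0.6,   # temperature
        0.9,   # top-p
        50,    # top-k
        1,     # number of beams
        1.2,   # repetition penalty
        api_name="/chat",
    )
    print(result)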