am committed on
Commit
4e42180
·
1 Parent(s): f2960e2
Files changed (1) hide show
  1. app.py +8 -5
app.py CHANGED
@@ -15,14 +15,15 @@ from qwen_vl_utils import process_vision_info, fetch_image
15
 
16
 
17
  # pretrained_model_name_or_path=os.environ.get("MODEL", "amrn/testmodel2")
18
- pretrained_model_name_or_path=os.environ.get("MODEL", "amrn/dsv5mx3")
 
19
 
20
  auth_token = os.environ.get("HF_TOKEN") or True
21
  DEFAULT_PROMPT = "Find abnormalities and support devices."
22
 
23
  model = AutoModelForImageTextToText.from_pretrained(
24
  pretrained_model_name_or_path=pretrained_model_name_or_path,
25
- torch_dtype=torch.bfloat16,
26
  # attn_implementation="flash_attention_2",
27
  # trust_remote_code=True,
28
  token=auth_token
@@ -115,7 +116,7 @@ def model_inference(
115
 
116
 
117
 
118
- with gr.Blocks() as demo:
119
 
120
  send_btn = gr.Button("Send", variant="primary", render=False)
121
  textbox = gr.Textbox(show_label=False, placeholder="Enter your text here and press ENTER", render=False, submit_btn="Send")
@@ -143,10 +144,12 @@ with gr.Blocks() as demo:
143
  chat_interface = gr.ChatInterface(fn=model_inference,
144
  # title='title', description='description',
145
  type="messages",
146
- chatbot=gr.Chatbot(type="messages", label="AI", render_markdown=True, sanitize_html=False, allow_tags=True, height=800, container=False, show_share_button=False)
147
- , textbox=textbox,
 
148
  additional_inputs=image_input,
149
  multimodal=False,
 
150
  )
151
 
152
  # Clear chat history when an example is selected (keep example-populated inputs intact)
 
15
 
16
 
17
  # pretrained_model_name_or_path=os.environ.get("MODEL", "amrn/testmodel2")
18
+ # pretrained_model_name_or_path=os.environ.get("MODEL", "amrn/dsv5mx3")
19
+ pretrained_model_name_or_path=os.environ.get("MODEL", "amrn/gmdsv5mx3")
20
 
21
  auth_token = os.environ.get("HF_TOKEN") or True
22
  DEFAULT_PROMPT = "Find abnormalities and support devices."
23
 
24
  model = AutoModelForImageTextToText.from_pretrained(
25
  pretrained_model_name_or_path=pretrained_model_name_or_path,
26
+ dtype=torch.bfloat16,
27
  # attn_implementation="flash_attention_2",
28
  # trust_remote_code=True,
29
  token=auth_token
 
116
 
117
 
118
 
119
+ with gr.Blocks(fill_height=True) as demo:
120
 
121
  send_btn = gr.Button("Send", variant="primary", render=False)
122
  textbox = gr.Textbox(show_label=False, placeholder="Enter your text here and press ENTER", render=False, submit_btn="Send")
 
144
  chat_interface = gr.ChatInterface(fn=model_inference,
145
  # title='title', description='description',
146
  type="messages",
147
+ # chatbot=gr.Chatbot(type="messages", label="AI", render_markdown=True, sanitize_html=False, allow_tags=True, height=800, container=False, show_share_button=False),
148
+ chatbot=gr.Chatbot(type="messages", label="AI", render_markdown=True, sanitize_html=False, allow_tags=True, height=None, min_height=400, max_height=900, scale=1, container=False, show_share_button=False),
149
+ textbox=textbox,
150
  additional_inputs=image_input,
151
  multimodal=False,
152
+ fill_height=True,
153
  )
154
 
155
  # Clear chat history when an example is selected (keep example-populated inputs intact)