prithivMLmods committed (verified)
Commit f96ecf2 · 1 parent: 3160dd2

Update app.py

Files changed (1): app.py (+7, −8)
app.py CHANGED
```diff
@@ -352,18 +352,17 @@ with gr.Blocks(theme=light_salmon_theme, css=css) as demo:
     gr.Markdown("## Output", elem_id="output-title")
     output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=10, show_copy_button=True)
     with gr.Accordion("(Result.md)", open=False):
-        markdown_output = gr.Markdown(label="(Result.Md)")
-    model_choice = gr.Radio(
+        markdown_output = gr.Markdown(label="(Result.Md)", latex_delimiters=[
+            {"left": "$$", "right": "$$", "display": True},
+            {"left": "$", "right": "$", "display": False}
+        ])
+
+    model_choice = gr.Dropdown(
         choices=["Lumian-VLR-7B-Thinking", "openbmb/MiniCPM-V-4", "Typhoon-OCR-3B", "DREX-062225-7B-exp", "olmOCR-7B-0225-preview"],
         label="Select Model",
         value="Lumian-VLR-7B-Thinking"
     )
-    gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLM-Thinking/discussions)")
-    gr.Markdown("> [MiniCPM-V 4.0](https://huggingface.co/openbmb/MiniCPM-V-4) is the latest efficient model in the MiniCPM-V series. The model is built based on SigLIP2-400M and MiniCPM4-3B with a total of 4.1B parameters. It inherits the strong single-image, multi-image and video understanding performance of MiniCPM-V 2.6 with largely improved efficiency. [Lumian-VLR-7B-Thinking](https://huggingface.co/prithivMLmods/Lumian-VLR-7B-Thinking) is a high-fidelity vision-language reasoning model built on Qwen2.5-VL-7B-Instruct, designed for fine-grained multimodal understanding, video reasoning, and document comprehension through explicit grounded reasoning.")
-    gr.Markdown("> [olmOCR-7B-0225-preview](https://huggingface.co/allenai/olmOCR-7B-0225-preview) is a 7B parameter open large model designed for OCR tasks with robust text extraction, especially in complex document layouts. [Typhoon-ocr-3b](https://huggingface.co/scb10x/typhoon-ocr-3b) is a 3B parameter OCR model optimized for efficient and accurate optical character recognition in challenging conditions.")
-    gr.Markdown("> [DREX-062225-exp](https://huggingface.co/prithivMLmods/DREX-062225-exp) is an experimental multimodal model emphasizing strong document reading and extraction capabilities combined with vision-language understanding to support detailed document parsing and reasoning tasks.")
-    gr.Markdown("> ⚠️ Note: Video inference performance can vary significantly between models.")
-
+
     image_submit.click(
         fn=generate_image,
         inputs=[model_choice, image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
```
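The commit makes two functional changes: the result panel's `gr.Markdown` gains a `latex_delimiters` argument so that `$$...$$` renders as block math and `$...$` as inline math instead of appearing as literal dollar signs, and the model selector switches from `gr.Radio` to `gr.Dropdown`, a more compact widget with the same `choices`/`label`/`value` API. Below is a minimal standalone sketch of the two changed components in isolation, assuming only that `gradio` is installed; the `echo` handler, `query` textbox, and `btn` button are stand-ins invented for illustration and are not part of the Space's app.py.

```python
# Minimal sketch of the two components this commit changes.
# `echo` is a placeholder for the Space's real generation function.
import gradio as gr

MODELS = [
    "Lumian-VLR-7B-Thinking",
    "openbmb/MiniCPM-V-4",
    "Typhoon-OCR-3B",
    "DREX-062225-7B-exp",
    "olmOCR-7B-0225-preview",
]

def echo(model: str, text: str) -> str:
    # Stand-in handler: returns Markdown (possibly containing $...$ math).
    return f"**[{model}]** {text}"

with gr.Blocks() as demo:
    # Dropdown replaces the previous gr.Radio; the constructor arguments
    # are identical, so no other wiring has to change.
    model_choice = gr.Dropdown(
        choices=MODELS,
        label="Select Model",
        value=MODELS[0],
    )
    query = gr.Textbox(label="Query")
    with gr.Accordion("(Result.md)", open=False):
        # latex_delimiters tells the Markdown component which delimiter
        # pairs to hand to the math renderer: "display": True produces
        # block equations, "display": False inline ones.
        markdown_output = gr.Markdown(
            label="(Result.Md)",
            latex_delimiters=[
                {"left": "$$", "right": "$$", "display": True},
                {"left": "$", "right": "$", "display": False},
            ],
        )
    btn = gr.Button("Run")
    btn.click(fn=echo, inputs=[model_choice, query], outputs=markdown_output)

if __name__ == "__main__":
    demo.launch()
```

Because `gr.Dropdown` accepts the same `choices`, `label`, and `value` keywords as `gr.Radio`, the surrounding lines of the diff (the choices list and the `image_submit.click` wiring) are untouched context.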