prithivMLmods commited on
Commit
f4c001d
·
verified ·
1 Parent(s): 8c619b8

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +13 -6
README.md CHANGED
@@ -25,11 +25,18 @@ Previously, inference with the model [https://huggingface.co/deepseek-ai/DeepSee
25
  This page, which includes the model weights and corrected configuration, fixed the issue and allowed `transformers` inference to run smoothly.
26
 
27
  > [!note]
28
- Last updated: 3:00 PM (IST), October 29, 2025.
29
 
30
- > [!note]
31
- The latest transformers version used as of the above date is `transformers==4.57.1`, the torch version `2.6.0+cu124`, torch.version.cuda = `12.4`, tested on the device `NVIDIA H200 MIG 3g.71gb`. You can use or opt out of various attention implementations, such as flash_attention or sdpa, for optimization or standardization; the attention implementation argument can also be omitted entirely if needed.
 
 
 
 
32
 
 
 
 
33
  ## Quick Start with Transformers
34
 
35
  > [!note]
@@ -262,7 +269,7 @@ def process_ocr_task(image, model_size, task_type, ref_text):
262
 
263
  return text_result, result_image_pil
264
 
265
- with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
266
  gr.Markdown("# **DeepSeek OCR [exp]**", elem_id="main-title")
267
 
268
 
@@ -279,7 +286,7 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
279
  output_image = gr.Image(label="Layout Detection (If Any)", type="pil")
280
 
281
  with gr.Accordion("Note", open=False):
282
- gr.Markdown("Inference using Huggingface transformers on NVIDIA GPUs. This app is running with transformers version 4.57.1 and torch version 2.6.0.")
283
 
284
  def toggle_ref_text_visibility(task):
285
  return gr.Textbox(visible=True) if task == "Locate Object by Reference" else gr.Textbox(visible=False)
@@ -288,7 +295,7 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
288
  submit_btn.click(fn=process_ocr_task, inputs=[image_input, model_size, task_type, ref_text_input], outputs=[output_text, output_image])
289
 
290
  if __name__ == "__main__":
291
- demo.queue(max_size=20).launch(share=True)
292
  ```
293
 
294
  ---
 
25
  This page, which includes the model weights and corrected configuration, fixed the issue and allowed `transformers` inference to run smoothly.
26
 
27
  > [!note]
28
+ Last updated: 3:00 PM (IST), December 04, 2025.
29
 
30
+ ```
31
+ transformers: 4.57.1
32
+ torch: 2.6.0+cu124 (or the latest version, e.g., torch 2.9.0)
33
+ cuda: 12.4
34
+ device: NVIDIA H200 MIG 3g.71gb
35
+ ```
36
 
37
+ ```
38
+ Currently supported up to `transformers==4.57.2`. Support for Transformers v5 will be added soon.
39
+ ```
40
  ## Quick Start with Transformers
41
 
42
  > [!note]
 
269
 
270
  return text_result, result_image_pil
271
 
272
+ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
273
  gr.Markdown("# **DeepSeek OCR [exp]**", elem_id="main-title")
274
 
275
 
 
286
  output_image = gr.Image(label="Layout Detection (If Any)", type="pil")
287
 
288
  with gr.Accordion("Note", open=False):
289
+ gr.Markdown("Inference using Huggingface transformers on NVIDIA GPUs. This app is running with transformers version 4.57.2 and torch version 2.9.0.")
290
 
291
  def toggle_ref_text_visibility(task):
292
  return gr.Textbox(visible=True) if task == "Locate Object by Reference" else gr.Textbox(visible=False)
 
295
  submit_btn.click(fn=process_ocr_task, inputs=[image_input, model_size, task_type, ref_text_input], outputs=[output_text, output_image])
296
 
297
  if __name__ == "__main__":
298
+ demo.queue(max_size=20).launch(share=True)
299
  ```
300
 
301
  ---