Tharjama committed on
Commit
323263c
·
verified ·
1 Parent(s): d2f06ef

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -15,17 +15,17 @@ import re
15
  import html
16
  import json
17
 
18
- processor = AutoProcessor.from_pretrained("allenai/olmOCR-2-7B-1025-FP8")
19
- model = AutoModelForVision2Seq.from_pretrained("allenai/olmOCR-2-7B-1025-FP8",
20
- torch_dtype=torch.bfloat16,
21
- device_map="auto" if device == "cuda" else None,
22
- low_cpu_mem_usage=True
23
- )
24
-
25
- #model = AutoModelForVision2Seq.from_pretrained("allenai/olmOCR-2-7B-1025-FP8", torch_dtype=torch.bfloat16).eval()
26
- #processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
27
- #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
28
- #model.to(device)
29
 
30
 
31
  #model = Qwen2VLForConditionalGeneration.from_pretrained(
 
15
  import html
16
  import json
17
 
18
+ #processor = AutoProcessor.from_pretrained("allenai/olmOCR-2-7B-1025-FP8")
19
+ #model = AutoModelForVision2Seq.from_pretrained("allenai/olmOCR-2-7B-1025-FP8",
20
+ # torch_dtype=torch.bfloat16,
21
+ # device_map="auto" if device == "cuda" else None,
22
+ # low_cpu_mem_usage=True
23
+ # )
24
+
25
+ model = AutoModelForVision2Seq.from_pretrained("allenai/olmOCR-2-7B-1025-FP8", torch_dtype=torch.bfloat16).eval()
26
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
27
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
28
+ model.to(device)
29
 
30
 
31
  #model = Qwen2VLForConditionalGeneration.from_pretrained(