anhartit-chetan commited on
Commit
9ea2744
·
1 Parent(s): 6b849c6

Set processor patch size

Browse files
Files changed (1) hide show
  1. app.py +2 -0
app.py CHANGED
@@ -7,7 +7,9 @@ import torch
7
  # model_id = "llava-hf/llava-1.5-7b-hf" # Exceeds 16 GB of memory
8
  # model_id = "llava-hf/llava-1.5-7b-hf-int4" # Does not exist
9
  model_id = "bczhou/tiny-llava-v1-hf"
 
10
  processor = AutoProcessor.from_pretrained(model_id)
 
11
  model = LlavaForConditionalGeneration.from_pretrained(
12
  model_id,
13
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
 
7
  # model_id = "llava-hf/llava-1.5-7b-hf" # Exceeds 16 GB of memory
8
  # model_id = "llava-hf/llava-1.5-7b-hf-int4" # Does not exist
9
  model_id = "bczhou/tiny-llava-v1-hf"
10
+
11
  processor = AutoProcessor.from_pretrained(model_id)
12
+ processor.patch_size = 14 # ✅ Fix: set manually
13
  model = LlavaForConditionalGeneration.from_pretrained(
14
  model_id,
15
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,