damerajee commited on
Commit
3640907
·
verified ·
1 Parent(s): 8671a8d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -0
README.md CHANGED
@@ -52,6 +52,10 @@ print("device_index:",device_index)
52
  base_model = PaliGemmaForConditionalGeneration.from_pretrained("BhashaAI/ViLaH",device_map={"": device_index},torch_dtype=torch.float16,low_cpu_mem_usage=True)
53
  processor = AutoProcessor.from_pretrained("BhashaAI/ViLaH")
54
 
 
 
 
 
55
  MAX_LENGTH = 500
56
  # Autoregressively generate
57
  # We use greedy decoding here, for more fancy methods see https://huggingface.co/blog/how-to-generate
@@ -87,6 +91,10 @@ quantization_config = BitsAndBytesConfig(load_in_4bit=True)
87
  base_model = PaliGemmaForConditionalGeneration.from_pretrained("BhashaAI/ViLaH",device_map={"": device_index},quantization_config=quantization_config,torch_dtype=torch.float16,low_cpu_mem_usage=True)
88
  processor = AutoProcessor.from_pretrained("BhashaAI/ViLaH")
89
 
 
 
 
 
90
  MAX_LENGTH = 500
91
  # Autoregressively generate
92
  # We use greedy decoding here, for more fancy methods see https://huggingface.co/blog/how-to-generate
 
52
  base_model = PaliGemmaForConditionalGeneration.from_pretrained("BhashaAI/ViLaH",device_map={"": device_index},torch_dtype=torch.float16,low_cpu_mem_usage=True)
53
  processor = AutoProcessor.from_pretrained("BhashaAI/ViLaH")
54
 
55
+ inputs = processor(text=text, images=test_image, return_tensors="pt").to("cuda")
56
+ for k,v in inputs.items():
57
+     print(k, v.shape)
58
+
59
  MAX_LENGTH = 500
60
  # Autoregressively generate
61
  # We use greedy decoding here, for more fancy methods see https://huggingface.co/blog/how-to-generate
 
91
  base_model = PaliGemmaForConditionalGeneration.from_pretrained("BhashaAI/ViLaH",device_map={"": device_index},quantization_config=quantization_config,torch_dtype=torch.float16,low_cpu_mem_usage=True)
92
  processor = AutoProcessor.from_pretrained("BhashaAI/ViLaH")
93
 
94
+ inputs = processor(text=text, images=test_image, return_tensors="pt").to("cuda")
95
+ for k,v in inputs.items():
96
+     print(k, v.shape)
97
+
98
  MAX_LENGTH = 500
99
  # Autoregressively generate
100
  # We use greedy decoding here, for more fancy methods see https://huggingface.co/blog/how-to-generate