arjunanand13 committed (verified)
Commit f542f93 · Parent(s): 05b37e4

Update app.py

Files changed (1): app.py (+2, −2)
app.py CHANGED
@@ -3,7 +3,7 @@ from transformers import AutoProcessor, Idefics2ForConditionalGeneration, AutoModelForPreTraining
 import subprocess
 import torch
 from peft import LoraConfig
-from transformers import BitsAndBytesConfig
+# from transformers import BitsAndBytesConfig
 
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 DEVICE = "cuda:0"
@@ -18,7 +18,7 @@ bnb_config = BitsAndBytesConfig(
 )
 
 model = AutoModelForPreTraining.from_pretrained("HuggingFaceM4/idefics2-8b",quantization_config=bnb_config)
-# model = Idefics2ForConditionalGeneration.from_pretrained("HuggingFaceM4/idefics2-8b",quantization_config=bnb_config)
+# model = Idefics2ForConditionalGeneration.from_pretrained("HuggingFaceM4/idefics2-8b",load_in_4bit=True)
 
 # if USE_QLORA or USE_LORA:
 #     lora_config = LoraConfig(
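Two things are worth flagging in the new revision: the BitsAndBytesConfig import is now commented out while the unchanged context at line 18 still calls BitsAndBytesConfig(...) to build bnb_config, so app.py will raise a NameError at startup unless that import is restored; and the commented-out alternative now passes load_in_4bit=True directly, a shorthand that transformers expands into a default BitsAndBytesConfig internally. A minimal sketch of the working 4-bit path follows, assuming values for the config fields that sit outside the hunk (the quant type and compute dtype below are illustrative, not necessarily what app.py uses):

import torch
from transformers import AutoModelForPreTraining, BitsAndBytesConfig

# Keep this import live: the bnb_config assignment resolves
# BitsAndBytesConfig by name at module import time.
# The 4-bit fields below are assumptions; app.py's real values
# sit outside the diff hunk.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # quantize weights to 4 bits on load
    bnb_4bit_quant_type="nf4",             # assumed: the common QLoRA default
    bnb_4bit_compute_dtype=torch.float16,  # assumed compute dtype
)

model = AutoModelForPreTraining.from_pretrained(
    "HuggingFaceM4/idefics2-8b",
    quantization_config=bnb_config,
)

Either spelling works once the import is back; quantization_config is simply the explicit form of what load_in_4bit=True configures implicitly.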