THP2903 committed on
Commit
3d89d29
·
verified ·
1 Parent(s): 37a18be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -6
app.py CHANGED
@@ -6,12 +6,16 @@ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoPro
6
  from qwen_vl_utils import process_vision_info
7
  import cv2
8
 
9
- # Define the directory containing your downloaded model files
10
- model_directory = "/path/to/your/local/model/files"
11
 
12
- # Load model weights directly from local files
13
- model = Qwen2VLForConditionalGeneration(config=model_directory)
14
- model.load_state_dict(torch.load(f"{model_directory}/model.safetensors"))
 
 
 
 
 
 
15
 
16
  # Load tokenizer from local files
17
  tokenizer = AutoTokenizer(
@@ -21,7 +25,7 @@ tokenizer = AutoTokenizer(
21
  )
22
 
23
  # Load processor directly from config
24
- processor_config = torch.load(f"{model_directory}/processor_config.json")
25
  processor = AutoProcessor.from_config(processor_config)
26
 
27
  # Set generation configuration directly
 
6
  from qwen_vl_utils import process_vision_info
7
  import cv2
8
 
 
 
9
 
10
+ # Path to the directory containing the .safetensors files and the configuration files
11
+ model_directory = "THP2903/erax_llm"
12
+
13
+ # Load the model from the .safetensors shards in the downloaded directory
14
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
15
+ model_directory,
16
+ torch_dtype=torch.bfloat16, # Use torch.float16 if memory savings are needed
17
+ device_map="auto"
18
+ )
19
 
20
  # Load tokenizer from local files
21
  tokenizer = AutoTokenizer(
 
25
  )
26
 
27
  # Load processor directly from config
28
+ processor_config = torch.load(f"{model_directory}/preprocessor_config.json")
29
  processor = AutoProcessor.from_config(processor_config)
30
 
31
  # Set generation configuration directly