gtang666 committed (verified)
Commit ed1622f · 1 Parent(s): 320e0a3
.gitattributes CHANGED
@@ -1,35 +1,36 @@
  *.7z filter=lfs diff=lfs merge=lfs -text
  *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
  *.bz2 filter=lfs diff=lfs merge=lfs -text
  *.ckpt filter=lfs diff=lfs merge=lfs -text
  *.ftz filter=lfs diff=lfs merge=lfs -text
  *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
  *.npy filter=lfs diff=lfs merge=lfs -text
  *.npz filter=lfs diff=lfs merge=lfs -text
  *.onnx filter=lfs diff=lfs merge=lfs -text
  *.ot filter=lfs diff=lfs merge=lfs -text
  *.parquet filter=lfs diff=lfs merge=lfs -text
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pickle filter=lfs diff=lfs merge=lfs -text
  *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
  *.rar filter=lfs diff=lfs merge=lfs -text
  *.safetensors filter=lfs diff=lfs merge=lfs -text
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
  *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
  *.tgz filter=lfs diff=lfs merge=lfs -text
  *.wasm filter=lfs diff=lfs merge=lfs -text
  *.xz filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ flash_attn-2.6.1+cu118torch2.4cxx11abiFALSE-cp39-cp39-linux_x86_64.whl filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,14 @@
  ---
- license: mit
+ title: CalliDemo
+ emoji: 📊
+ colorFrom: red
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 5.15.0
+ app_file: app.py
+ pinned: false
+ license: gpl
+ short_description: A test demo of CalliReader
  ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,21 @@
+ import gradio as gr
+ from PIL import Image
+ from inference import single_image_wrapped
+
+ # Wrapper around the single_image_wrapped function
+ def image_with_prompt_to_result(image, prompt):
+     # Pass the image and prompt to single_image_wrapped and return its output
+     result = single_image_wrapped(image, prompt)
+     return result
+
+ # Build the Gradio interface
+ iface = gr.Interface(
+     fn=image_with_prompt_to_result,  # function to call
+     inputs=[gr.Image(type="pil"), gr.Textbox(placeholder="Enter a prompt...")],  # inputs: image + prompt
+     outputs="text",  # output: the recognition result (text)
+     title="Image and Prompt Input",  # interface title
+     description="Upload an image and enter a prompt to get the recognition result",  # description
+ )
+
+ # Launch the app
+ iface.launch()
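
Once the Space is running via `python app.py`, the interface can also be queried programmatically. A minimal sketch using gradio_client (a separate package, `pip install gradio_client`); the local URL assumes a default launch and "sample.jpg" is a hypothetical test image:

# Hedged sketch: query the locally running demo through Gradio's client API.
from gradio_client import Client, handle_file

client = Client("http://127.0.0.1:7860")
result = client.predict(
    handle_file("sample.jpg"),   # image input (hypothetical file)
    "这幅书法作品内容是什么?",     # prompt: "What is the content of this calligraphy work?"
    api_name="/predict",         # default endpoint name for a gr.Interface
)
print(result)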
config/__init__.py ADDED
@@ -0,0 +1,5 @@
+ import sys
+ import os
+
+ # Add this directory to sys.path
+ sys.path.append(os.path.abspath(os.path.dirname(__file__)))
config/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (243 Bytes)

config/__pycache__/configu.cpython-39.pyc ADDED
Binary file (1.69 kB)
config/configu.py ADDED
@@ -0,0 +1,62 @@
+ import torch
+ import os
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Path configuration
+ VIT_MODEL_PATH = '/home/luoyx/InternVL/CalliReader/params/vit_model.pt'
+ MLP1_PATH = '/home/luoyx/InternVL/CalliReader/params/params/mlp1.pth'
+ TOK_EMBEDDING_PATH = '/home/luoyx/InternVL/CalliReader/params/token_embedding.pth'
+ TOKENIZER_PATH = 'InternVL'
+ NORM_PARAMS_PATH = '/home/luoyx/InternVL/CalliReader/params/gauss_norm_mu_sigma.pth'
+ NORM_TOK_EMBEDDING_PATH = '/home/luoyx/InternVL/CalliReader/params/gauss_norm.pth'
+ NEW_1000_TOK_EMBEDDING_PATH = '/home/luoyx/InternVL/CalliReader/params/new1000_token_embedding.pth'
+ INTERNVL_PATH = 'InternVL'
+
+
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
+ IMAGENET_STD = (0.229, 0.224, 0.225)
+ SEED = 42
+
+ # Training configuration
+ BATCH_SIZE = 256
+ USE_WARMUP = False
+ LR = 1e-4  # original 1e-4
+ WEIGHT_DECAY = 1e-5
+ WARMUP_STEPS = 2000  # *4 = total training steps
+ NUM_EPOCHS = 13
+ NUM_WORKERS = 4
+ TRAIN_INTER = 10
+ VAL_INTER = 500
+ DOWNSAMPLE_RATIO = 0.5
+ NUM_LAYERS = 4
+ GRAD_ACCU = 1
+ MODEL_NAME = 'PERCEIVER'
+
+
+
+ # Data paths
+
+ TRAIN_DATA_PATH = ""
+ VAL_DATA_PATH = ''
+ TEST_DATA_PATH = ''
+ TRAIN_RATIO = 1  # 0.556  # 0.02
+ VAL_RATIO = 0.2  # 0.1
+
+ # 36000 steps, 8 cards, 20 epochs ~ 0.52 data ratio
+
+ # LOGS and SAVE_NAME
+ # Remember: this must be changed for every new experiment!
+ LOG_NAME = ''
+ SAVE_NAME = LOG_NAME + '.pth'
+
+ # DDP
+ WORLD_SIZE = torch.cuda.device_count()
+ # To resume a partially trained model, neither of these may be None!
+ # LOAD CHECKPOINT AND RESUME TRAINING
+ # PERCEIVER_CHECKPOINT = "/home/luoyx/InternVL/CalliReader/params/perceiver_4_n01_1e-4_new.pth"
+ # RESUME = 26500
+ PERCEIVER_CHECKPOINT = '/home/luoyx/InternVL/CalliReader/params/callialign.pth'
+ RESUME = 50000
+ ORDERFORMER_CHECKPOINT = '/home/luoyx/InternVL/CalliReader/params/orderformer.pth'
+ YOLO_CHECKPOINT = "/home/luoyx/InternVL/CalliReader/params/best.pt"
flash_attn-2.6.1+cu118torch2.4cxx11abiFALSE-cp39-cp39-linux_x86_64.whl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eeeb772af0a920b80418e62e7468e2ca96038264ce604a3d60412f7df4f7e508
+ size 200066047
inference.py ADDED
@@ -0,0 +1,170 @@
+
+ import random
+ import numpy as np
+ import torch
+ from PIL import Image
+ Image.MAX_IMAGE_PIXELS = None
+ from transformers import AutoModel, AutoTokenizer
+ import opencc
+ from ultralytics import YOLO
+ from config.configu import *
+ from utils.utils import *
+ import logging
+ import argparse
+
+ def setup_logger(log_file):
+     logging.basicConfig(filename=log_file, level=logging.INFO, format='%(asctime)s - %(message)s')
+     logger = logging.getLogger()
+     return logger
+
+ def set_seed(seed):
+     random.seed(seed)
+     np.random.seed(seed)
+     torch.manual_seed(seed)
+
+     if torch.cuda.is_available():
+         torch.cuda.manual_seed(seed)
+         torch.cuda.manual_seed_all(seed)
+
+     torch.backends.cudnn.deterministic = True
+     torch.backends.cudnn.benchmark = False
+
+ cc = opencc.OpenCC('t2s.json')
+ set_seed(SEED)
+ converter_t2s = opencc.OpenCC('t2s')
+
+
+ def single_rec(model, tokenizer, detect_model, generation_config, image_path, prompt, use_p, hard_vq, drop_zero, repetition_penalty, verbose):
+     response, history = model.chat_ocr(tokenizer, detect_model, image_path, prompt, generation_config,
+                                        use_p=use_p,
+                                        hard_vq=hard_vq,
+                                        drop_zero=drop_zero, repetition_penalty=repetition_penalty,
+                                        return_history=True, verbose=verbose)
+     print(f'User: {prompt}\nAssistant: {response}')
+     return response  # return the result so callers (e.g. the Gradio app) can display it
+
+ def folder_rec(model, tokenizer, detect_model, generation_config, folder_path, prompt, save_name, use_p, hard_vq, drop_zero, repetition_penalty, verbose):
+     results = []
+
+     all_images = get_image_paths(folder_path)
+     for pic in tqdm(all_images):
+         pic_path = os.path.join(folder_path, pic)
+         try:
+             response, history = model.chat_ocr(tokenizer, detect_model, pic_path, prompt, generation_config,
+                                                use_p=use_p,
+                                                hard_vq=hard_vq,
+                                                drop_zero=drop_zero, repetition_penalty=repetition_penalty,
+                                                return_history=True, verbose=verbose)
+         except Exception as e:
+             print(f"An error has occurred:\n{e}")
+             response = "ERROR!"
+         print(f'User: {prompt}\nAssistant: {response}')
+         results.append({"imagePath": pic_path, 'prompt': prompt, 'response': response})
+     if not save_name.endswith('json'):
+         save_name += '_result.json'
+     save_json(save_name, results)
+
+
+ def main():
+     parser = argparse.ArgumentParser(description="args for the inference task")
+
+     parser.add_argument('--tgt', type=str, help='Recognition target (an image path or a folder of images)')
+     parser.add_argument('--prompt', type=str, default='这幅书法作品内容是什么?', help='Prompt for recognition (default: "What is the content of this calligraphy work?")')
+     parser.add_argument('--save_name', type=str, default="recognition.json", help="Where to store results in multiple-image recognition mode")
+
+     parser.add_argument('--use_p', type=bool, default=True, help='Whether to use the perceiver resampler')
+     parser.add_argument('--hard_vq', type=bool, default=False, help='Whether to use closest-similarity matching')
+     parser.add_argument('--drop_zero', type=bool, default=False, help='Whether to drop zero padding in pseudo tokens')
+     parser.add_argument('--verbose', type=bool, default=False, help='Whether to output extra information')
+     parser.add_argument('--repetition_penalty', type=float, default=1.0, help='Repetition penalty for generation')
+
+
+     args = parser.parse_args()
+
+     if not isinstance(args.tgt, str):
+         raise ValueError(f"The target should be a string, not an instance of {type(args.tgt)}!")
+
+
+     model = AutoModel.from_pretrained(
+         INTERNVL_PATH,
+         torch_dtype=torch.bfloat16,
+         low_cpu_mem_usage=True,
+         trust_remote_code=True).eval().cuda()
+     tokenizer = AutoTokenizer.from_pretrained(INTERNVL_PATH, trust_remote_code=True)
+
+     generation_config = dict(
+         num_beams=1,
+         max_new_tokens=1024,
+         do_sample=False,
+     )
+
+     detect_model = YOLO(YOLO_CHECKPOINT)
+     if is_image(args.tgt):
+         print("Single image recognition mode.")
+         single_rec(
+             model,
+             tokenizer,
+             detect_model,
+             generation_config,
+             args.tgt,
+             args.prompt,
+             args.use_p,
+             args.hard_vq,
+             args.drop_zero,
+             args.repetition_penalty,
+             args.verbose)
+     elif os.path.isdir(args.tgt):
+         print("Multiple images recognition mode.")
+         os.makedirs('results', exist_ok=True)
+         folder_rec(
+             model,
+             tokenizer,
+             detect_model,
+             generation_config,
+             args.tgt,
+             args.prompt,
+             os.path.join('results', args.save_name),
+             args.use_p,
+             args.hard_vq,
+             args.drop_zero,
+             args.repetition_penalty,
+             args.verbose)
+     else:
+         raise ValueError("The target should be either an image path or a folder that contains images!")
+
+ def single_image_wrapped(image, prompts):
+     model = AutoModel.from_pretrained(
+         INTERNVL_PATH,
+         torch_dtype=torch.bfloat16,
+         low_cpu_mem_usage=True,
+         trust_remote_code=True).eval().cuda()
+     tokenizer = AutoTokenizer.from_pretrained(INTERNVL_PATH, trust_remote_code=True)
+
+     generation_config = dict(
+         num_beams=1,
+         max_new_tokens=1024,
+         do_sample=False,
+     )
+     detect_model = YOLO(YOLO_CHECKPOINT)
+
+
+     temp_dir = "temp_images"
+     os.makedirs(temp_dir, exist_ok=True)
+
+     # Save the uploaded image under a fixed name so it can be passed by path
+     temp_image_path = os.path.join(temp_dir, "uploaded_image.png")
+     image.save(temp_image_path)
+     # Positional args: use_p=True, hard_vq=False, drop_zero=True,
+     # repetition_penalty=1.2, verbose=False
+     return single_rec(
+         model,
+         tokenizer,
+         detect_model,
+         generation_config,
+         temp_image_path,
+         prompts,
+         True,
+         False,
+         True,
+         1.2,
+         False)
+
+ if __name__ == '__main__':
+     main()
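
For reference, the entry point is `python inference.py --tgt <image-or-folder>`. Note a pitfall in the flags above: argparse's `type=bool` turns any non-empty string into True, so `--use_p False` does not actually disable the resampler. The pipeline can also be driven directly from Python; a minimal sketch under the repo's own config, assuming the `chat_ocr` remote code and checkpoints resolve and with "sample.jpg" as a hypothetical image:

# Hedged sketch of calling the recognition pipeline directly (mirrors main()).
import torch
from transformers import AutoModel, AutoTokenizer
from ultralytics import YOLO
from config.configu import INTERNVL_PATH, YOLO_CHECKPOINT
from inference import single_rec

model = AutoModel.from_pretrained(
    INTERNVL_PATH, torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True, trust_remote_code=True).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(INTERNVL_PATH, trust_remote_code=True)
detect_model = YOLO(YOLO_CHECKPOINT)
generation_config = dict(num_beams=1, max_new_tokens=1024, do_sample=False)

response = single_rec(model, tokenizer, detect_model, generation_config,
                      "sample.jpg",              # hypothetical image path
                      "这幅书法作品内容是什么?",   # the default recognition prompt
                      use_p=True, hard_vq=False, drop_zero=True,
                      repetition_penalty=1.2, verbose=False)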
models/__init__.py ADDED
@@ -0,0 +1,6 @@
+ import sys
+ import os
+
+ # Add this directory to sys.path
+ sys.path.append(os.path.abspath(os.path.dirname(__file__)))
+
models/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (262 Bytes)

models/__pycache__/model.cpython-39.pyc ADDED
Binary file (18.4 kB)

models/__pycache__/perceiver_resampler.cpython-39.pyc ADDED
Binary file (4.88 kB)

models/__pycache__/similarity.cpython-39.pyc ADDED
Binary file (1.74 kB)
models/model.py ADDED
@@ -0,0 +1,546 @@
+ import torch
+ import torch.nn as nn
+ import sys
+ import os
+ import json
+
+ from collections import OrderedDict
+ project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ sys.path.append(project_root)
+
+ from transformers import AutoConfig
+ from InternVL.modeling_intern_vit import InternVisionModel
+ from .perceiver_resampler import PerceiverResampler, MLP
+ from config.configu import device, VIT_MODEL_PATH, MLP1_PATH, TOK_EMBEDDING_PATH, TOKENIZER_PATH, NORM_TOK_EMBEDDING_PATH, NORM_PARAMS_PATH
+
+
+ def load_json(pth):
+     """Load a JSON file."""
+     with open(pth, 'r', encoding='utf-8') as f:
+         data = json.load(f)
+     return data
+
+ def load_vision_model(location='cpu'):
+     vit_config = AutoConfig.from_pretrained(TOKENIZER_PATH, trust_remote_code=True).vision_config
+     vision_model = InternVisionModel(vit_config).to(device).to(torch.bfloat16)
+     state_dict = torch.load(VIT_MODEL_PATH, weights_only=True, map_location=location)
+     incompatible_keys = vision_model.load_state_dict(state_dict)
+     if incompatible_keys.unexpected_keys:
+         print(f"Unexpected keys: {incompatible_keys.unexpected_keys}")
+     if incompatible_keys.missing_keys:
+         print(f"Missing keys: {incompatible_keys.missing_keys}")
+     print("vision model loaded")
+     return vision_model
+
+ def load_mlp1(downsample_ratio, vit_hidden_size=1024, llm_hidden_size=4096, location='cpu'):
+     mlp1 = nn.Sequential(
+         nn.LayerNorm(vit_hidden_size * int(1 / downsample_ratio) ** 2),
+         nn.Linear(vit_hidden_size * int(1 / downsample_ratio) ** 2, llm_hidden_size),
+         nn.GELU(),
+         nn.Linear(llm_hidden_size, llm_hidden_size)
+     ).to(device).to(torch.bfloat16)
+     mlp1.load_state_dict(torch.load(MLP1_PATH, weights_only=True, map_location=location))
+     print("mlp1 loaded")
+     return mlp1
+
+ def load_tok_embeddings(path=TOK_EMBEDDING_PATH, vocab_size=92553, llm_hidden_size=4096, location='cpu'):
+     tok_embeddings = nn.Embedding(vocab_size, llm_hidden_size, padding_idx=2).to(device).to(torch.bfloat16)
+     tok_embeddings.load_state_dict(torch.load(path, weights_only=True, map_location=location))
+     print("tok_embedding loaded")
+     return tok_embeddings
+
+
+
+ def load_normed_tok_embeddings(vocab_size=92553, llm_hidden_size=4096, load_checkboard=False, location="cpu"):
+     tok_embeddings = nn.Embedding(vocab_size, llm_hidden_size, padding_idx=2).to(device).to(torch.bfloat16)
+     tok_embeddings.load_state_dict(torch.load(NORM_TOK_EMBEDDING_PATH, weights_only=True, map_location=location))
+     print("norm tok_embedding loaded")
+     if load_checkboard:
+         checkboard_norm = torch.load(NORM_PARAMS_PATH)  # (voc_size, 2): mu, sigma; de-normalize row-wise via pred * sigma + mu
+         print("normalization parameters (mu, sigma) loaded")
+         return tok_embeddings, checkboard_norm
+     return tok_embeddings
+
+
+ def load_tokenizer():
+     from transformers import AutoTokenizer
+     tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True)
+     return tokenizer
+
+ def load_perceiver_resampler(path=None, num_layers=4, checkpoint=None):
+     model = PerceiverResampler(dim=4096, depth=num_layers).to(device).to(torch.bfloat16)
+     if checkpoint is None and path is not None:
+         checkpoint = torch.load(path)
+     if path is not None:
+         print(f"Load from {path}")
+         if isinstance(checkpoint, dict):
+             if 'model_state_dict' in checkpoint.keys():
+                 model.load_state_dict(checkpoint['model_state_dict'])
+             else:
+                 raise FileNotFoundError("no key model_state_dict in ckpt")
+         else:
+             model.load_state_dict(checkpoint)
+     print(f"Model has a parameter scale of {sum(p.numel() for p in model.parameters())/1e9:.3f} B.")
+     return model
+
+ def load_mlp(path=None):
+     model = MLP(input_dim=256).to(device).to(torch.bfloat16)
+     if path is not None:
+         model.load_state_dict(torch.load(path))
+     print(f"Model has a parameter scale of {sum(p.numel() for p in model.parameters())/1e9:.3f} B.")
+     return model
+
+ def load_perceiver_resampler_2(model_path, num_layers=4, device=None):
+     if device is None:
+         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+     # Initialize the model
+     model = PerceiverResampler(dim=4096, depth=num_layers)
+
+     # Load pretrained weights
+     state_dict = torch.load(model_path, map_location='cpu', weights_only=False)
+     if 'model_state_dict' in state_dict.keys():
+         state_dict = state_dict['model_state_dict']
+
+     # Handle DDP checkpoints: strip the 'module.' prefix if present
+     new_state_dict = OrderedDict()
+
+     for key, value in state_dict.items():
+         if key.startswith('module.'):
+             new_key = key[len('module.'):]
+         else:
+             new_key = key
+         new_state_dict[new_key] = value
+     model = model.to_empty(device=device)
+     model.load_state_dict(new_state_dict)
+
+     # Convert the model to the desired dtype
+     model = model.to(torch.bfloat16)
+     return model
+
+ def load_pretrained_resampler(checkpoint_path, num_layers=6):
+     model = load_perceiver_resampler(num_layers=num_layers)
+     checkpoint = torch.load(checkpoint_path, map_location=device)
+     # print(checkpoint.keys())
+     # If the model was saved under DDP, handle the 'module.' prefix
+     if 'module.' in list(checkpoint.keys())[0]:
+         print("load ddp Perceiver Resampler....")
+         # model = torch.nn.parallel.DistributedDataParallel(model)
+         model.load_state_dict(checkpoint)
+     elif 'module.' in list(checkpoint['model'].keys())[0]:
+         print("load ddp Perceiver Resampler....")
+         # model = torch.nn.parallel.DistributedDataParallel(model)
+         model.load_state_dict(checkpoint['model'])
+     else:
+         print("load Perceiver Resampler ...")
+         model.load_state_dict(checkpoint)
+     return model
+
+ def load_optimizer(optimizer, path, resume):
+     # Load the checkpoint and restore the optimizer state
+     if resume:
+         ckpt = torch.load(path)
+         if 'optimizer_state_dict' not in ckpt:
+             return optimizer
+         # Handle the DDP case: strip the 'module.' prefix
+         optimizer_state_dict = ckpt['optimizer_state_dict']
+         new_optimizer_state_dict = OrderedDict()
+
+         for key, value in optimizer_state_dict.items():
+             if key.startswith('module.'):
+                 new_key = key[len('module.'):]
+             else:
+                 new_key = key
+             new_optimizer_state_dict[new_key] = value
+
+         optimizer.load_state_dict(new_optimizer_state_dict)
+
+     return optimizer
+
+ def load_scheduler(scheduler, path, resume):
+     if resume:
+         # Load the checkpoint and restore the scheduler state
+         ckpt = torch.load(path)
+         if 'scheduler_state_dict' not in ckpt:
+             return scheduler
+         # Handle the DDP case: strip the 'module.' prefix
+         scheduler_state_dict = ckpt['scheduler_state_dict']
+         new_scheduler_state_dict = OrderedDict()
+         for key, value in scheduler_state_dict.items():
+             if key.startswith('module.'):
+                 new_key = key[len('module.'):]
+             else:
+                 new_key = key
+             new_scheduler_state_dict[new_key] = value
+
+         scheduler.load_state_dict(new_scheduler_state_dict)
+
+     return scheduler
+
+ import numpy as np
+ from tqdm import tqdm
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ from torch.utils.data import DataLoader, Dataset, random_split
+
+ class BoundingBoxDataset(Dataset):
+     """Dataset class."""
+     def __init__(self, data, targets):
+         self.data = data
+         self.targets = targets
+
+     def __len__(self):
+         return len(self.data)
+
+     def __getitem__(self, idx):
+         x = self.data[idx]
+         y = self.targets[idx]
+         return x, y
+
+ class Transformer(nn.Module):
+     """Core Transformer model (encoder only)."""
+     def __init__(self, input_dim: int, model_dim: int, num_heads: int, num_layers: int, output_dim: int, norms=True):
+         super(Transformer, self).__init__()
+         self.embedding = nn.Linear(input_dim, model_dim)
+         if norms:
+             self.layer_norm = nn.LayerNorm(model_dim)
+         self.encoder_layer = nn.TransformerEncoderLayer(d_model=model_dim, nhead=num_heads, batch_first=True)
+         self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers, norm=self.layer_norm if norms else None)
+
+         self.decoder = nn.Linear(model_dim, output_dim)
+
+     def forward(self, x):
+         x = self.embedding(x)
+         x = self.transformer_encoder(x)
+         x = self.decoder(x)
+         return x
+
+ class OrderFormer:
+     """Wrapped model implementing data loading, training, evaluation, and inference."""
+     def __init__(self, model_path=None, max_nums=300, input_dim=4, model_dim=256, num_heads=8, num_layers=4, output_dim=1, device=torch.device("cuda"), label_name="turn", norm=False):
+         self.model = Transformer(input_dim, model_dim, num_heads, num_layers, output_dim, norms=norm).to_empty(device=device)
+         if isinstance(model_path, str):
+             self.model.load_state_dict(torch.load(model_path))
+
+         self.device = device
+         self.max_nums = max_nums
+         self.input_dim = input_dim
+         self.label_name = label_name
+
+     def _get_all_jsons(self, folder_path):
+         """Get the paths of all JSON files in a folder."""
+         files = os.listdir(folder_path)
+         json_files = [folder_path + f for f in files if os.path.isfile(os.path.join(folder_path, f)) and f.endswith('json')]
+         return json_files
+
+     def _preprocess(self, datas):
+         """
+         datas: should be consistent with the labelme data format.
+         Returns:
+             [
+                 [[x1, y1, x2, y2], label],
+                 ...
+             ]
+             with x, y in [0, 1].
+         """
+         data = datas['shapes']
+         h = datas['imageHeight']
+         w = datas['imageWidth']
+         example = []
+         X = []
+         Y = []
+         L = []
+         for obj in data:
+             # record the order index and the x/y coordinates
+             l = obj[self.label_name]
+             p = obj['points']
+             X.extend([p[0][0] / w, p[1][0] / w])
+             Y.extend([p[0][1] / h, p[1][1] / h])
+             L.append(l)
+         xmin = min(X)
+         ymin = min(Y)
+         # subtract the minimum from both coordinates for translation invariance
+         X = np.array(X) - xmin
+         Y = np.array(Y) - ymin
+         for i in range(len(L)):
+             coord = [X[2 * i], Y[2 * i], X[2 * i + 1], Y[2 * i + 1]]
+             example.append([coord, L[i]])
+         return example
+
+     def _sort_boxes(self, boxes):
+         """Sort boxes by their distance to (0, 0), so the input is a unique permutation.
+         boxes = [[[x1, y1, x2, y2], label], ...]
+         label may be a class label or the original bbox, which makes it easy to map bboxes to their order.
+         """
+         return sorted(boxes, key=lambda x: ((x[0][0] + x[0][2]) / 2) ** 2 + ((x[0][1] + x[0][3]) / 2) ** 2)
+
+     def _load_data(self, path, device=torch.device("cuda"), name='turn'):
+         """
+         Build tensors from JSON annotations.
+         Args:
+             path: folder containing the JSON/JPG files
+             max_nums: maximum number of chars in a single sample
+             name: key holding each char's order index
+         Returns:
+             (inputs, labels, originNs)
+         """
+         max_nums = self.max_nums
+         device = self.device
+         all_jsons = self._get_all_jsons(path)
+         raw = []
+         for j in all_jsons:
+             datas = load_json(j)
+             example = self._preprocess(datas)
+             raw.append(example)
+         transformed_inputs = []
+         transformed_labels = []
+         originNs = []  # record original sequence lengths, used to recover sequences from the outputs
+         for item in raw:
+             item = self._sort_boxes(item)
+             originNs.append(len(item))
+             lst = []
+             ls = []
+             for x in item:
+                 # lst = lst + [x1, y1, x2, y2]
+                 lst.extend(x[0])
+                 # ls records the labels
+                 ls.append(int(x[1]))
+             # pad inputs and labels with zeros up to max_nums
+             lst.extend([0] * self.input_dim * (max_nums - len(item)))
+             ls.extend([0] * (max_nums - len(item)))
+
+             transformed_inputs.append(lst)
+             transformed_labels.append(ls)
+         return torch.tensor(transformed_inputs, dtype=torch.float32).reshape((-1, max_nums, self.input_dim)).to(device), torch.tensor(transformed_labels, dtype=torch.float32).reshape((-1, self.max_nums, 1)).to(device), originNs
+
+     def _decode(self, output, N, batch_size=1):
+         """Decode the ordering from the output tensor."""
+         new_output = output.reshape((batch_size, -1))[:, :N]
+         sorted_indices = torch.argsort(new_output, dim=1)
+         ranks = torch.argsort(sorted_indices, dim=1)
+         return ranks + 1
+
+     def _get_acc(self, tensor1, tensor2):
+         """Compute the fraction of positions at which two same-shaped tensors agree."""
+         # Ensure the tensors are of the same shape
+         assert tensor1.shape == tensor2.shape, "Tensors must have the same shape"
+
+         # Create a boolean mask where the values are equal
+         equal_mask = tensor1 == tensor2
+
+         # Calculate the proportion of equal values
+         equal_count = torch.sum(equal_mask).item()
+         total_elements = torch.numel(tensor1)
+
+         proportion_equal = equal_count / total_elements
+
+         return proportion_equal
+
+
+     def train(self, path, batch_size=4, lr=0.0002, weight_decay=0, epochs=1000, verbose=True):
+         """Training loop."""
+         if verbose:
+             print("Loading dataset...")
+         data, labels, _ = self._load_data(path=path, device=self.device, name=self.label_name)
+
+         # TODO: configurable training strategy
+         optimizer = optim.AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay, amsgrad=True)
+         # scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
+         scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2, eta_min=1e-6)
+         criterion = torch.nn.MSELoss()
+
+         dataset = BoundingBoxDataset(data, labels)
+         dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
+         min_loss = float("inf")
+         if verbose:
+             print("Start training...")
+         for epoch in range(epochs):
+             losses = 0
+             for batch_idx, (inputs, y) in enumerate(tqdm(dataloader)):
+                 optimizer.zero_grad()
+                 outputs = self.model(inputs)
+
+                 loss = criterion(outputs, y)
+                 loss.backward()
+                 losses += loss.item()
+                 scheduler.step(epoch + batch_idx / len(dataloader))
+                 optimizer.step()
+                 # scheduler.step()
+
+             if verbose:
+                 print(f"Epoch {epoch+1}/{epochs}, Loss: {losses/len(dataloader)}")
+             if losses / len(dataloader) < min_loss:
+                 min_loss = losses / len(dataloader)
+                 if verbose:
+                     print("Saving best model...")
+                 torch.save(self.model.state_dict(), 'best.pth')
+
+
+     def eval(self, path, verbose=False):
+         """Evaluate on a dataset, computing the average loss and mAP."""
+         testdata, testlabels, Ns = self._load_data(path=path, device=self.device, name=self.label_name)
+         dataset = BoundingBoxDataset(testdata, testlabels)
+         testloader = DataLoader(dataset, batch_size=1, shuffle=False)
+
+         self.model.eval()
+         losses = 0
+         mAP = 0
+         if verbose:
+             print("Evaluation...")
+         criterion = nn.MSELoss()
+         for i, (inputs, y) in enumerate(testloader):
+             outputs = self.model(inputs)
+             pred = self._decode(outputs, Ns[i])
+             gt = y.reshape((1, -1))[:, :Ns[i]]
+             loss = criterion(pred.float(), gt)  # cast the integer ranks to float for MSE
+             acc = self._get_acc(pred, gt)
+             if verbose:
+                 print("Pred:", pred)
+                 print("GT:", gt)
+                 print("loss= ", loss.item())
+                 print("acc= ", acc, '\n')
+             losses += loss.item()
+             # mAP += 1 if acc == 1 else 0
+             mAP += acc
+         print(f"Test MSELoss= {losses/len(testloader):.4f}\nTest mAP= {mAP/len(testloader):.4f}")
+
+     def predict(self, datas, jpg_path=None, save_path=None, verbose=False):
+         """
+         Run prediction on a single sample; with an image and a save path, verbose visualization is available.
+         Returns a dict whose keys are the reading order and whose values are the box positions.
+         """
+         if save_path:
+             os.makedirs(save_path, exist_ok=True)
+         import time
+         st = time.time()
+         data = datas['shapes']
+         h = datas['imageHeight']
+         w = datas['imageWidth']
+         example = []
+         X = []
+         Y = []
+         Ls = []
+         for obj in data:
+             # record the x/y coordinates
+             p = obj['points']
+             flat_p = [p[0][0], p[0][1], p[1][0], p[1][1]]
+             Ls.append(flat_p)
+             X.extend([p[0][0] / w, p[1][0] / w])
+             Y.extend([p[0][1] / h, p[1][1] / h])
+         xmin = min(X)
+         ymin = min(Y)
+         # subtract the minimum from both coordinates for translation invariance
+         X = np.array(X) - xmin
+         Y = np.array(Y) - ymin
+         for i in range(len(data)):
+             coord = [X[2 * i], Y[2 * i], X[2 * i + 1], Y[2 * i + 1]]
+             example.append([coord, Ls[i]])
+         example = self._sort_boxes(example)
+         inputs = []
+         labels = []
+         for coord in example:
+             inputs.extend(coord[0])
+             labels.append(coord[1])
+         inputs.extend([0] * self.input_dim * (self.max_nums - len(example)))
+
+         x = torch.tensor(inputs, dtype=torch.bfloat16).reshape((-1, self.max_nums, self.input_dim)).to(self.device)
+
+         mstart = time.time()
+         self.model.eval()
+         y = self.model(x)
+         mtime = time.time() - mstart
+         pred = self._decode(y, len(example)).squeeze().tolist()
+         results = {}
+         if isinstance(pred, int):
+             pred = [pred]
+         for p, l in zip(pred, labels):
+             results[p] = l
+
+         post_start = time.time()
+         results = self.postprocess(dict(sorted(results.items(), key=lambda item: item[0])), w, h, save_path, jpg_path)
+         ptime = time.time() - post_start
+         if verbose:
+             print(f"Using {time.time()-st:.3f}s to sort boxes, with {mtime:.3f}s on OrderFormer inference, {ptime:.3f}s on postprocess.")
+         if verbose and isinstance(jpg_path, str) and isinstance(save_path, str):
+             import cv2
+             frame = cv2.imread(jpg_path)
+
+             for idx, points in results.items():
+                 x1, y1, x2, y2 = int(points[0]), int(points[1]), int(points[2]), int(points[3])
+                 cv2.rectangle(frame, (x1, y1), (x2, y2), thickness=2, color=(255, 0, 0), lineType=cv2.LINE_AA)
+                 label_position = ((x1 + x2) // 2, (y1 + y2) // 2)  # adjust the label position as needed
+                 cv2.putText(frame, str(idx), label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+             name = jpg_path.split("/")[-1]
+             cv2.imwrite(save_path + "ordered_" + name, frame)
+
+         return dict(sorted(results.items(), key=lambda item: item[0]))
+
+
+     def postprocess(self, results, width, height, save_dir, jpg_path, vis=True, max_iters=5):
+         def ordered_permute(b1, b2, b3):
+             ws = [b1[2] - b1[0], b2[2] - b2[0], b3[2] - b3[0]]
+             hs = [b1[3] - b1[1], b2[3] - b2[1], b3[3] - b3[1]]
+             c1 = [(b1[0] + b1[2]) / 2, (b1[1] + b1[3]) / 2]
+             c2 = [(b2[0] + b2[2]) / 2, (b2[1] + b2[3]) / 2]
+             c3 = [(b3[0] + b3[2]) / 2, (b3[1] + b3[3]) / 2]
+             s = [ws[0] * hs[0], ws[1] * hs[1], ws[2] * hs[2]]
+             # three boxes on one horizontal line with similar sizes: reorder right-to-left by center x
+             if max(abs(c1[1] - c2[1]), abs(c1[1] - c3[1]), abs(c2[1] - c3[1])) < min(hs) and min(s) / max(s) > 0.7:
+                 c = [c1[0], c2[0], c3[0]]
+             else:
+                 c = [3, 2, 1]
+             indexed_c = list(enumerate(c))
+
+             sorted_by_value = sorted(indexed_c, key=lambda x: x[1], reverse=True)
+
+             sorted_indices = [index for index, value in sorted_by_value]
+
+             return sorted_indices
+
+         index = list(results.keys())
+         boxes = [[item[0] / width, item[1] / height, item[2] / width, item[3] / height] for item in list(results.values())]
+         for i in range(len(index) - 2):
+             now = boxes[i]
+             next_1 = boxes[i + 1]
+             next_2 = boxes[i + 2]
+             order = ordered_permute(now, next_1, next_2)
+
+             j = i + 1
+             boxes[i], boxes[i + 1], boxes[i + 2] = boxes[i + order[0]], boxes[i + order[1]], boxes[i + order[2]]
+             results[j], results[j + 1], results[j + 2] = results[j + order[0]], results[j + order[1]], results[j + order[2]]
+
+         return results
+
+ def load_orderformer(path,
+                      max_num=50,
+                      input_dim=4,
+                      output_dim=1,
+                      model_dim=256,
+                      num_layers=4,
+                      num_heads=8,
+                      ):
+
+     model = OrderFormer(max_nums=max_num,
+                         num_layers=num_layers,
+                         input_dim=input_dim,
+                         output_dim=output_dim,
+                         model_dim=model_dim,
+                         num_heads=num_heads,
+                         model_path=path,
+                         label_name='turn',
+                         norm=False)
+     return model
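
OrderFormer.predict consumes a labelme-style dict of character boxes and returns the inferred reading order. A hedged sketch of the expected input and call; it assumes a CUDA device (OrderFormer defaults to cuda), the checkpoint path mirrors ORDERFORMER_CHECKPOINT in config/configu.py, and the box coordinates are made up for illustration:

# Hedged sketch: ordering two detected character boxes.
from models.model import load_orderformer

orderformer = load_orderformer("params/orderformer.pth", max_num=50)
datas = {
    "imageHeight": 1000,
    "imageWidth": 600,
    "shapes": [
        {"points": [[50, 40], [120, 130]]},   # one detected character box
        {"points": [[50, 160], [118, 250]]},  # the character below it
    ],
}
# Returns {reading_order: [x1, y1, x2, y2], ...}, 1-indexed by order.
order_to_box = orderformer.predict(datas)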
models/perceiver_resampler.py ADDED
@@ -0,0 +1,154 @@
+ import os
+ import torch
+ from torch import nn
+ from einops import rearrange, repeat
+ from torch import einsum
+
+
+ class PerceiverAttention(nn.Module):
+     def __init__(
+         self,
+         *,
+         dim,
+         dim_head=64,
+         heads=8
+     ):
+         super().__init__()
+         self.scale = dim_head ** -0.5
+         self.heads = heads
+         inner_dim = dim_head * heads  # 512
+
+         self.norm_media = nn.LayerNorm(dim)
+         self.norm_learns = nn.LayerNorm(dim)
+
+         self.to_q = nn.Linear(dim, inner_dim, bias=False)  # 4096×512
+         self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)  # 4096×1024
+         self.to_out = nn.Linear(inner_dim, dim, bias=False)  # 512×4096
+
+     def forward(self, x, learns):  # x(b, 256, 4096), learns(b, 3, 4096)
+         x = self.norm_media(x)
+         learns = self.norm_learns(learns)
+
+         b, n, h = *x.shape[:2], self.heads
+
+         q = self.to_q(learns)  # q(b, 3, 512)
+
+         # Note: in the PerceiverResampler, the inputs and the learned latents
+         # are concatenated before the attention computation
+         kv_input = torch.cat((x, learns), dim=-2)  # kv_input(b, 259, 4096)
+         k, v = self.to_kv(kv_input).chunk(2, dim=-1)  # (b, 259, 1024) -> k, v(b, 259, 512)
+
+         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))  # q(b, 8, 3, 64), k, v(b, 8, 259, 64)
+
+         q = q * self.scale
+
+         # attention computation
+         sim = einsum('b h i d, b h j d -> b h i j', q, k)
+         sim = sim - sim.amax(dim=-1, keepdim=True).detach()
+         attn = sim.softmax(dim=-1)  # sim, attn(b, 8, 3, 259)
+
+         out = einsum('b h i j, b h j d -> b h i d', attn, v)  # out(b, 8, 3, 64)
+         out = rearrange(out, 'b h n d -> b n (h d)')  # out(b, 3, 512)
+         return self.to_out(out)  # return(b, 3, 4096)
+
+
+ class PerceiverResampler(nn.Module):
+     def __init__(
+         self,
+         *,
+         dim,
+         depth=6,
+         dim_head=64,
+         heads=8,
+         num_learns=3,
+         ff_mult=4,
+     ):
+         super().__init__()
+         self.learns = nn.Parameter(torch.randn(num_learns, dim))
+
+         self.layers = nn.ModuleList([])
+         for _ in range(depth):
+             self.layers.append(
+                 nn.ModuleList(
+                     [
+                         PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
+                         FeedForward(dim=dim, mult=ff_mult),
+                     ]
+                 )
+             )
+
+         self.norm = nn.LayerNorm(dim)
+
+     def forward(self, x):
+         """
+         Args:
+             x (torch.Tensor): image features
+                 shape (b, 256, 4096)
+         Returns:
+             shape (b, 3, 4096) where 3 is self.num_learns
+         """
+         b, n, d = x.shape
+
+         learns = repeat(self.learns, "n d -> b n d", b=b)
+
+         for attn, ff in self.layers:
+             learns = attn(x, learns) + learns
+             learns = ff(learns) + learns
+
+         return self.norm(learns)
+
+ class MLP(nn.Module):
+     def __init__(self, input_dim, hidden_mult=4):  # input_dim = 256
+         super().__init__()
+         self.ff1 = FeedForward_2(input_dim, input_dim, hidden_mult)
+         self.ff2 = FeedForward_2(input_dim, 3, hidden_mult)
+
+     def forward(self, x):
+         x = x.permute(0, 2, 1)
+         x = self.ff1(x)
+         x = self.ff2(x)
+         x = x.permute(0, 2, 1)
+         return x
+
+ class MLP_6763(nn.Module):
+     def __init__(self, input_dim, output_dim, hidden_mult=2):
+         super().__init__()
+         self.ff1 = FeedForward_2(input_dim, output_dim, hidden_mult)
+         self.ff2 = FeedForward_2(output_dim, output_dim, hidden_mult)
+
+     def forward(self, x):
+         b, n, d = x.shape
+         x = x.view(b, -1)
+         x = self.ff1(x)
+         x = self.ff2(x)
+         return x
+
+ class FeedForward(nn.Module):
+     def __init__(self, dim, mult=4):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.LayerNorm(dim),
+             nn.Linear(dim, dim * mult),
+             nn.GELU(),
+             nn.Linear(dim * mult, dim),
+         )
+
+     def forward(self, x):
+         return self.net(x)
+
+ class FeedForward_2(nn.Module):
+     def __init__(self, input_dim, output_dim, mult=4):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.LayerNorm(input_dim),
+             nn.Linear(input_dim, input_dim * mult),
+             nn.GELU(),
+             nn.Linear(input_dim * mult, output_dim),
+         )
+
+     def forward(self, x):
+         return self.net(x)
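
The resampler compresses a 256-token visual sequence into num_learns (default 3) pseudo tokens of the same dimension. A minimal shape check, using a small dim so it runs quickly on CPU (the sizes here are illustrative, not the production dim=4096):

# Minimal sketch: verify the resampler's input/output shapes.
import torch
from models.perceiver_resampler import PerceiverResampler

resampler = PerceiverResampler(dim=64, depth=2, dim_head=16, heads=4, num_learns=3)
x = torch.randn(2, 256, 64)   # (batch, visual tokens, dim)
out = resampler(x)
print(out.shape)              # torch.Size([2, 3, 64])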
models/similarity.py ADDED
@@ -0,0 +1,53 @@
+
+
+ from __future__ import print_function
+ import torch
+ import torch.nn as nn
+ from .model import *
+ import torch.nn.functional as F
+
+ def vq_cos_sim(embedding, input_tensor, use_dynamic_p=False, ddp=False):
+     if ddp:
+         embedding_weight = embedding.module.weight
+     else:
+         embedding_weight = embedding.weight
+
+     # Cosine similarity: L2-normalize the inputs and the embedding rows
+     input_norm = F.normalize(input_tensor, p=2, dim=2)
+     embedding_norm = F.normalize(embedding_weight, p=2, dim=1)
+
+     similarity = torch.matmul(input_norm, embedding_norm.t())
+     cos_sim_values, indices = similarity.max(dim=2)
+
+     if use_dynamic_p:
+         return indices.squeeze(), cos_sim_values.squeeze()
+
+     return indices.squeeze()
+
+
+ class RatioLossWithMSELoss(nn.Module):
+     def __init__(self, total_iters, min_weight=0.001, max_weight=1, eps=torch.tensor(1e-3, dtype=torch.bfloat16)):
+         super(RatioLossWithMSELoss, self).__init__()
+         self.eps = eps
+         self.total_iters = total_iters
+         self.min_weight = min_weight
+         self.max_weight = max_weight
+         self.mse = nn.MSELoss()
+         self.losses = {}
+
+     def forward(self, output, target, current_iter):
+         # Ramp the ratio-loss weight linearly from min_weight to max_weight
+         weight = self.min_weight + (self.max_weight - self.min_weight) * (current_iter / self.total_iters)
+         loss = (torch.abs(target - output)) / (torch.abs(target) + self.eps)
+         weighted_loss = weight * loss
+
+         self.losses['ratio'] = loss.mean()
+         self.losses['mse'] = self.mse(output, target)
+         return weighted_loss.mean() + self.mse(output, target)
+
+
+ if __name__ == '__main__':
+     pass
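
vq_cos_sim performs a nearest-neighbor lookup against an embedding table by cosine similarity: both the (batch, seq, dim) inputs and the codebook rows are L2-normalized, then the argmax similarity is taken per position. A minimal sketch on random data, assuming the repo's heavy imports (pulled in via `from .model import *`) resolve; the codebook sizes are toy values:

# Hedged sketch: nearest codebook entries for 5 pseudo tokens.
import torch
import torch.nn as nn
from models.similarity import vq_cos_sim

emb = nn.Embedding(100, 32)        # toy codebook: 100 entries of dim 32
x = torch.randn(1, 5, 32)          # one sequence of 5 pseudo tokens
indices, sims = vq_cos_sim(emb, x, use_dynamic_p=True)
print(indices.shape, sims.shape)   # torch.Size([5]) torch.Size([5])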
requirements.txt ADDED
@@ -0,0 +1,86 @@
+ accelerate==1.3.0
+ aiohappyeyeballs
+ aiohttp==3.10.5
+ aiosignal==1.2.0
+ async-timeout
+ attrs==23.1.0
+ Bottleneck
+ Brotli
+ certifi
+ charset-normalizer
+ contourpy
+ cycler
+ decord==0.6.0
+ dill
+ einops==0.8.0
+ filelock
+
+ fonttools==4.51.0
+ frozenlist==1.4.0
+ fsspec
+ gmpy2
+ huggingface_hub
+ idna
+ importlib-metadata==7.0.1
+ importlib_resources
+ jieba
+ Jinja2
+ joblib==1.4.2
+ kiwisolver
+ Levenshtein==0.26.1
+ MarkupSafe
+ matplotlib==3.9.2
+ mkl-service==2.4.0
+ mkl_fft
+ mkl_random
+ mpmath
+ multidict==6.0.4
+ multiprocess==0.70.15
+ networkx
+ numexpr
+ numpy
+ OpenCC==1.1.6
+ opencv-python
+ opencv-python-headless
+ packaging
+ pandas
+ pillow
+ propcache==0.2.1
+ psutil
+ py-cpuinfo
+ pyarrow
+ pyarrow-hotfix
+ pyparsing
+ PySocks
+ python-dateutil
+ pytz
+ PyYAML
+ RapidFuzz==3.11.0
+ regex==2024.11.6
+ requests
+ rouge
+ safetensors==0.5.2
+ scikit-learn==1.5.1
+ scipy
+ seaborn
+ sentencepiece==0.2.0
+ six
+ sympy
+ threadpoolctl==3.5.0
+ timm==0.9.12
+ tokenizers==0.20.3
+ torch==2.4.0
+ torchaudio==2.4.0
+ torchvision==0.19.0
+ tqdm
+ transformers==4.45.2
+ triton==3.0.0
+ typing_extensions
+ tzdata
+ ultralytics
+ unicodedata2
+ urllib3
+ xxhash
+ yarl==1.11.0
+ zipp==3.17.0
+ ./flash_attn-2.6.1+cu118torch2.4cxx11abiFALSE-cp39-cp39-linux_x86_64.whl
test.py ADDED
@@ -0,0 +1 @@
+ from InternVL import modeling_internvl_chat