moxeeeem committed on
Commit 37fbef7 · verified · 1 Parent(s): ba51cc5

Upload inference_min.py with huggingface_hub
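
For reference, a call along these lines with the huggingface_hub client would produce a commit like this one; this is a sketch, not the exact command used, and the repo id is a placeholder.

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="inference_min.py",
    path_in_repo="inference_min.py",
    repo_id="username/repo-id",  # placeholder repo id
    commit_message="Upload inference_min.py with huggingface_hub",
)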

Files changed (1)
  1. inference_min.py +221 -0
inference_min.py ADDED
@@ -0,0 +1,221 @@
+ # -*- coding: utf-8 -*-
+ from __future__ import annotations
+ import os, json, re
+ from pathlib import Path
+ from typing import List, Optional
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from PIL import Image
+
+ try:
+     import open_clip
+     HAS_OPENCLIP = True
+ except Exception:
+     HAS_OPENCLIP = False
+
+ from transformers import (
+     AutoModelForCausalLM, AutoTokenizer,
+     CLIPImageProcessor as HFCLIPImageProcessor,
+     CLIPModel as HFCLIPModel,
+ )
+
+ class PrefixProjector(nn.Module):
+     # Projects a single CLIP image embedding into `tokens` GPT-2 prefix token embeddings.
+     def __init__(self, in_dim: int, out_dim: int, tokens: int, p_drop: float = 0.05):
+         super().__init__()
+         hidden = max(512, out_dim * 2)
+         self.fc1 = nn.Linear(in_dim, hidden)
+         self.fc2 = nn.Linear(hidden, out_dim * tokens)
+         self.ln = nn.LayerNorm(out_dim)
+         self.tokens = tokens
+         self.drop = nn.Dropout(p_drop)
+         self.alpha = nn.Parameter(torch.tensor(0.5))
+         nn.init.xavier_uniform_(self.fc1.weight, gain=1.0)
+         nn.init.zeros_(self.fc1.bias)
+         nn.init.xavier_uniform_(self.fc2.weight, gain=0.5)
+         nn.init.zeros_(self.fc2.bias)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         y = F.gelu(self.fc1(x))
+         y = self.fc2(y).view(x.size(0), self.tokens, -1)
+         y = self.ln(y)
+         y = self.drop(self.alpha * y)
+         return y
+
+ class CLIPBackend:
+     def __init__(self, repo_or_kind: str, device: str):
+         self.device = device
+         self.repo_or_kind = repo_or_kind
+
+         # Determine the backend type
+         if 'BiomedCLIP' in repo_or_kind or 'microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224' in repo_or_kind:
+             # BiomedCLIP via open_clip
+             assert HAS_OPENCLIP, "open_clip is required for BiomedCLIP"
+             if not repo_or_kind.startswith('microsoft/'):
+                 repo_or_kind = 'microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224'
+             model_name = f'hf-hub:{repo_or_kind}'
+             self.model, self.preprocess, _ = open_clip.create_model_and_transforms(model_name)
+             self.model = self.model.to(device).eval()
+             self.kind = "open_clip"
+             self.processor = None
+         elif "/" in repo_or_kind and 'pubmed-clip' in repo_or_kind:
+             # PubMedCLIP via HF transformers
+             self.model = HFCLIPModel.from_pretrained(repo_or_kind).to(device).eval()
+             self.processor = HFCLIPImageProcessor.from_pretrained(repo_or_kind)
+             self.kind = "hf_clip"
+             self.preprocess = None
+         elif "/" in repo_or_kind or repo_or_kind.startswith('redlessone/'):
+             # DermLIP via open_clip
+             assert HAS_OPENCLIP, "open_clip is required for DermLIP"
+             model_name = f"hf-hub:{repo_or_kind}"
+             self.model, self.preprocess, _ = open_clip.create_model_and_transforms(model_name)
+             self.model = self.model.to(device).eval()
+             self.kind = "open_clip"
+             self.processor = None
+         else:
+             # Fallback for other models, including the case where only a backend kind is passed
+             try:
+                 # Try to infer the backend from the name
+                 if 'biomedclip' in repo_or_kind.lower() or 'biomed' in repo_or_kind.lower():
+                     assert HAS_OPENCLIP, "open_clip is required for BiomedCLIP"
+                     model_name = "hf-hub:microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224"
+                     self.model, self.preprocess, _ = open_clip.create_model_and_transforms(model_name)
+                     self.model = self.model.to(device).eval()
+                     self.kind = "open_clip"
+                     self.processor = None
+                 elif 'dermlip' in repo_or_kind.lower():
+                     assert HAS_OPENCLIP, "open_clip is required for DermLIP"
+                     model_name = "hf-hub:redlessone/DermLIP_ViT-B-16"
+                     self.model, self.preprocess, _ = open_clip.create_model_and_transforms(model_name)
+                     self.model = self.model.to(device).eval()
+                     self.kind = "open_clip"
+                     self.processor = None
+                 elif 'pubmed' in repo_or_kind.lower():
+                     # PubMedCLIP via HF transformers
+                     repo_name = "flaviagiammarino/pubmed-clip-vit-base-patch32"
+                     self.model = HFCLIPModel.from_pretrained(repo_name).to(device).eval()
+                     self.processor = HFCLIPImageProcessor.from_pretrained(repo_name)
+                     self.kind = "hf_clip"
+                     self.preprocess = None
+                 else:
+                     raise ValueError(f"Unknown model type: {repo_or_kind}")
+             except Exception as e:
+                 # Last resort: try to load it as a plain HF CLIP model
+                 try:
+                     self.model = HFCLIPModel.from_pretrained(repo_or_kind).to(device).eval()
+                     self.processor = HFCLIPImageProcessor.from_pretrained(repo_or_kind)
+                     self.kind = "hf_clip"
+                     self.preprocess = None
+                 except Exception:
+                     raise ValueError(f"Failed to load model {repo_or_kind}: {e}")
+
+         # Determine the image-embedding dimensionality
+         if self.kind == "open_clip":
+             with torch.no_grad():
+                 img = Image.new('RGB', (224, 224), color=0)
+                 x = self.preprocess(img).unsqueeze(0).to(device)
+                 feat = self.model.encode_image(x)
+                 self.embed_dim = int(feat.shape[-1])
+         else:
+             self.embed_dim = int(self.model.config.projection_dim)
+
+     @torch.inference_mode()
+     def encode_images(self, paths: List[str]) -> torch.Tensor:
+         ims = []
+         if self.kind == "open_clip":
+             for p in paths:
+                 try:
+                     im = Image.open(p).convert("RGB")
+                 except Exception:
+                     # Fall back to a black placeholder image if a path cannot be read
+                     im = Image.new("RGB", (224, 224), color=0)
+                 ims.append(self.preprocess(im))
+             x = torch.stack(ims).to(self.device)
+             f = self.model.encode_image(x)
+         else:
+             # HF CLIP (PubMedCLIP)
+             for p in paths:
+                 try:
+                     im = Image.open(p).convert("RGB")
+                 except Exception:
+                     im = Image.new("RGB", (224, 224), color=0)
+                 ims.append(im)
+             proc = self.processor(images=ims, return_tensors='pt')
+             x = proc['pixel_values'].to(self.device)
+             f = self.model.get_image_features(pixel_values=x)
+         return F.normalize(f, dim=-1)
+
+ class Captioner(nn.Module):
+     def __init__(self, gpt2_name: str, clip_repo: str, prefix_tokens: int, prompt: str, device: str):
+         super().__init__()
+         self.device = device
+         self.prompt = prompt
+         self.tok = AutoTokenizer.from_pretrained(gpt2_name)
+         if self.tok.pad_token is None:
+             self.tok.pad_token = self.tok.eos_token
+         self.gpt2 = AutoModelForCausalLM.from_pretrained(gpt2_name).to(device).eval()
+         self.clip = CLIPBackend(clip_repo, device)
+         self.prefix = PrefixProjector(self.clip.embed_dim, int(self.gpt2.config.n_embd), prefix_tokens).to(device).eval()
+
+     @torch.inference_mode()
+     def generate(self, img_paths: List[str], prompt: Optional[str] = None) -> List[str]:
+         pr = prompt or self.prompt or ""
+         f = self.clip.encode_images(img_paths)
+         pref = self.prefix(f)
+         # Prepend the projected image prefix to the embedded text prompt
+         ids = self.tok([pr] * pref.size(0), return_tensors='pt', padding=True, truncation=True).to(self.device)
+         emb_prompt = self.gpt2.transformer.wte(ids['input_ids'])
+         inputs_embeds = torch.cat([pref, emb_prompt], dim=1)
+         attn = torch.ones(inputs_embeds.size()[:-1], dtype=torch.long, device=self.device)
+         gen = self.gpt2.generate(
+             inputs_embeds=inputs_embeds, attention_mask=attn,
+             max_new_tokens=60, min_new_tokens=24, num_beams=4,
+             no_repeat_ngram_size=4, repetition_penalty=1.15, length_penalty=0.6,
+             pad_token_id=self.tok.eos_token_id, eos_token_id=self.tok.eos_token_id, early_stopping=True
+         )
+         outs = self.tok.batch_decode(gen, skip_special_tokens=True)
+         res = []
+         for s in outs:
+             # Strip the echoed prompt from the decoded text, if present
+             cut = s.find(pr)
+             if cut >= 0:
+                 s = s[cut + len(pr):]
+             res.append(s.strip())
+         return res
+
+ def load_model(repo_dir: str | os.PathLike) -> Captioner:
+     repo_dir = Path(repo_dir)
+     cfgs = sorted(repo_dir.glob("final_captioner_*.json"))
+     if not cfgs:
+         raise FileNotFoundError("final_captioner_*.json not found in repo snapshot")
+     data = json.loads(cfgs[-1].read_text(encoding='utf-8'))
+     gpt2 = data.get("gpt2_name", "gpt2-medium")
+
+     # Resolve the CLIP repository, with support for TimmModel checkpoints
+     clip_repo = data.get("clip_weight_path", data.get("clip_repo", data.get("clip_backend_kind", "")))
+
+     # If the CLIP repo is not recorded in the JSON, try to infer it from the checkpoint file name
+     if not clip_repo or clip_repo in ["open_clip", "hf_clip"]:
+         ckpts = sorted(repo_dir.glob("final_captioner_*.pt"))
+         if ckpts:
+             ckpt_name = str(ckpts[-1])
+             if "TimmModel" in ckpt_name:
+                 clip_repo = "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224"
+             elif "VisionTransformer" in ckpt_name:
+                 clip_repo = "redlessone/DermLIP_ViT-B-16"
+             elif "CLIPModel" in ckpt_name:
+                 clip_repo = "flaviagiammarino/pubmed-clip-vit-base-patch32"
+             elif "biomedclip" in ckpt_name.lower():
+                 clip_repo = "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224"
+
+     prefix_tokens = int(data.get("prefix_tokens", 32))
+     prompt = data.get("prompt", "Describe the skin lesion.")
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     model = Captioner(gpt2, clip_repo, prefix_tokens, prompt, device).to(device).eval()
+     # Load the checkpoint state_dict
+     ckpts = sorted(repo_dir.glob("final_captioner_*.pt"))
+     if not ckpts:
+         raise FileNotFoundError("final_captioner_*.pt not found in repo snapshot")
+     state = torch.load(ckpts[-1], map_location="cpu")
+     sd = state.get("model", state)
+     model.load_state_dict(sd, strict=False)
+     return model
+
+ def generate(model: Captioner, img_paths: List[str], prompt: Optional[str] = None) -> List[str]:
+     return model.generate(img_paths, prompt=prompt)
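
A minimal usage sketch (not part of the uploaded file): it assumes the repository also contains the final_captioner_*.json config and final_captioner_*.pt checkpoint that load_model expects; the repo id and image path below are placeholders.

from huggingface_hub import snapshot_download
from inference_min import load_model, generate

repo_dir = snapshot_download("username/repo-id")  # placeholder repo id
model = load_model(repo_dir)
print(generate(model, ["lesion.jpg"])[0])  # placeholder local image path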