import torch

from transformers import Qwen3VLForConditionalGeneration, AutoTokenizer


def load_text_encoder(
    text_encoder_ckpt: str,
    device: torch.device = torch.device("cpu"),
    torch_dtype: torch.dtype = torch.bfloat16,
):
    """Load the Qwen3-VL text encoder and its tokenizer from a local checkpoint.

    Returns a ``(tokenizer, model)`` tuple with the model moved to ``device``
    and switched to eval mode.
    """
    loader = Qwen3VLForConditionalGeneration  # or AutoModelForVision2Seq
    model = loader.from_pretrained(
        text_encoder_ckpt,
        torch_dtype=torch_dtype,
        local_files_only=True,  # resolve the checkpoint from disk, never the Hub
        trust_remote_code=True,  # allow custom modeling code bundled with the checkpoint
    ).to(device).eval()
    tokenizer = AutoTokenizer.from_pretrained(
        text_encoder_ckpt,
        local_files_only=True,
        trust_remote_code=True,
    )
    return tokenizer, model
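

# Minimal usage sketch (assumptions: "/path/to/qwen3-vl-checkpoint" is a
# hypothetical local checkpoint directory, and the model accepts a text-only
# forward pass; adjust both to your setup).
if __name__ == "__main__":
    tokenizer, model = load_text_encoder(
        "/path/to/qwen3-vl-checkpoint",
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    )
    inputs = tokenizer("a photo of a cat", return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)
    # The last hidden layer serves as the text embedding: (batch, seq_len, hidden_dim).
    print(outputs.hidden_states[-1].shape)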