chanakarnuac committed
Commit 956dd5c · verified · Parent: e0ffd8b

Create app.py for G01_Computer_Vision_Joy-caption-pre-alpha

Files changed (1): app.py (+129, -0)
app.py ADDED
@@ -0,0 +1,129 @@
import spaces
import gradio as gr
from torch import nn
from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM
from pathlib import Path
import torch
import torch.amp.autocast_mode
from PIL import Image
import os


CLIP_PATH = "google/siglip-so400m-patch14-384"
VLM_PROMPT = "A descriptive caption for this image:\n"
MODEL_PATH = "meta-llama/Meta-Llama-3.1-8B"
CHECKPOINT_PATH = Path("wpkklhc6")
TITLE = "<h1><center>JoyCaption Pre-Alpha (2024-07-30a)</center></h1>"

HF_TOKEN = os.environ.get("HF_TOKEN", None)
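# Note: MODEL_PATH points at a gated repository; huggingface_hub reads
# HF_TOKEN from the environment automatically when downloading, assuming
# the token's account has been granted access to the model.
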
class ImageAdapter(nn.Module):
    """Two-layer MLP projecting SigLIP vision features into the LLM embedding space."""

    def __init__(self, input_features: int, output_features: int):
        super().__init__()
        self.linear1 = nn.Linear(input_features, output_features)
        self.activation = nn.GELU()
        self.linear2 = nn.Linear(output_features, output_features)

    def forward(self, vision_outputs: torch.Tensor):
        x = self.linear1(vision_outputs)
        x = self.activation(x)
        x = self.linear2(x)
        return x
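
# For orientation (a sketch, not executed): with SigLIP so400m's hidden size
# of 1152 and Llama-3.1-8B's hidden size of 4096 (both assumed here; the real
# values are read from the model configs below), the adapter maps
#   (batch, n_patches, 1152) -> (batch, n_patches, 4096)
# e.g. ImageAdapter(1152, 4096)(torch.randn(1, n_patches, 1152)) for any n_patches.
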
# Load CLIP
print("Loading CLIP")
clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)
clip_model = AutoModel.from_pretrained(CLIP_PATH)
clip_model = clip_model.vision_model  # keep only the vision tower
clip_model.eval()
clip_model.requires_grad_(False)  # inference only; weights stay frozen
clip_model.to("cuda")


# Tokenizer
print("Loading tokenizer")
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)
assert isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)), f"Tokenizer is of type {type(tokenizer)}"

# LLM
print("Loading LLM")
text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto", torch_dtype=torch.bfloat16)
text_model.eval()

# Image Adapter
print("Loading image adapter")
image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size)
image_adapter.load_state_dict(torch.load(CHECKPOINT_PATH / "image_adapter.pt", map_location="cpu"))
image_adapter.eval()
image_adapter.to("cuda")
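# Note: the adapter widths are read from the two model configs, so an
# image_adapter.pt trained against different towers fails load_state_dict
# with an explicit shape mismatch rather than loading silently.
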

@spaces.GPU()
@torch.no_grad()
def stream_chat(input_image: Image.Image):
    torch.cuda.empty_cache()

    # Preprocess image
    image = clip_processor(images=input_image, return_tensors='pt').pixel_values
    image = image.to('cuda')

    # Tokenize the prompt
    prompt = tokenizer.encode(VLM_PROMPT, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)

    # Embed image
    with torch.amp.autocast_mode.autocast('cuda', enabled=True):
        vision_outputs = clip_model(pixel_values=image, output_hidden_states=True)
        image_features = vision_outputs.hidden_states[-2]
        embedded_images = image_adapter(image_features)
        embedded_images = embedded_images.to('cuda')
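    # hidden_states[-2] takes the penultimate vision layer rather than the
    # last one, the same choice LLaVA-style models make; the final layer is
    # shaped by the contrastive training objective.
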
    # Embed prompt
    prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))
    assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}"
    embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))

    # Construct prompts
    inputs_embeds = torch.cat([
        embedded_bos.expand(embedded_images.shape[0], -1, -1),
        embedded_images.to(dtype=embedded_bos.dtype),
        prompt_embeds.expand(embedded_images.shape[0], -1, -1),
    ], dim=1)
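    # Layout: [BOS][image tokens][prompt tokens]. input_ids below mirrors this
    # with zeros standing in for the image positions, so its length matches
    # inputs_embeds and the output can be trimmed by input_ids.shape[1].
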
    input_ids = torch.cat([
        torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),
        torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),
        prompt,
    ], dim=1).to('cuda')
    attention_mask = torch.ones_like(input_ids)

    # Greedy alternative:
    # generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=False, suppress_tokens=None)
    generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, top_k=10, temperature=0.5, suppress_tokens=None)
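    # top_k=10 with temperature=0.5 samples captions that vary between runs
    # while staying fairly conservative; the returned ids include the prompt
    # portion, which is trimmed off next.
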
    # Trim off the prompt
    generate_ids = generate_ids[:, input_ids.shape[1]:]
    if generate_ids[0][-1] == tokenizer.eos_token_id:
        generate_ids = generate_ids[:, :-1]

    caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]

    return caption.strip()


with gr.Blocks() as demo:
    gr.HTML(TITLE)
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Image")
            run_button = gr.Button("Caption")

        with gr.Column():
            output_caption = gr.Textbox(label="Caption")

    run_button.click(fn=stream_chat, inputs=[input_image], outputs=[output_caption])


if __name__ == "__main__":
    demo.launch()
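
For a quick sanity check outside the Gradio UI, the loaded captioner can also be called directly (a sketch; "example.jpg" is a placeholder path, not part of this commit):

    from PIL import Image
    caption = stream_chat(Image.open("example.jpg").convert("RGB"))
    print(caption)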