Vasudevakrishna committed
Commit 2a9f4d0
1 Parent(s): 7417ef5

Phase2 Training added.

app.py CHANGED
@@ -1,55 +1,93 @@
 
  import torch
  from transformers import AutoTokenizer, AutoProcessor
- from model import CustomClipPhi2
- import gradio as gr

- clip_model_name = "openai/clip-vit-base-patch32"
- phi2_model_name = "microsoft/phi-2"

- tokenizer = AutoTokenizer.from_pretrained(phi2_model_name, trust_remote_code=True)
- processor = AutoProcessor.from_pretrained(clip_model_name)
- tokenizer.pad_token = tokenizer.eos_token

- IMAGE_TOKEN_ID = 23903  # token for word Comments
- device = "cuda" if torch.cuda.is_available() else "cpu"
- # max_tokens = 30
- model = CustomClipPhi2(tokenizer, phi2_model_name, clip_model_name, clip_embed=768, phi_embed=2560)

- def generate(images, max_tokens):
-     image = processor(images=images, return_tensors="pt").to(device)
-     clip_outputs = model.clip_model(**image)
      # remove cls token
      images = clip_outputs.last_hidden_state[:, 1:, :]
-     image_embeddings = model.projection_layer(images).to(torch.float16)
-
-     batch_size = images.size()[0]
-     predicted_caption = torch.full((batch_size, max_tokens), model.EOS_TOKEN_ID, dtype=torch.long, device=device)
-     img_token_tensor = torch.tensor(IMAGE_TOKEN_ID).repeat(batch_size, 1)
-     img_token_embeds = model.phi2_model.model.embed_tokens(img_token_tensor.to(image_embeddings.device))
-     combined_embeds = torch.cat([image_embeddings, img_token_embeds], dim=1)
-
-     for pos in range(max_tokens - 1):
-         model_output_logits = model.phi2_model.forward(inputs_embeds=combined_embeds)['logits']
-         predicted_word_token_logits = model_output_logits[:, -1, :].unsqueeze(1)
-         predicted_word_token = torch.argmax(predicted_word_token_logits, dim=-1)
-         predicted_caption[:, pos] = predicted_word_token.view(1, -1).to('cpu')
-         next_token_embeds = model.phi2_model.model.embed_tokens(predicted_word_token)
-         combined_embeds = torch.cat([combined_embeds, next_token_embeds], dim=1)
-     predicted_captions_decoded = tokenizer.batch_decode(predicted_caption, ignore_index=tokenizer.eos_token_id)
-     return predicted_captions_decoded
-
- # Create a Gradio interface
- iface = gr.Interface(fn=generate,  # Function to be called on user input
-                      inputs=[gr.Image(
-                          width=416, height=416,
-                          type="pil", image_mode='RGB', label="Upload Image"
-                      ),
-                      gr.Slider(1, 50, value=10, step=1, label="Max Length")],
-                      outputs=gr.Textbox(
-                          label="Response from AI Model: ",
-                      )
-                      # examples = ['car.jpg']
-                      )
-
- # Launch the Gradio app
- iface.launch()

+ import gradio as gr
  import torch
+ import whisperx
+ from model import MainQLoraModel
+ from configs import get_config_phase2
  from transformers import AutoTokenizer, AutoProcessor
+
+ # get config
+ config = get_config_phase2()
+ # tokenizer and image processor
+ tokenizer = AutoTokenizer.from_pretrained(config.get("phi2_model_name"), trust_remote_code=True)
+ processor = AutoProcessor.from_pretrained(config.get("clip_model_name"), trust_remote_code=True)
+ llmModel = MainQLoraModel(tokenizer, config).to(config.get("device"))
+ audio_model = whisperx.load_model('tiny', 'cpu', compute_type="int8")  # float16 is not supported on CPU
+
+
+ def generate_answers(img=None, aud=None, q=None, max_tokens=30):
+     batch_size = 1
+     # embed the <iQ> ... </iQ> marker tokens that wrap the multimodal context
+     start_iq = tokenizer.encode("<iQ>")
+     end_iq = tokenizer.encode("</iQ>")
+     start_iq_embeds = torch.tensor(start_iq).repeat(batch_size, 1)
+     end_iq_embeds = torch.tensor(end_iq).repeat(batch_size, 1)
+     start_iq_embeds = llmModel.phi2_model.model.model.embed_tokens(start_iq_embeds.to(config.get("device")))
+     end_iq_embeds = llmModel.phi2_model.model.model.embed_tokens(end_iq_embeds.to(config.get("device")))
+
+     inputs_embeddings = []
+     inputs_embeddings.append(start_iq_embeds)
+
+     predicted_caption = torch.full((batch_size, max_tokens), llmModel.EOS_TOKEN_ID, dtype=torch.long, device=config.get("device"))
+
+     if img is not None:
+         images = processor(images=img, return_tensors="pt").to(config.get("device"))
+         images = {'pixel_values': images['pixel_values']}
+         clip_outputs = llmModel.clip_model(**images)
          # remove cls token
          images = clip_outputs.last_hidden_state[:, 1:, :]
+         image_embeddings = llmModel.projection_layer(images).to(torch.float16)
+         inputs_embeddings.append(image_embeddings)
+
+     if aud is not None:
+         trans = audio_model.transcribe(aud)
+         audio_res = ""
+         for seg in trans['segments']:
+             audio_res += seg['text']
+         audio_res = audio_res.strip()
+         audio_tokens = tokenizer(audio_res, return_tensors="pt", return_attention_mask=False)['input_ids']
+         audio_embeds = llmModel.phi2_model.model.model.embed_tokens(audio_tokens.to(config.get("device")))
+         inputs_embeddings.append(audio_embeds)
+
+     if q is not None:
+         ques = tokenizer(q, return_tensors="pt", return_attention_mask=False)['input_ids']
+         q_embeds = llmModel.phi2_model.model.model.embed_tokens(ques.to(config.get("device")))
+         inputs_embeddings.append(q_embeds)
+
+     inputs_embeddings.append(end_iq_embeds)
+     # Combine embeddings
+     combined_embeds = torch.cat(inputs_embeddings, dim=1)
+
+     for pos in range(max_tokens - 1):
+         model_output_logits = llmModel.phi2_model.forward(inputs_embeds=combined_embeds)['logits']
+         predicted_word_token_logits = model_output_logits[:, -1, :].unsqueeze(1)
+         predicted_word_token = torch.argmax(predicted_word_token_logits, dim=-1)
+         predicted_caption[:, pos] = predicted_word_token.view(1, -1).to('cpu')
+         next_token_embeds = llmModel.phi2_model.model.model.embed_tokens(predicted_word_token)
+         combined_embeds = torch.cat([combined_embeds, next_token_embeds], dim=1)
+     predicted_captions_decoded = tokenizer.batch_decode(predicted_caption, skip_special_tokens=True)[0]
+     return predicted_captions_decoded
+
+
+ with gr.Blocks() as demo:
+
+     gr.Markdown(
+         """
+         # TAI2T Model (Text, Audio, Image to Text Model)
+         Multimodal GPT that takes image, audio, and text as input and produces text as output.
+         """
+     )
+
+     with gr.Row():
+         image = gr.Image(label="Image", type="pil")
+         audio_q = gr.Audio(label="Audio Question", sources=['microphone', 'upload'], type='filepath')
+     with gr.Row():
+         question = gr.Text(label='Question?')
+     with gr.Row():
+         max_tokens = gr.Slider(1, 50, value=10, step=1, label="Maximum number of tokens in the answer")
+         submit = gr.Button("Submit")
+     with gr.Row():
+         answer = gr.Text(label='Answer')
+     submit.click(generate_answers, inputs=[image, audio_q, question, max_tokens], outputs=[answer])
+
+ if __name__ == "__main__":
+     demo.launch(share=True)
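For a quick sanity check of the new inference path outside the Gradio UI, generate_answers can also be called directly. A minimal sketch, assuming app.py is importable from the repo root and a local test image exists (the file name sample.jpg is hypothetical):

```python
# Smoke-test sketch for generate_answers outside Gradio.
# Assumes app.py is importable and that "sample.jpg" exists locally (both are assumptions).
from PIL import Image

from app import generate_answers

img = Image.open("sample.jpg").convert("RGB")  # same type as gr.Image(type="pil") provides
answer = generate_answers(img=img, aud=None, q="What is in the image?", max_tokens=20)
print(answer)
```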
ckpts/Qlora_adaptor/README.md ADDED
@@ -0,0 +1,204 @@
+ ---
+ library_name: peft
+ base_model: microsoft/phi-2
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+
+
+ ### Framework versions
+
+ - PEFT 0.7.2.dev0
ckpts/Qlora_adaptor/adapter_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "microsoft/phi-2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "fc1",
+     "fc2",
+     "q_proj",
+     "k_proj",
+     "dense",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
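The adapter applies LoRA (r=64, lora_alpha=16, dropout 0.1) to the phi-2 attention and MLP projections (q_proj, k_proj, v_proj, dense, fc1, fc2). A minimal sketch of attaching this checkpoint to the base model with PEFT; the 4-bit quantization settings are an assumption for memory savings, not something recorded in the adapter config:

```python
# Sketch: load microsoft/phi-2 and attach the QLoRA adapter saved in ckpts/Qlora_adaptor.
# The BitsAndBytesConfig values are assumptions; the adapter config above only fixes the LoRA settings.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
base = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2", quantization_config=bnb_config, trust_remote_code=True
)
model = PeftModel.from_pretrained(base, "./ckpts/Qlora_adaptor")  # path as laid out in this repo
model.eval()
```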
ckpts/Qlora_adaptor/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:667821e30438c00dd61ef1b2d55f0cb07411c608e5c375ba6723d90aa2695f7d
+ size 377538512
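The adapter weights are stored through Git LFS, so the file above is only a pointer; the oid and size identify the real ~378 MB safetensors file. A small sketch for checking a downloaded copy against that pointer:

```python
# Verify a downloaded adapter_model.safetensors against the LFS pointer (oid and size above).
import hashlib
import os

path = "ckpts/Qlora_adaptor/adapter_model.safetensors"
expected_oid = "667821e30438c00dd61ef1b2d55f0cb07411c608e5c375ba6723d90aa2695f7d"
expected_size = 377538512

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha256.hexdigest() == expected_oid, "sha256 mismatch"
print("adapter file matches the LFS pointer")
```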
configs.py CHANGED
@@ -1,6 +1,4 @@
  import torch
- import multiprocessing
-
  def get_config_phase1():
      return {
          "data_dir": "./data",
@@ -13,24 +11,23 @@ def get_config_phase1():
          "max_tokens": 20,
          "clip_embed": 768,
          "phi_embed": 2560,
-         "num_workers": 32,
          "ckpts": "./ckpts"
      }

  def get_config_phase2():
      return {
-         "i150k_json": "./data/llava_instruct_150k.json",
-         "QA_datasetName": "OpenAssistant/oasst1",
          "clip_model_name": "openai/clip-vit-base-patch16",
          "phi2_model_name": "microsoft/phi-2",
          "train_batch_size": 1,
          "val_batch_size": 1,
          "device": torch.device("cuda" if torch.cuda.is_available() else "cpu"),
-         "epochs": 2,
-         "max_tokens": 20,
          "clip_embed": 768,
          "phi_embed": 2560,
-         "num_workers": 1,
          "ckpts": "./ckpts",
          "vocab_size": 51200
      }
 
  import torch
  def get_config_phase1():
      return {
          "data_dir": "./data",

          "max_tokens": 20,
          "clip_embed": 768,
          "phi_embed": 2560,
+         "num_workers": 4,
          "ckpts": "./ckpts"
      }

  def get_config_phase2():
      return {
+         "data_dir": "./data",
          "clip_model_name": "openai/clip-vit-base-patch16",
          "phi2_model_name": "microsoft/phi-2",
          "train_batch_size": 1,
          "val_batch_size": 1,
          "device": torch.device("cuda" if torch.cuda.is_available() else "cpu"),
+         "epochs": 10,
+         "max_tokens": 100,
          "clip_embed": 768,
          "phi_embed": 2560,
+         "num_workers": 0,
          "ckpts": "./ckpts",
          "vocab_size": 51200
      }
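Both phase configs are plain dicts that app.py reads with .get(); a short sketch for inspecting how the phase-2 settings differ from phase 1 after this change:

```python
# Print the keys whose values differ between the phase-1 and phase-2 configs.
from configs import get_config_phase1, get_config_phase2

phase1, phase2 = get_config_phase1(), get_config_phase2()
for key in sorted(set(phase1) | set(phase2)):
    if phase1.get(key) != phase2.get(key):
        print(f"{key}: {phase1.get(key)!r} -> {phase2.get(key)!r}")
```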
requirements.txt CHANGED
@@ -1,6 +1,6 @@
  torch
- torchvision
  git+https://github.com/huggingface/peft.git
  accelerate
  transformers
- einops
 
 
  torch
  git+https://github.com/huggingface/peft.git
  accelerate
  transformers
+ einops
+ git+https://github.com/m-bain/whisperx.git