#include "models.h"

| ggml_tensor * clip_graph_mobilenetv5::rms_norm_2d(ggml_tensor * inp, ggml_tensor * weight, float eps) { |
| |
|
|
| ggml_tensor * cur = ggml_permute(ctx0, inp, 2, 1, 0, 3); |
| cur = ggml_cont(ctx0, cur); |
| cur = ggml_rms_norm(ctx0, cur, eps); |
|
|
| if (weight) { |
| cur = ggml_mul(ctx0, cur, weight); |
| } |
|
|
| cur = ggml_permute(ctx0, cur, 2, 1, 0, 3); |
| cur = ggml_cont(ctx0, cur); |
|
|
| return cur; |
| } |
| ggml_tensor* clip_graph_mobilenetv5::pad_same_2d(ggml_tensor* inp, int kernel_h, int kernel_w, int stride_h, int stride_w, int dilation_h, int dilation_w) { |
| const int64_t ih = inp->ne[1]; |
| const int64_t iw = inp->ne[0]; |
|
|
| |
| const int64_t oh = (ih + stride_h - 1) / stride_h; |
| const int64_t ow = (iw + stride_w - 1) / stride_w; |
|
|
| |
| const int64_t pad_h = std::max((int64_t)0, (oh - 1) * stride_h + (kernel_h - 1) * dilation_h + 1 - ih); |
| const int64_t pad_w = std::max((int64_t)0, (ow - 1) * stride_w + (kernel_w - 1) * dilation_w + 1 - iw); |
|
|
| |
| const int pad_h_top = pad_h / 2; |
| const int pad_h_bottom = pad_h - pad_h_top; |
| const int pad_w_left = pad_w / 2; |
| const int pad_w_right = pad_w - pad_w_left; |
|
|
| |
| |
| |
| if (pad_h > 0 || pad_w > 0) { |
| inp = ggml_pad_ext(ctx0, inp, |
| pad_w_left, pad_w_right, |
| pad_h_top, pad_h_bottom, |
| 0, 0, |
| 0, 0); |
| } |
|
|
| return inp; |
| } |
| ggml_tensor * clip_graph_mobilenetv5::build_edge_residual(ggml_tensor * inp, const mobilenetv5_block & block, int stride) { |
| ggml_tensor * cur = inp; |
|
|
| |
| if (stride == 2) { |
| |
| |
| cur = pad_same_2d(cur, 3, 3, stride, stride); |
| cur = ggml_conv_2d_direct(ctx0, block.s0_conv_exp_w, cur, stride, stride, 0, 0, 1, 1); |
| } else { |
| |
| |
| cur = ggml_conv_2d_direct(ctx0, block.s0_conv_exp_w, cur, stride, stride, 1, 1, 1, 1); |
| } |
|
|
| |
| if (block.s0_bn1_w) cur = rms_norm_2d(cur, block.s0_bn1_w); |
| cur = ggml_gelu(ctx0, cur); |
|
|
| |
| |
| cur = ggml_conv_2d_direct(ctx0, block.s0_conv_pwl_w, cur, 1, 1, 0, 0, 1, 1); |
| if (block.s0_bn2_w) cur = rms_norm_2d(cur, block.s0_bn2_w); |
|
|
| |
| |
| if (stride == 1 && inp->ne[2] == cur->ne[2] && inp->ne[0] == cur->ne[0]) { |
| cur = ggml_add(ctx0, cur, inp); |
| } |
|
|
| return cur; |
| } |
| ggml_tensor * clip_graph_mobilenetv5::build_inverted_residual(ggml_tensor * inp, const mobilenetv5_block & block, int stride) { |
| ggml_tensor * cur = inp; |
|
|
| |
| |
| if (block.dw_start_w) { |
| int k = block.dw_start_w->ne[0]; |
| int p = k / 2; |
| cur = ggml_conv_2d_dw(ctx0, block.dw_start_w, cur, 1, 1, p, p, 1, 1); |
| if (block.dw_start_bn_w) cur = rms_norm_2d(cur, block.dw_start_bn_w); |
| } |
|
|
| |
| if (block.pw_exp_w) { |
| |
| cur = ggml_conv_2d_direct(ctx0, block.pw_exp_w, cur, 1, 1, 0, 0, 1, 1); |
| if (block.pw_exp_bn_w) cur = rms_norm_2d(cur, block.pw_exp_bn_w); |
| cur = ggml_gelu(ctx0, cur); |
| } |
|
|
| |
| |
| if (block.dw_mid_w) { |
| int k = block.dw_mid_w->ne[0]; |
|
|
| if (stride > 1) { |
| |
| cur = pad_same_2d(cur, k, k, stride, stride); |
| cur = ggml_conv_2d_dw(ctx0, block.dw_mid_w, cur, stride, stride, 0, 0, 1, 1); |
| } else { |
| |
| int p = k / 2; |
| cur = ggml_conv_2d_dw(ctx0, block.dw_mid_w, cur, stride, stride, p, p, 1, 1); |
| } |
|
|
| if (block.dw_mid_bn_w) cur = rms_norm_2d(cur, block.dw_mid_bn_w); |
| cur = ggml_gelu(ctx0, cur); |
| } |
|
|
| |
| if (block.pw_proj_w) { |
| cur = ggml_conv_2d_direct(ctx0, block.pw_proj_w, cur, 1, 1, 0, 0, 1, 1); |
| if (block.pw_proj_bn_w) cur = rms_norm_2d(cur, block.pw_proj_bn_w); |
| } |
|
|
| |
| if (block.layer_scale_w) { |
| cur = ggml_mul(ctx0, cur, block.layer_scale_w); |
| } |
|
|
| |
| bool same_spatial = (inp->ne[0] == cur->ne[0]) && (inp->ne[1] == cur->ne[1]); |
| bool same_channel = (inp->ne[2] == cur->ne[2]); |
| if (same_spatial && same_channel) { |
| cur = ggml_add(ctx0, cur, inp); |
| } |
|
|
| return cur; |
| } |
// Mobile multi-query attention (MQA) block over a [W, H, C, B] feature map:
// n_head query heads all attend to a single shared K/V head (K/V are
// reshaped with head dim 1 and broadcast over the query heads). K and V may
// first be spatially downsampled 2x by depthwise convs.
ggml_tensor * clip_graph_mobilenetv5::build_mobilenet_attn(ggml_tensor * inp, const mobilenetv5_block & block) {
    ggml_tensor * cur = inp;

    // pre-attention channel-wise RMS norm
    if (block.attn_norm_w) {
        cur = rms_norm_2d(cur, block.attn_norm_w, 1e-6f);
    }

    // Q: 1x1 conv producing n_head * D channels at full resolution
    ggml_tensor * q = ggml_conv_2d_direct(ctx0, block.attn_q_w, cur, 1, 1, 0, 0, 1, 1);

    // K path: optional stride-2 depthwise downsample (+ norm) before the
    // 1x1 K projection
    ggml_tensor * k_inp = cur;
    if (block.attn_k_dw_w) {
        int k_size = block.attn_k_dw_w->ne[0];
        k_inp = pad_same_2d(cur, k_size, k_size, 2, 2);
        k_inp = ggml_conv_2d_dw(ctx0, block.attn_k_dw_w, k_inp, 2, 2, 0, 0, 1, 1);
        if (block.attn_k_norm_w) {
            k_inp = rms_norm_2d(k_inp, block.attn_k_norm_w, 1e-6f);
        }
    }
    ggml_tensor * k = ggml_conv_2d_direct(ctx0, block.attn_k_w, k_inp, 1, 1, 0, 0, 1, 1);

    // V path: same optional downsample pattern as K
    ggml_tensor * v_inp = cur;
    if (block.attn_v_dw_w) {
        int v_size = block.attn_v_dw_w->ne[0];
        v_inp = pad_same_2d(cur, v_size, v_size, 2, 2);
        v_inp = ggml_conv_2d_dw(ctx0, block.attn_v_dw_w, v_inp, 2, 2, 0, 0, 1, 1);
        if (block.attn_v_norm_w) {
            v_inp = rms_norm_2d(v_inp, block.attn_v_norm_w, 1e-6f);
        }
    }
    ggml_tensor * v = ggml_conv_2d_direct(ctx0, block.attn_v_w, v_inp, 1, 1, 0, 0, 1, 1);

    const int W = cur->ne[0]; const int H = cur->ne[1]; const int B = cur->ne[3];
    const int D = k->ne[2];          // head dim = K's channel count (single shared K/V head)
    const int n_head = q->ne[2] / D; // Q carries n_head * D channels
    const int N = W * H;             // number of query positions

    // Q: [W,H,D*n_head,B] -> [D, N, n_head, B]
    q = ggml_reshape_3d(ctx0, q, N, D*n_head, B);
    q = ggml_reshape_4d(ctx0, q, N, D, n_head, B);
    q = ggml_permute(ctx0, q, 1, 0, 2, 3);
    q = ggml_cont(ctx0, q);

    const int Wk = k->ne[0]; const int Hk = k->ne[1];
    const int M = Wk * Hk;           // number of key/value positions (possibly downsampled)

    // K: [Wk,Hk,D,B] -> [D, M, 1, B]; the singleton head dim broadcasts over
    // the n_head query heads in the matmul below (this is the MQA trick)
    k = ggml_reshape_3d(ctx0, k, M, D, B);
    k = ggml_reshape_4d(ctx0, k, M, D, 1, B);
    k = ggml_permute(ctx0, k, 1, 0, 2, 3);
    k = ggml_cont(ctx0, k);

    // V: [Wk,Hk,D,B] -> [M, D, 1, B] (left transposed relative to K)
    v = ggml_reshape_3d(ctx0, v, M, D, B);
    v = ggml_reshape_4d(ctx0, v, M, D, 1, B);
    v = ggml_cont(ctx0, v);

    // standard scaled dot-product attention scale
    float scale = 1.0f / sqrtf((float)D);

    // scores = K^T Q : [M, N, n_head, B]
    ggml_tensor * scores = ggml_mul_mat(ctx0, k, q);

    scores = ggml_scale(ctx0, scores, scale);

    // softmax over ne[0] = the key dimension
    scores = ggml_soft_max(ctx0, scores);

    // kqv = V^T scores : [D, N, n_head, B]
    ggml_tensor * kqv = ggml_mul_mat(ctx0, v, scores);

    kqv = ggml_permute(ctx0, kqv, 1, 0, 2, 3);
    kqv = ggml_cont(ctx0, kqv);

    // back to a feature map: [N, D, n_head, B] -> [W, H, D*n_head, B]
    kqv = ggml_reshape_3d(ctx0, kqv, N, D * n_head, B);
    kqv = ggml_reshape_4d(ctx0, kqv, W, H, D * n_head, B);
    kqv = ggml_cont(ctx0, kqv);

    // output 1x1 projection
    cur = ggml_conv_2d_direct(ctx0, block.attn_o_w, kqv, 1, 1, 0, 0, 1, 1);

    // residual (with optional layer scale) when the shape is preserved
    if (inp->ne[0] == cur->ne[0] && inp->ne[2] == cur->ne[2]) {
        if (block.layer_scale_w) {
            cur = ggml_mul(ctx0, cur, block.layer_scale_w);
        }
        cur = ggml_add(ctx0, cur, inp);
    }

    return cur;
}
// Build the full MobileNetV5 vision graph:
//   stem conv -> backbone blocks (edge-residual / MQA attention / inverted
//   residual) -> multi-scale fusion adapter (MSFA) -> flatten to a token
//   sequence -> embedding scaling + norms + projection into LLM space.
ggml_cgraph * clip_graph_mobilenetv5::build() {
    ggml_tensor * inp = build_inp_raw();

    // stem: 3x3 stride-2 conv with explicit "SAME" padding, then norm + GELU
    ggml_tensor * cur = pad_same_2d(inp, 3, 3, 2, 2);

    cur = ggml_conv_2d_direct(ctx0, model.mobilenet_stem_conv_w, cur, 2, 2, 0, 0, 1, 1);
    if (model.mobilenet_stem_conv_b) {
        cur = ggml_add(ctx0, cur, model.mobilenet_stem_conv_b);
    }
    if (model.mobilenet_stem_norm_w) cur = rms_norm_2d(cur, model.mobilenet_stem_norm_w);
    cur = ggml_gelu(ctx0, cur);

    // features captured at the fusion points, consumed by the MSFA below
    std::vector<ggml_tensor*> intermediate_features;
    const int total_blocks = model.mobilenet_blocks.size();

    // a block starts a new stage (and therefore downsamples with stride 2)
    // when it is block 0 or directly follows a recorded stage end
    auto is_stage_start = [&](int i) {
        if (i == 0) return true;
        for (int end_idx : model.mobilenet_stage_ends) {
            if (i == end_idx + 1) return true;
        }
        return false;
    };

    // capture the outputs of the 3rd and 4th stages for fusion; if fewer
    // than 4 stage ends are known, fall back to just the final block
    auto is_fusion_point = [&](int i) {
        if (model.mobilenet_stage_ends.size() >= 4) {
            if (i == model.mobilenet_stage_ends[2]) return true;
            if (i == model.mobilenet_stage_ends[3]) return true;
        } else {
            if (i == total_blocks - 1) return true;
        }
        return false;
    };

    // backbone: dispatch each block on which weight tensors it carries
    for (int i = 0; i < total_blocks; i++) {
        const auto & block = model.mobilenet_blocks[i];
        int stride = is_stage_start(i) ? 2 : 1;

        if (block.s0_conv_exp_w) cur = build_edge_residual(cur, block, stride);
        else if (block.attn_q_w) cur = build_mobilenet_attn(cur, block);
        else cur = build_inverted_residual(cur, block, stride);

        if (is_fusion_point(i)) {
            intermediate_features.push_back(cur);
        }
    }

    // multi-scale fusion adapter: upscale every captured feature to the
    // highest captured resolution, concat on channels, FFN, then pool
    if (!intermediate_features.empty()) {
        // the first captured feature has the highest resolution (earlier
        // stages have larger spatial dims)
        ggml_tensor* target_feat = intermediate_features[0];
        int high_res_w = target_feat->ne[0];
        int high_res_h = target_feat->ne[1];

        std::vector<ggml_tensor*> resized_feats;

        for (auto feat : intermediate_features) {
            int feat_w = feat->ne[0];
            int feat_h = feat->ne[1];

            if (feat_w < high_res_w || feat_h < high_res_h) {
                // nearest-neighbor upscale by an integral factor derived from
                // the width ratio (assumed equal to the height ratio)
                int scale_w = high_res_w / feat_w;

                GGML_ASSERT(high_res_w % feat_w == 0);

                feat = ggml_upscale(ctx0, feat, scale_w, ggml_scale_mode::GGML_SCALE_MODE_NEAREST);
            }
            resized_feats.push_back(feat);
        }

        // channel-wise concatenation (dim 2) of all resized features
        cur = resized_feats[0];
        for (size_t k = 1; k < resized_feats.size(); ++k) {
            cur = ggml_concat(ctx0, cur, resized_feats[k], 2);
        }

        // MSFA FFN expand: 1x1 conv + norm + GELU
        if (model.msfa_ffn_expand_w) {
            cur = ggml_conv_2d_direct(ctx0, model.msfa_ffn_expand_w, cur, 1, 1, 0, 0, 1, 1);

            if (model.msfa_ffn_expand_bn) {
                cur = rms_norm_2d(cur, model.msfa_ffn_expand_bn);
            }

            cur = ggml_gelu(ctx0, cur);
        }

        // MSFA FFN project: 1x1 conv + norm (no activation)
        if (model.msfa_ffn_project_w) {
            cur = ggml_conv_2d_direct(ctx0, model.msfa_ffn_project_w, cur, 1, 1, 0, 0, 1, 1);

            if (model.msfa_ffn_project_bn) {
                cur = rms_norm_2d(cur, model.msfa_ffn_project_bn);
            }
        }

        // average-pool down to the fixed 16x16 output grid; requires the
        // current width to be an integer multiple of 16
        const int target_out_res = 16;
        int current_w = cur->ne[0];

        if (current_w > target_out_res) {
            int s = current_w / target_out_res;

            GGML_ASSERT(current_w % target_out_res == 0);

            cur = ggml_pool_2d(ctx0, cur, GGML_OP_POOL_AVG, s, s, s, s, 0, 0);
        }

        // final norm over the fused feature map
        if (model.msfa_concat_norm_w) {
            cur = rms_norm_2d(cur, model.msfa_concat_norm_w);
        }
    }

    // flatten the [W, H, C, B] feature map into a [C, W*H, B] token sequence
    int W = cur->ne[0];
    int H = cur->ne[1];
    int C = cur->ne[2];
    int B = cur->ne[3];

    GGML_ASSERT(C == hparams.n_embd);

    // [W,H,C,B] -> [C,W,H,B], make contiguous, then merge W and H into the
    // token dimension (tokens scan width fastest)
    cur = ggml_permute(ctx0, cur, 2, 1, 0, 3);
    cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
    cur = ggml_cont(ctx0, cur);
    cur = ggml_reshape_3d(ctx0, cur, C, W*H, B);
    cur = ggml_cont(ctx0, cur);

    // scale embeddings by sqrt(n_embd) — NOTE(review): presumably the
    // Gemma-style embedding scaling expected downstream; confirm against
    // the reference implementation
    const float scale_factor = sqrtf((float)C);
    cur = ggml_scale(ctx0, cur, scale_factor);

    // soft-embedding RMS norm (per token, over the channel dim)
    {
        const float eps = 1e-6f;
        cur = ggml_rms_norm(ctx0, cur, eps);

        if (model.mm_soft_emb_norm_w) {
            cur = ggml_mul(ctx0, cur, model.mm_soft_emb_norm_w);
        }
    }

    // project vision embeddings into the language-model embedding space
    if (model.mm_input_proj_w) {
        cur = ggml_mul_mat(ctx0, model.mm_input_proj_w, cur);
    }

    // post-projection RMS norm
    {
        const float eps = 1e-6f;
        cur = ggml_rms_norm(ctx0, cur, eps);

        if (model.mm_post_proj_norm_w) {
            cur = ggml_mul(ctx0, cur, model.mm_post_proj_norm_w);
        }
    }

    ggml_build_forward_expand(gf, cur);
    return gf;
}