| #include "ggml-metal-common.h" |
|
|
| #include "ggml-impl.h" |
| #include "ggml-backend-impl.h" |
|
|
| #include <vector> |
|
|
| |
| |
// A half-open byte range [p0, p1) within a buffer, tagged with whether the
// range is read (src) or written (dst) by an operation.
struct ggml_mem_range {
    uint64_t pb; // buffer id: the buffer pointer, or the tensor pointer for unallocated tensors (see ggml_mem_range_from_tensor)

    uint64_t p0; // range start, inclusive
    uint64_t p1; // range end, exclusive

    ggml_mem_range_type pt; // MEM_RANGE_TYPE_SRC (read) or MEM_RANGE_TYPE_DST (write)
};
|
|
// A set of recorded memory ranges used to detect read/write conflicts
// between graph operations.
struct ggml_mem_ranges {
    std::vector<ggml_mem_range> ranges;

    int debug = 0; // verbosity level; > 2 enables per-range logging
};
|
|
| ggml_mem_ranges_t ggml_mem_ranges_init(int debug) { |
| auto * res = new ggml_mem_ranges; |
|
|
| res->ranges.reserve(256); |
| res->debug = debug; |
|
|
| return res; |
| } |
|
|
// Destroy a tracker previously created with ggml_mem_ranges_init().
void ggml_mem_ranges_free(ggml_mem_ranges_t mrs) {
    delete mrs;
}
|
|
// Forget all recorded ranges (keeps the reserved capacity).
void ggml_mem_ranges_reset(ggml_mem_ranges_t mrs) {
    mrs->ranges.clear();
}
|
|
| static bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, ggml_mem_range mr) { |
| mrs->ranges.push_back(mr); |
|
|
| return true; |
| } |
|
|
| static ggml_mem_range ggml_mem_range_from_tensor(const ggml_tensor * tensor, ggml_mem_range_type pt) { |
| |
| tensor = tensor->view_src ? tensor->view_src : tensor; |
|
|
| GGML_ASSERT(!tensor->view_src); |
|
|
| ggml_mem_range mr; |
|
|
| if (tensor->buffer) { |
| |
| |
| |
| |
| |
| mr = { |
| (uint64_t) tensor->buffer, |
| (uint64_t) tensor->data, |
| (uint64_t) tensor->data + ggml_backend_buft_get_alloc_size(tensor->buffer->buft, tensor), |
| pt, |
| }; |
| } else { |
| |
| |
| mr = { |
| (uint64_t) tensor, |
| 0, |
| 1024, |
| pt, |
| }; |
| }; |
|
|
| return mr; |
| } |
|
|
// Convenience wrapper: range for a tensor used as a source (read).
static ggml_mem_range ggml_mem_range_from_tensor_src(const ggml_tensor * tensor) {
    return ggml_mem_range_from_tensor(tensor, MEM_RANGE_TYPE_SRC);
}
|
|
// Convenience wrapper: range for a tensor used as a destination (write).
static ggml_mem_range ggml_mem_range_from_tensor_dst(const ggml_tensor * tensor) {
    return ggml_mem_range_from_tensor(tensor, MEM_RANGE_TYPE_DST);
}
|
|
| static bool ggml_mem_ranges_add_src(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { |
| GGML_ASSERT(tensor); |
|
|
| ggml_mem_range mr = ggml_mem_range_from_tensor_src(tensor); |
|
|
| if (mrs->debug > 2) { |
| GGML_LOG_DEBUG("%s: add src range buf=%lld, [%lld, %lld)\n", __func__, mr.pb, mr.p0, mr.p1); |
| } |
|
|
| return ggml_mem_ranges_add(mrs, mr); |
| } |
|
|
| static bool ggml_mem_ranges_add_dst(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { |
| GGML_ASSERT(tensor); |
|
|
| ggml_mem_range mr = ggml_mem_range_from_tensor_dst(tensor); |
|
|
| if (mrs->debug > 2) { |
| GGML_LOG_DEBUG("%s: add dst range buf=%lld, [%lld, %lld)\n", __func__, mr.pb, mr.p0, mr.p1); |
| } |
|
|
| return ggml_mem_ranges_add(mrs, mr); |
| } |
|
|
| bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { |
| for (int i = 0; i < GGML_MAX_SRC; i++) { |
| if (tensor->src[i]) { |
| ggml_mem_ranges_add_src(mrs, tensor->src[i]); |
| } |
| } |
|
|
| return ggml_mem_ranges_add_dst(mrs, tensor); |
| } |
|
|
| static bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, ggml_mem_range mr) { |
| for (size_t i = 0; i < mrs->ranges.size(); i++) { |
| const auto & cmp = mrs->ranges[i]; |
|
|
| |
| if (mr.pb != cmp.pb) { |
| continue; |
| } |
|
|
| |
| if (mr.pt == MEM_RANGE_TYPE_SRC && cmp.pt == MEM_RANGE_TYPE_SRC) { |
| continue; |
| } |
|
|
| if (mr.p0 < cmp.p1 && mr.p1 >= cmp.p0) { |
| if (mrs->debug > 2) { |
| GGML_LOG_DEBUG("%s: the %s range buf=%lld, [%lld, %lld) overlaps with a previous %s range buf=%lld, [%lld, %lld)\n", |
| __func__, |
| mr.pt == MEM_RANGE_TYPE_SRC ? "src" : "dst", |
| mr.pb, mr.p0, mr.p1, |
| cmp.pt == MEM_RANGE_TYPE_SRC ? "src" : "dst", |
| cmp.pb, cmp.p0, cmp.p1); |
| } |
|
|
| return false; |
| } |
| } |
|
|
| return true; |
| } |
|
|
| static bool ggml_mem_ranges_check_src(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { |
| GGML_ASSERT(tensor); |
|
|
| ggml_mem_range mr = ggml_mem_range_from_tensor_src(tensor); |
|
|
| const bool res = ggml_mem_ranges_check(mrs, mr); |
|
|
| return res; |
| } |
|
|
| static bool ggml_mem_ranges_check_dst(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { |
| GGML_ASSERT(tensor); |
|
|
| ggml_mem_range mr = ggml_mem_range_from_tensor_dst(tensor); |
|
|
| const bool res = ggml_mem_ranges_check(mrs, mr); |
|
|
| return res; |
| } |
|
|
| bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { |
| for (int i = 0; i < GGML_MAX_SRC; i++) { |
| if (tensor->src[i]) { |
| if (!ggml_mem_ranges_check_src(mrs, tensor->src[i])) { |
| return false; |
| } |
| } |
| } |
|
|
| return ggml_mem_ranges_check_dst(mrs, tensor); |
| } |
|
|
| struct node_info { |
| ggml_tensor * node; |
|
|
| std::vector<ggml_tensor *> fused; |
|
|
| ggml_op op() const { |
| return node->op; |
| } |
|
|
| const ggml_tensor * dst() const { |
| return fused.empty() ? node : fused.back(); |
| } |
|
|
| bool is_empty() const { |
| return ggml_op_is_empty(node->op); |
| } |
|
|
| void add_fused(ggml_tensor * t) { |
| fused.push_back(t); |
| } |
| }; |
|
|
// Compute a new execution order (as indices into `nodes`) that pulls
// non-conflicting nodes forward past a node whose memory ranges conflict
// with already-emitted work. Two trackers are used:
//   mrs0 - ranges of the nodes emitted in the current batch
//   mrs1 - ranges of the nodes that were skipped over during a forward scan
// A node may be moved forward only if it conflicts with neither set.
static std::vector<int> ggml_metal_graph_optimize_reorder(const std::vector<node_info> & nodes) {
    // record all src ranges (including those of the fused chain) and the dst range of a node
    const auto & h_add = [](ggml_mem_ranges_t mrs, const node_info & node) {
        for (int i = 0; i < GGML_MAX_SRC; i++) {
            if (node.node->src[i]) {
                if (!ggml_mem_ranges_add_src(mrs, node.node->src[i])) {
                    return false;
                }
            }
        }

        // the fused ops read their own sources too
        for (const auto * fused : node.fused) {
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                if (fused->src[i]) {
                    if (!ggml_mem_ranges_add_src(mrs, fused->src[i])) {
                        return false;
                    }
                }
            }
        }

        return ggml_mem_ranges_add_dst(mrs, node.dst());
    };

    // check all src ranges (including the fused chain) and the dst range of a node
    const auto & h_check = [](ggml_mem_ranges_t mrs, const node_info & node) {
        for (int i = 0; i < GGML_MAX_SRC; i++) {
            if (node.node->src[i]) {
                if (!ggml_mem_ranges_check_src(mrs, node.node->src[i])) {
                    return false;
                }
            }
        }

        for (const auto * fused : node.fused) {
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                if (fused->src[i]) {
                    if (!ggml_mem_ranges_check_src(mrs, fused->src[i])) {
                        return false;
                    }
                }
            }
        }

        return ggml_mem_ranges_check_dst(mrs, node.dst());
    };

    // whitelist of ops that are allowed to be reordered past; scanning stops
    // at the first op not in this list (empty/no-op ops are always safe)
    const auto & h_safe = [](ggml_op op) {
        switch (op) {
            case GGML_OP_MUL_MAT:
            case GGML_OP_MUL_MAT_ID:
            case GGML_OP_ROPE:
            case GGML_OP_NORM:
            case GGML_OP_RMS_NORM:
            case GGML_OP_GROUP_NORM:
            case GGML_OP_L2_NORM:
            case GGML_OP_SUM_ROWS:
            case GGML_OP_SSM_CONV:
            case GGML_OP_SSM_SCAN:
            case GGML_OP_CLAMP:
            case GGML_OP_TRI:
            case GGML_OP_DIAG:
            case GGML_OP_MUL:
            case GGML_OP_ADD:
            case GGML_OP_SUB:
            case GGML_OP_DIV:
            case GGML_OP_GLU:
            case GGML_OP_SCALE:
            case GGML_OP_UNARY:
            case GGML_OP_GET_ROWS:
            case GGML_OP_SET_ROWS:
            case GGML_OP_SET:
            case GGML_OP_CPY:
            case GGML_OP_CONT:
            case GGML_OP_REPEAT:
                return true;
            default:
                return ggml_op_is_empty(op);
        }
    };

    const int n = nodes.size();

    std::vector<int> res;
    res.reserve(n);

    std::vector<bool> used(n, false);

    // ranges of the nodes emitted so far in the current batch
    ggml_mem_ranges_t mrs0 = ggml_mem_ranges_init(0);

    // ranges of the nodes skipped over during the forward scan
    ggml_mem_ranges_t mrs1 = ggml_mem_ranges_init(0);

    for (int i0 = 0; i0 < n; i0++) {
        if (used[i0]) {
            continue;
        }

        const auto & node0 = nodes[i0];

        // node0 conflicts with the current batch: before starting a new batch,
        // try to pull forward upcoming nodes that are independent of both the
        // batch and everything skipped over (node0 included)
        if (!node0.is_empty() && !h_check(mrs0, node0)) {
            // start tracking the skipped-over set with node0 itself
            ggml_mem_ranges_reset(mrs1);

            h_add(mrs1, node0);

            // limit how far ahead we search
            constexpr int N_FORWARD = 64;

            for (int i1 = i0 + 1; i1 < i0 + N_FORWARD && i1 < n; i1++) {
                if (used[i1]) {
                    continue;
                }

                const auto & node1 = nodes[i1];

                // do not reorder past ops that are not known to be safe to move
                if (!h_safe(node1.op())) {
                    break;
                }

                const bool is_empty = node1.is_empty();

                // node1 can join the current batch only if it conflicts neither
                // with the batch (mrs0; empty ops are exempt) nor with any node
                // it would jump over (mrs1)
                if ((is_empty || h_check(mrs0, node1)) && h_check(mrs1, node1)) {
                    // move node1 forward into the current batch
                    h_add(mrs0, node1);
                    res.push_back(i1);

                    // mark it so the outer loop skips it later
                    used[i1] = true;
                } else {
                    // node1 stays in place: its ranges now guard further moves
                    h_add(mrs1, node1);
                }
            }

            // the batch is complete: start a new one for node0
            ggml_mem_ranges_reset(mrs0);
        }

        // emit node0 into the (possibly fresh) batch
        {
            h_add(mrs0, node0);
            res.push_back(i0);
        }
    }

    ggml_mem_ranges_free(mrs0);
    ggml_mem_ranges_free(mrs1);

    return res;
}
|
|
// Optimize the graph in-place: group fusable op chains into node_info
// entries, reorder them via ggml_metal_graph_optimize_reorder(), then
// write the nodes back into gf in the new order.
void ggml_graph_optimize(ggml_cgraph * gf) {
    // maximum length of a fused op chain
    constexpr int MAX_FUSE = 16;

    const int n = gf->n_nodes;

    // scratch list of candidate ops for ggml_can_fuse()
    enum ggml_op ops[MAX_FUSE];

    std::vector<node_info> nodes;
    nodes.reserve(gf->n_nodes);

    // pass 1: collect nodes, fusing eligible chains into a single node_info
    for (int i = 0; i < n; i++) {
        node_info node = {
            gf->nodes[i],
            {},
        };

        // only chains starting with ADD/NORM/RMS_NORM are considered for fusion
        if (node.op() == GGML_OP_ADD ||
            node.op() == GGML_OP_NORM ||
            node.op() == GGML_OP_RMS_NORM) {
            ops[0] = node.op();

            // extend the candidate chain while the following ops are fusable kinds
            int f = i + 1;
            while (f < n && f < i + MAX_FUSE) {
                // conservative: break on the first op outside the fusable set
                if (gf->nodes[f]->op != GGML_OP_ADD &&
                    gf->nodes[f]->op != GGML_OP_MUL &&
                    gf->nodes[f]->op != GGML_OP_NORM &&
                    gf->nodes[f]->op != GGML_OP_RMS_NORM) {
                    break;
                }
                ops[f - i] = gf->nodes[f]->op;
                f++;
            }

            // f = candidate chain length; shrink until ggml_can_fuse() accepts
            // it (f == 1 means no fusion)
            f -= i;
            for (; f > 1; f--) {
                if (ggml_can_fuse(gf, i, ops, f)) {
                    break;
                }
            }

            // attach the fused tail to this node and advance i past it
            for (int k = 1; k < f; k++) {
                ++i;

                node.add_fused(gf->nodes[i]);
            }
        }

        nodes.push_back(std::move(node));
    }

#if 1
    // pass 2: compute the new execution order
    const auto order = ggml_metal_graph_optimize_reorder(nodes);
#else
    // debug path: identity order (no reordering)
    std::vector<int> order(nodes.size());
    for (size_t i = 0; i < nodes.size(); i++) {
        order[i] = i;
    }
#endif

    // pass 3: write the nodes back in the new order, re-expanding fused chains
    {
        int j = 0;
        for (const auto i : order) {
            const auto & node = nodes[i];

            gf->nodes[j++] = node.node;

            for (auto * fused : node.fused) {
                gf->nodes[j++] = fused;
            }
        }
    }
}
|
|