| | #include "ggml-alloc.h"
|
| | #include "ggml-backend-impl.h"
|
| | #include "ggml.h"
|
| | #include "ggml-impl.h"
|
| | #include <assert.h>
|
| | #include <limits.h>
|
| | #include <stdarg.h>
|
| | #include <stdio.h>
|
| | #include <stdlib.h>
|
| | #include <string.h>
|
| |
|
| | #define MAX(a, b) ((a) > (b) ? (a) : (b))
|
| | #define MAX_FREE_BLOCKS 256
|
| |
|
| |
|
| |
|
| |
|
| | #define AT_PRINTF(...)
|
| |
|
| |
|
// ops that can write their result directly into the memory of one of their sources
bool ggml_op_can_inplace(enum ggml_op op) {
    switch (op) {
        case GGML_OP_FILL:
        case GGML_OP_SCALE:
        case GGML_OP_DIAG_MASK_ZERO:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_ADD:
        case GGML_OP_ADD_ID:
        case GGML_OP_ADD1:
        case GGML_OP_SUB:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_LOG:
        case GGML_OP_UNARY:
        case GGML_OP_ROPE:
        case GGML_OP_ROPE_BACK:
        case GGML_OP_SILU_BACK:
        case GGML_OP_RMS_NORM:
        case GGML_OP_RMS_NORM_BACK:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_SOFT_MAX_BACK:
            return true;

        default:
            return false;
    }
}
static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
    assert(alignment && !(alignment & (alignment - 1))); // alignment must be a power of 2
    size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
    return offset + align;
}
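// Worked example: with a buffer base of 0x1000, offset 5 and alignment 16, the
// address 0x1005 is 5 bytes past a 16-byte boundary, so align = (16 - 5) % 16 = 11
// and the function returns 5 + 11 = 16, the next aligned offset.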
// tallocr: simple linear allocator over a single pre-allocated buffer

struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer) {
    void * base = ggml_backend_buffer_get_base(buffer);
    size_t align = ggml_backend_buffer_get_alignment(buffer);

    assert(align && !(align & (align - 1))); // alignment must be a power of 2

    struct ggml_tallocr talloc = (struct ggml_tallocr) {
        /*.buffer    = */ buffer,
        /*.base      = */ base,
        /*.alignment = */ align,
        /*.offset    = */ aligned_offset(base, 0, align),
    };
    return talloc;
}

enum ggml_status ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor) {
    size_t size = ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
    size = GGML_PAD(size, talloc->alignment);

    if (talloc->offset + size > ggml_backend_buffer_get_size(talloc->buffer)) {
        GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
                __func__, tensor->name, size, ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
        GGML_ABORT("not enough space in the buffer");
    }

    void * addr = (char *)ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset;
    talloc->offset += size;

    assert(((uintptr_t)addr % talloc->alignment) == 0);

    return ggml_backend_tensor_alloc(talloc->buffer, tensor, addr);
}
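// Usage sketch (illustrative; assumes `buffer` was created beforehand with
// enough room, e.g. via ggml_backend_alloc_buffer):
//
//   struct ggml_tallocr talloc = ggml_tallocr_new(buffer);
//   ggml_tallocr_alloc(&talloc, tensor_a); // placed at the start of the buffer
//   ggml_tallocr_alloc(&talloc, tensor_b); // placed right after tensor_a (padded)
//
// Allocations only move `offset` forward; individual tensors are never freed,
// the whole buffer is released at once with ggml_backend_buffer_free(buffer).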
// dynamic tensor allocator

#define GGML_VBUFFER_MAX_CHUNKS 16

// relative memory address within an allocation that can be split into multiple chunks
struct buffer_address {
    int    chunk;  // index of the chunk the tensor lives in
    size_t offset; // offset within the chunk
};

static const struct buffer_address GGML_BUFFER_ADDRESS_INVALID = { -1, SIZE_MAX };

static bool ggml_buffer_address_less(struct buffer_address a, struct buffer_address b) {
    return a.chunk != b.chunk ? a.chunk < b.chunk : a.offset < b.offset;
}

struct free_block {
    size_t offset;
    size_t size;
};

struct tallocr_chunk {
    struct free_block free_blocks[MAX_FREE_BLOCKS]; // kept sorted by offset
    int n_free_blocks;
    size_t max_size; // highest offset ever allocated in this chunk (high-water mark)
};

struct ggml_dyn_tallocr {
    size_t alignment;
    size_t max_chunk_size;
    struct tallocr_chunk * chunks[GGML_VBUFFER_MAX_CHUNKS];
    int n_chunks;

#ifdef GGML_ALLOCATOR_DEBUG
    struct {
        const struct ggml_tensor * tensor;
        struct buffer_address addr;
    } allocated_tensors[1024];
#endif
};

static void ggml_dyn_tallocr_insert_block(struct tallocr_chunk * chunk, size_t offset, size_t size) {
    GGML_ASSERT(chunk->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");

    // find the position to insert the new block (the free blocks are sorted by offset)
    int insert_pos = 0;
    while (insert_pos < chunk->n_free_blocks && chunk->free_blocks[insert_pos].offset < offset) {
        insert_pos++;
    }

    // shift all blocks from insert_pos onward to make room for the new block
    for (int i = chunk->n_free_blocks; i > insert_pos; i--) {
        chunk->free_blocks[i] = chunk->free_blocks[i-1];
    }

    // insert the new block
    chunk->free_blocks[insert_pos].offset = offset;
    chunk->free_blocks[insert_pos].size = size;
    chunk->n_free_blocks++;
}
static void ggml_dyn_tallocr_remove_block(struct tallocr_chunk * chunk, int idx) {
    // shift all blocks after idx to the left; the loop must stop one short of
    // n_free_blocks so that free_blocks[i+1] never reads past the last valid block
    for (int i = idx; i < chunk->n_free_blocks - 1; i++) {
        chunk->free_blocks[i] = chunk->free_blocks[i+1];
    }
    chunk->n_free_blocks--;
}
static int ggml_dyn_tallocr_new_chunk(struct ggml_dyn_tallocr * alloc, size_t min_size) {
    if (alloc->n_chunks >= GGML_VBUFFER_MAX_CHUNKS) {
        return -1;
    }
    struct tallocr_chunk * chunk = calloc(1, sizeof(struct tallocr_chunk));
    GGML_ASSERT(chunk != NULL);
    chunk->n_free_blocks = 1;
    chunk->free_blocks[0].offset = 0;
    // available space in a chunk is limited to max_chunk_size, but can be higher if:
    // 1. a single tensor exceeds the maximum, and cannot fit any other way
    // 2. we are running out of chunks
    // backends will either manage to allocate the larger size, or report an error
    chunk->free_blocks[0].size = MAX(min_size, alloc->max_chunk_size);
    if (alloc->n_chunks == GGML_VBUFFER_MAX_CHUNKS - 1) {
        chunk->free_blocks[0].size = SIZE_MAX/2;
    }
    alloc->chunks[alloc->n_chunks] = chunk;
    alloc->n_chunks++;
    return alloc->n_chunks - 1;
}
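// Example of the sizing policy above (numbers are illustrative): with
// max_chunk_size = 1 GiB, a request for 256 MiB still creates a 1 GiB chunk so
// later tensors can share it, while a request for 1.5 GiB creates a 1.5 GiB
// chunk (case 1). The last possible chunk is given SIZE_MAX/2 so the allocator
// itself never runs out of address space to hand out; it is the backend that
// ultimately accepts or rejects the resulting buffer size.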
#ifdef GGML_ALLOCATOR_DEBUG
static void add_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i].tensor == NULL) {
            alloc->allocated_tensors[i].tensor = tensor;
            alloc->allocated_tensors[i].addr = addr;
            return;
        }
    }
    GGML_ABORT("out of allocated_tensors");
}
static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i].addr.chunk == addr.chunk && alloc->allocated_tensors[i].addr.offset == addr.offset) {
            alloc->allocated_tensors[i].tensor = NULL;
            return;
        }
    }
    GGML_ABORT("tried to free tensor %s, but it was not found\n", tensor->name);
}
#endif
static struct buffer_address ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t size, const struct ggml_tensor * tensor) {
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);

    int best_fit_chunk = -1;
    int best_fit_block = -1;
    size_t max_avail = 0;

    // find the best fitting free block in any chunk, besides the last block of each chunk
    for (int c = 0; c < alloc->n_chunks; ++c) {
        struct tallocr_chunk * chunk = alloc->chunks[c];
        size_t best_fit_size = SIZE_MAX;
        for (int i = 0; i < chunk->n_free_blocks - 1; i++) {
            struct free_block * block = &chunk->free_blocks[i];
            max_avail = MAX(max_avail, block->size);
            if (block->size >= size && block->size <= best_fit_size) {
                best_fit_chunk = c;
                best_fit_block = i;
                best_fit_size = block->size;
            }
        }
    }

    if (best_fit_block == -1) {
        // no suitable block found, try the last block of each chunk (this grows the chunk's size)
        int64_t best_reuse = INT64_MIN;
        for (int c = 0; c < alloc->n_chunks; ++c) {
            struct tallocr_chunk * chunk = alloc->chunks[c];
            if (chunk->n_free_blocks > 0) {
                struct free_block * block = &chunk->free_blocks[chunk->n_free_blocks - 1];
                max_avail = MAX(max_avail, block->size);
                int64_t reuse_factor = chunk->max_size - block->offset - size;
                // reuse_factor < 0 : amount of extra memory that needs to be allocated
                // reuse_factor = 0 : allocated free space exactly matches tensor size
                // reuse_factor > 0 : superfluous memory that will remain unused
                bool better_reuse = best_reuse < 0 && reuse_factor > best_reuse;
                bool better_fit = reuse_factor >= 0 && reuse_factor < best_reuse;
                if (block->size >= size && (better_reuse || better_fit)) {
                    best_fit_chunk = c;
                    best_fit_block = chunk->n_free_blocks - 1;
                    best_reuse = reuse_factor;
                }
            }
        }
    }

    if (best_fit_block == -1) {
        // none of the existing chunks have enough space left, try to allocate a new one
        best_fit_chunk = ggml_dyn_tallocr_new_chunk(alloc, size);
        best_fit_block = 0;
    }
    if (best_fit_chunk == -1) {
        // since the last chunk always has virtually endless memory, this should never happen
        GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
            __func__, size, max_avail);
        GGML_ABORT("graph allocation: failed to reserve memory");
    }

    struct tallocr_chunk * chunk = alloc->chunks[best_fit_chunk];
    struct free_block * block = &chunk->free_blocks[best_fit_block];
    struct buffer_address addr = {.chunk = best_fit_chunk, .offset = block->offset };
    block->offset += size;
    block->size -= size;
    if (block->size == 0) {
        // remove the block if it is empty
        ggml_dyn_tallocr_remove_block(chunk, best_fit_block);
    }

    AT_PRINTF("block %d, offset %zu, chunk %d\n", best_fit_block, addr.offset, addr.chunk);

#ifdef GGML_ALLOCATOR_DEBUG
    add_allocated_tensor(alloc, addr, tensor);
    size_t cur_max = addr.offset + size;
    if (cur_max > chunk->max_size) {
        // sort allocated_tensors by chunk/offset before printing
        for (int i = 0; i < 1024; i++) {
            for (int j = i + 1; j < 1024; j++) {
                if (ggml_buffer_address_less(alloc->allocated_tensors[j].addr, alloc->allocated_tensors[i].addr)) {
                    const struct ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
                    struct buffer_address tmp_addr = alloc->allocated_tensors[i].addr;
                    alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
                    alloc->allocated_tensors[i].addr = alloc->allocated_tensors[j].addr;
                    alloc->allocated_tensors[j].tensor = tmp_tensor;
                    alloc->allocated_tensors[j].addr = tmp_addr;
                }
            }
        }
        GGML_LOG_DEBUG("max_size[%d] = %.2f MB: tensors: ", addr.chunk, cur_max / 1024.0 / 1024.0);
        for (int i = 0; i < 1024; i++) {
            if (alloc->allocated_tensors[i].tensor) {
                GGML_LOG_DEBUG("%s [%d: %zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
                    alloc->allocated_tensors[i].addr.chunk,
                    alloc->allocated_tensors[i].addr.offset,
                    alloc->allocated_tensors[i].addr.offset + ggml_nbytes(alloc->allocated_tensors[i].tensor),
                    ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
            }
        }
        GGML_LOG_DEBUG("\n");
    }
#endif

    chunk->max_size = MAX(chunk->max_size, addr.offset + size);

    return addr;

    GGML_UNUSED(tensor);
}
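// Allocation strategy in brief: the first pass looks for the tightest fit among
// all free blocks except the last block of each chunk; those last blocks are
// "untouched" tail space, and using them grows the chunk's final size. Only if
// nothing fits does the second pass consider the tail blocks. reuse_factor
// measures how much of the request lands below the chunk's current high-water
// mark (max_size): e.g. with size = 50, a chunk whose tail block starts exactly
// at its max_size yields reuse_factor = -50 (pure growth), while a chunk whose
// tail starts 40 bytes below its max_size yields -10, so the latter wins and
// only 10 extra bytes of final buffer size are added.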
// this is a very naive implementation, but for our case the number of free blocks should be very small
static void ggml_dyn_tallocr_free_bytes(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, size_t size) {
    size = aligned_offset(NULL, size, alloc->alignment);

    struct tallocr_chunk * chunk = alloc->chunks[addr.chunk];

    // see if we can merge with an existing block
    for (int i = 0; i < chunk->n_free_blocks; i++) {
        struct free_block * block = &chunk->free_blocks[i];
        // check if the freed range is at the end of the block
        if (block->offset + block->size == addr.offset) {
            block->size += size;
            // check if we can merge with the next block
            if (i < chunk->n_free_blocks - 1) {
                struct free_block * next = &chunk->free_blocks[i+1];
                if (block->offset + block->size == next->offset) {
                    block->size += next->size;
                    ggml_dyn_tallocr_remove_block(chunk, i+1);
                }
            }
            return;
        }
        // check if the freed range is at the beginning of the block
        if (addr.offset + size == block->offset) {
            block->offset = addr.offset;
            block->size += size;
            // check if we can merge with the previous block
            if (i > 0) {
                struct free_block * prev = &chunk->free_blocks[i-1];
                if (prev->offset + prev->size == block->offset) {
                    prev->size += block->size;
                    ggml_dyn_tallocr_remove_block(chunk, i);
                }
            }
            return;
        }
    }
    // otherwise, add a new block
    ggml_dyn_tallocr_insert_block(chunk, addr.offset, size);
}
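// Coalescing example: with a single free block covering [64, 128), freeing the
// range [32, 64) hits the "beginning of the block" branch and extends it to
// [32, 128); then freeing [128, 160) hits the "end of the block" branch and
// extends it to [32, 160), so the free list still holds exactly one block.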
static void ggml_dyn_tallocr_reset(struct ggml_dyn_tallocr * alloc) {
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; i++) {
        free(alloc->chunks[i]);
        alloc->chunks[i] = NULL;
    }
    alloc->n_chunks = 0;

#ifdef GGML_ALLOCATOR_DEBUG
    for (int i = 0; i < 1024; i++) {
        alloc->allocated_tensors[i].tensor = NULL;
    }
#endif
}

static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment, size_t max_buffer_size) {
    struct ggml_dyn_tallocr * alloc = (struct ggml_dyn_tallocr *)malloc(sizeof(struct ggml_dyn_tallocr));

    *alloc = (struct ggml_dyn_tallocr) {
        /*.alignment      = */ alignment,
        /*.max_chunk_size = */ MIN(max_buffer_size, SIZE_MAX/2), // clamp to avoid overflows
        /*.chunks         = */ {NULL},
        /*.n_chunks       = */ 0,
#ifdef GGML_ALLOCATOR_DEBUG
        /*.allocated_tensors = */ {{0}},
#endif
    };

    ggml_dyn_tallocr_reset(alloc);

    return alloc;
}
static void ggml_dyn_tallocr_free(struct ggml_dyn_tallocr * alloc) {
    for (int i = 0; i < alloc->n_chunks; ++i) {
        free(alloc->chunks[i]);
    }
    free(alloc);
}

static size_t ggml_dyn_tallocr_max_size(struct ggml_dyn_tallocr * alloc, int chunk) {
    return chunk < alloc->n_chunks ? alloc->chunks[chunk]->max_size : 0;
}
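// Note: ggml_dyn_tallocr never touches real memory. It only simulates
// allocations to compute offsets and the final max_size of every chunk; actual
// backend buffers matching those sizes are created later (see ggml_vbuffer_alloc
// below), and tensors are then placed at the precomputed offsets.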
// virtual buffer: a collection of backend buffers (chunks), addressed by
// (chunk index, offset) pairs

struct vbuffer {
    ggml_backend_buffer_t chunks[GGML_VBUFFER_MAX_CHUNKS];
};

static void ggml_vbuffer_free(struct vbuffer * buf) {
    if (buf == NULL) {
        return;
    }
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; ++i) {
        ggml_backend_buffer_free(buf->chunks[i]);
    }
    free(buf);
}

static size_t ggml_vbuffer_chunk_size(struct vbuffer * buf, int chunk) {
    return buf->chunks[chunk] ? ggml_backend_buffer_get_size(buf->chunks[chunk]) : 0;
}

static size_t ggml_vbuffer_size(struct vbuffer * buf) {
    size_t size = 0;
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) {
        size += ggml_backend_buffer_get_size(buf->chunks[i]);
    }
    return size;
}

static struct vbuffer * ggml_vbuffer_alloc(ggml_backend_buffer_type_t buft, const struct ggml_dyn_tallocr * talloc, enum ggml_backend_buffer_usage usage) {
    struct vbuffer * buf = (struct vbuffer *)calloc(1, sizeof(struct vbuffer));
    if (buf == NULL) {
        return NULL;
    }

    for (int n = 0; n < talloc->n_chunks; n++) {
        size_t chunk_size = talloc->chunks[n]->max_size;
        buf->chunks[n] = ggml_backend_buft_alloc_buffer(buft, chunk_size);
        if (buf->chunks[n] == NULL) {
            ggml_vbuffer_free(buf);
            return NULL;
        }
        ggml_backend_buffer_set_usage(buf->chunks[n], usage);
    }
    return buf;
}

static void ggml_vbuffer_tensor_alloc(struct vbuffer * buf, struct ggml_tensor * tensor, struct buffer_address buf_addr) {
    void * base = ggml_backend_buffer_get_base(buf->chunks[buf_addr.chunk]);
    void * addr = (char *)base + buf_addr.offset;
    ggml_backend_tensor_alloc(buf->chunks[buf_addr.chunk], tensor, addr);
}

static void ggml_vbuffer_reset(struct vbuffer * buf) {
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) {
        ggml_backend_buffer_reset(buf->chunks[i]);
    }
}
// graph allocator

struct hash_node {
    int n_children; // number of nodes in the graph that consume this tensor
    int n_views;    // number of views of this tensor
    int buffer_id;
    struct buffer_address addr;
    bool allocated;
};

struct tensor_alloc {
    int buffer_id;
    struct buffer_address addr;
    size_t size_max; // 0 = pre-allocated, unused, or view
};

struct leaf_alloc {
    struct tensor_alloc leaf;
};

struct node_alloc {
    struct tensor_alloc dst;
    struct tensor_alloc src[GGML_MAX_SRC];
};

struct ggml_gallocr {
    ggml_backend_buffer_type_t * bufts;      // [n_buffers]
    struct vbuffer ** buffers;               // [n_buffers]
    struct ggml_dyn_tallocr ** buf_tallocs;  // [n_buffers]
    int n_buffers;

    struct ggml_hash_set hash_set;
    struct hash_node * hash_values; // [hash_set.size]

    struct node_alloc * node_allocs; // [n_nodes]
    int n_nodes;

    struct leaf_alloc * leaf_allocs; // [n_leafs]
    int n_leafs;
};
ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs) {
    ggml_gallocr_t galloc = (ggml_gallocr_t)calloc(1, sizeof(struct ggml_gallocr));
    GGML_ASSERT(galloc != NULL);

    galloc->bufts = calloc(n_bufs, sizeof(ggml_backend_buffer_type_t));
    GGML_ASSERT(galloc->bufts != NULL);

    galloc->buffers = calloc(n_bufs, sizeof(struct vbuffer *));
    GGML_ASSERT(galloc->buffers != NULL);

    galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *));
    GGML_ASSERT(galloc->buf_tallocs != NULL);

    for (int i = 0; i < n_bufs; i++) {
        galloc->bufts[i] = bufts[i];
        galloc->buffers[i] = NULL;

        // check if the same buffer type is used multiple times and reuse the same allocator
        for (int j = 0; j < i; j++) {
            if (bufts[i] == bufts[j]) {
                galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
                break;
            }
        }

        if (galloc->buf_tallocs[i] == NULL) {
            size_t alignment = ggml_backend_buft_get_alignment(bufts[i]);
            size_t max_size = ggml_backend_buft_get_max_size(bufts[i]);
            galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment, max_size);
        }
    }
    galloc->n_buffers = n_bufs;

    return galloc;
}

ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft) {
    return ggml_gallocr_new_n(&buft, 1);
}
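// Typical usage of the graph allocator (a sketch; assumes `backend` and `graph`
// were created elsewhere):
//
//   ggml_gallocr_t galloc = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));
//   ggml_gallocr_reserve(galloc, graph);     // simulate the graph and allocate backend buffers
//   ggml_gallocr_alloc_graph(galloc, graph); // assign addresses to the graph tensors
//   // ... evaluate the graph, then:
//   ggml_gallocr_free(galloc);
//
// reserve() only needs to be called again when the graph topology or tensor
// sizes change; otherwise alloc_graph() alone reuses the previous layout.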
void ggml_gallocr_free(ggml_gallocr_t galloc) {
    if (galloc == NULL) {
        return;
    }

    for (int i = 0; i < galloc->n_buffers; i++) {
        if (galloc->buffers != NULL) {
            // skip if already freed
            bool freed = false;
            for (int j = 0; j < i; j++) {
                if (galloc->buffers[j] == galloc->buffers[i]) {
                    freed = true;
                    break;
                }
            }
            if (!freed) {
                ggml_vbuffer_free(galloc->buffers[i]);
            }
        }
        if (galloc->buf_tallocs != NULL) {
            // skip if already freed
            bool freed = false;
            for (int j = 0; j < i; j++) {
                if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
                    freed = true;
                    break;
                }
            }
            if (!freed) {
                ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
            }
        }
    }

    ggml_hash_set_free(&galloc->hash_set);
    free(galloc->hash_values);
    free(galloc->bufts);
    free(galloc->buffers);
    free(galloc->buf_tallocs);
    free(galloc->node_allocs);
    free(galloc->leaf_allocs);
    free(galloc);
}
static struct hash_node * ggml_gallocr_hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    size_t i = ggml_hash_find_or_insert(&galloc->hash_set, t);
    return &galloc->hash_values[i];
}

static bool ggml_gallocr_is_own(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    return ggml_gallocr_hash_get(galloc, t)->allocated;
}

static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    return t->data != NULL || t->buffer != NULL || ggml_gallocr_is_own(galloc, t);
}
// when a node reuses only part of its parent's allocation, return the unused
// tail of the parent's memory to the free list
static void ggml_gallocr_free_extra_space(ggml_gallocr_t galloc, struct ggml_tensor * node, struct ggml_tensor * parent) {
    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
    struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);

    size_t parent_size = ggml_backend_buft_get_alloc_size(galloc->bufts[p_hn->buffer_id], parent);
    size_t node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);

    GGML_ASSERT(parent_size >= node_size);

    struct ggml_dyn_tallocr * p_alloc = galloc->buf_tallocs[p_hn->buffer_id];
    parent_size = aligned_offset(NULL, parent_size, p_alloc->alignment);
    node_size = aligned_offset(NULL, node_size, p_alloc->alignment);

    if (parent_size > node_size) {
        struct buffer_address p_addr = p_hn->addr;
        p_addr.offset += node_size;
        size_t extra_size = parent_size - node_size;
        AT_PRINTF("freeing extra %zu bytes from parent %s for %s\n", extra_size, parent->name, node->name);
        ggml_dyn_tallocr_free_bytes(p_alloc, p_addr, extra_size);
    }
}
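// Example: if the parent's (aligned) allocation is 128 bytes but the node that
// takes it over in-place only needs 96, the 32-byte tail at offset + 96 goes
// back to the dynamic allocator and can be handed to a later tensor.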
static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) {
    GGML_ASSERT(buffer_id >= 0);
    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);

    if (!ggml_gallocr_is_allocated(galloc, node) && !ggml_impl_is_view(node)) {
        hn->allocated = true;
        assert(hn->addr.offset == 0);

        // try to reuse a parent's buffer (inplace)
        if (ggml_op_can_inplace(node->op)) {
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                struct ggml_tensor * parent = node->src[i];
                if (parent == NULL) {
                    continue;
                }

                // if the node's data is external, then we cannot re-use it
                if (!ggml_gallocr_is_own(galloc, parent)) {
                    AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
                    continue;
                }

                // outputs cannot be reused
                if (parent->flags & GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & GGML_TENSOR_FLAG_OUTPUT)) {
                    AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name);
                    continue;
                }

                if (!ggml_are_same_layout(node, parent)) {
                    AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name);
                    continue;
                }

                struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);
                if (p_hn->n_children == 1 && p_hn->n_views == 0) {
                    if (ggml_impl_is_view(parent)) {
                        struct ggml_tensor * view_src = parent->view_src;
                        struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src);
                        if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
                            AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
                            assert(view_src_hn->addr.chunk == p_hn->addr.chunk && view_src_hn->addr.offset == p_hn->addr.offset);
                            hn->buffer_id = p_hn->buffer_id;
                            hn->addr = p_hn->addr;
                            p_hn->allocated = false; // avoid freeing the parent
                            view_src_hn->allocated = false;
                            ggml_gallocr_free_extra_space(galloc, node, view_src);
                            return;
                        }
                    } else {
                        AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
                        hn->buffer_id = p_hn->buffer_id;
                        hn->addr = p_hn->addr;
                        p_hn->allocated = false; // avoid freeing the parent
                        ggml_gallocr_free_extra_space(galloc, node, parent);
                        return;
                    }
                }
            }
        }

        struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
        ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
        size_t size = ggml_backend_buft_get_alloc_size(buft, node);
        hn->buffer_id = buffer_id;
        hn->addr = ggml_dyn_tallocr_alloc(alloc, size, node);
    }
}
static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * node) {
    // graph outputs are never freed
    if (node->flags & GGML_TENSOR_FLAG_OUTPUT) {
        AT_PRINTF("not freeing output %s\n", node->name);
        return;
    }

    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
    int buffer_id = hn->buffer_id;
    struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
    ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
    size_t size = ggml_backend_buft_get_alloc_size(buft, node);

    AT_PRINTF("%s: freeing %s at {chunk=%d, offset=%zu} (%zu bytes) - n_free_blocks = %d\n",
        __func__, node->name, hn->addr.chunk, hn->addr.offset, size, alloc->chunks[hn->addr.chunk]->n_free_blocks);
#ifdef GGML_ALLOCATOR_DEBUG
    remove_allocated_tensor(alloc, hn->addr, node);
#endif

    ggml_dyn_tallocr_free_bytes(alloc, hn->addr, size);
    hn->allocated = false;
}
static int get_node_buffer_id(const int * node_buffer_ids, int i) {
    return node_buffer_ids ? node_buffer_ids[i] : 0;
}
static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
    // clear hash tables
    ggml_hash_set_reset(&galloc->hash_set);
    memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size);

    // allocate leafs
    // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
    }

    // count number of children and views
    // allocate other graph inputs and leafs first to avoid overwriting them
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];

        // track views of a tensor; a view node with GGML_OP_NONE is only a
        // dependency marker and does not count towards the view count
        if (ggml_impl_is_view(node) && node->op != GGML_OP_NONE) {
            struct ggml_tensor * view_src = node->view_src;
            ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
        }

        if (node->flags & GGML_TENSOR_FLAG_INPUT) {
            ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i));
        }

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }

            ggml_gallocr_hash_get(galloc, src)->n_children += 1;

            // allocate explicit inputs
            if (src->flags & GGML_TENSOR_FLAG_INPUT) {
                ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
            }
        }
    }

    // allocate tensors
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        int buffer_id = get_node_buffer_id(node_buffer_ids, i);

        // allocate parents (leafs)
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            ggml_gallocr_allocate_node(galloc, parent, buffer_id);
        }

        // allocate node
        ggml_gallocr_allocate_node(galloc, node, buffer_id);

        AT_PRINTF("exec: %s (%s) <= ", ggml_op_desc(node), node->name);
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            AT_PRINTF("%s", parent->name);
            if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
                AT_PRINTF(", ");
            }
        }
        AT_PRINTF("\n");

        // update parents; free a parent once no remaining child or view needs it
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);
            p_hn->n_children -= 1;

            AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n",
                parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated);

            if (p_hn->n_children == 0 && p_hn->n_views == 0) {
                if (ggml_impl_is_view(parent)) {
                    struct ggml_tensor * view_src = parent->view_src;
                    struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src);
                    view_src_hn->n_views -= 1;
                    AT_PRINTF("view_src %s: %d children, %d views\n",
                        view_src->name, view_src_hn->n_children, view_src_hn->n_views);
                    if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
                        ggml_gallocr_free_node(galloc, view_src);
                    }
                } else if (p_hn->allocated) {
                    ggml_gallocr_free_node(galloc, parent);
                }
            }
            AT_PRINTF("\n");
        }
    }
}
static bool ggml_gallocr_reserve_n_impl(
        ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids, bool no_alloc) {
    size_t min_hash_size = graph->n_nodes + graph->n_leafs;
    // add 25% margin to avoid hash collisions
    min_hash_size += min_hash_size / 4;

    // initialize hash table
    if (galloc->hash_set.size < min_hash_size) {
        ggml_hash_set_free(&galloc->hash_set);
        galloc->hash_set = ggml_hash_set_new(min_hash_size);
        GGML_ASSERT(galloc->hash_set.keys != NULL);

        free(galloc->hash_values);
        galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
        GGML_ASSERT(galloc->hash_values != NULL);
    }

    // reset allocators
    for (int i = 0; i < galloc->n_buffers; i++) {
        ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]);
    }

    // allocate in hash table
    ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);

    // set the node_allocs from the hash table
    if (galloc->n_nodes < graph->n_nodes) {
        free(galloc->node_allocs);
        galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc));
        GGML_ASSERT(galloc->node_allocs != NULL);
    }
    galloc->n_nodes = graph->n_nodes;
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];
        if (node->view_src || node->data) {
            node_alloc->dst.buffer_id = -1;
            node_alloc->dst.addr = GGML_BUFFER_ADDRESS_INVALID;
            node_alloc->dst.size_max = 0;
        } else {
            struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
            node_alloc->dst.buffer_id = hn->buffer_id;
            node_alloc->dst.addr = hn->addr;
            node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
        }
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (!src || src->view_src || src->data) {
                node_alloc->src[j].buffer_id = -1;
                node_alloc->src[j].addr = GGML_BUFFER_ADDRESS_INVALID;
                node_alloc->src[j].size_max = 0;
            } else {
                struct hash_node * hn = ggml_gallocr_hash_get(galloc, src);
                node_alloc->src[j].buffer_id = hn->buffer_id;
                node_alloc->src[j].addr = hn->addr;
                node_alloc->src[j].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
            }
        }
    }
    if (galloc->n_leafs < graph->n_leafs) {
        free(galloc->leaf_allocs);
        galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0]));
        GGML_ASSERT(galloc->leaf_allocs != NULL);
    }
    galloc->n_leafs = graph->n_leafs;
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf);
        if (leaf->view_src || leaf->data) {
            galloc->leaf_allocs[i].leaf.buffer_id = -1;
            galloc->leaf_allocs[i].leaf.addr = GGML_BUFFER_ADDRESS_INVALID;
            galloc->leaf_allocs[i].leaf.size_max = 0;
        } else {
            galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
            galloc->leaf_allocs[i].leaf.addr = hn->addr;
            galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
        }
    }

    // reallocate buffers if needed
    for (int i = 0; i < galloc->n_buffers; i++) {
        // if the buffer type is used multiple times, reuse the same buffer
        for (int j = 0; j < i; j++) {
            if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
                galloc->buffers[i] = galloc->buffers[j];
                break;
            }
        }

        // allocate new buffers if needed
        bool realloc = galloc->buffers[i] == NULL;
        size_t new_size = 0;
        for (int c = 0; c < galloc->buf_tallocs[i]->n_chunks; c++) {
            size_t cur_chunk_size = galloc->buffers[i] ? ggml_vbuffer_chunk_size(galloc->buffers[i], c) : 0;
            size_t new_chunk_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i], c);
            new_size += new_chunk_size;
            if (new_chunk_size > cur_chunk_size) {
                realloc = true;
            }
        }
        if (realloc) {
#ifndef NDEBUG
            {
                size_t cur_size = galloc->buffers[i] ? ggml_vbuffer_size(galloc->buffers[i]) : 0;
                if (cur_size > 0) {
                    GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n",
                            __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
                }
            }
#endif
            ggml_vbuffer_free(galloc->buffers[i]);
            if (no_alloc) {
                galloc->buffers[i] = NULL;
            } else {
                galloc->buffers[i] = ggml_vbuffer_alloc(galloc->bufts[i], galloc->buf_tallocs[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE);
                if (galloc->buffers[i] == NULL) {
                    GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size);
                    return false;
                }
            }
        }
    }

    return true;
}
// variant of ggml_gallocr_reserve_n that only measures: no backend buffers are
// allocated, and the total size per buffer is written to `sizes` instead
void ggml_gallocr_reserve_n_size(
        ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids, size_t * sizes) {
    GGML_ASSERT(ggml_gallocr_reserve_n_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids, /*no_alloc =*/ true));
    for (int i = 0; i < galloc->n_buffers; i++) {
        sizes[i] = 0;
        for (int c = 0; c < galloc->buf_tallocs[i]->n_chunks; c++) {
            sizes[i] += galloc->buf_tallocs[i]->chunks[c]->max_size;
        }
    }
}

bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
    return ggml_gallocr_reserve_n_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids, /*no_alloc =*/ false);
}

bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
}
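// Measuring sketch (illustrative): query how much memory a graph would need
// per buffer type without committing any of it, e.g. to report requirements
// up front. Assumes `galloc` was created with ggml_gallocr_new_n over two
// buffer types:
//
//   size_t sizes[2];
//   ggml_gallocr_reserve_n_size(galloc, graph, node_buffer_ids, leaf_buffer_ids, sizes);
//   // sizes[0] and sizes[1] now hold the total bytes each buffer would occupy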
static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
    int buffer_id = tensor_alloc->buffer_id;
    assert(tensor->data || tensor->view_src || ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max);

    if (tensor->view_src != NULL) {
        if (tensor->buffer == NULL) {
            assert(tensor_alloc->addr.offset == SIZE_MAX);
            if (tensor->view_src->buffer == NULL) {
                // this tensor was allocated without ggml-backend
                return;
            }
            ggml_backend_view_init(tensor);
        }
    } else {
        if (tensor->data == NULL) {
            assert(tensor_alloc->addr.offset != SIZE_MAX);
            assert(ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max);
            ggml_vbuffer_tensor_alloc(galloc->buffers[buffer_id], tensor, tensor_alloc->addr);
        } else {
            if (tensor->buffer == NULL) {
                // this tensor was allocated without ggml-backend
                return;
            }
        }
    }
}
// note: despite the name, this returns true when the recorded allocation is
// still valid (large enough) for the node, and false when a reallocation is needed
static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct tensor_alloc * talloc) {
    size_t node_size = 0;
    if (!node->data && !node->view_src) {
        // if the node previously had a buffer but no longer does, reallocate
        if (talloc->buffer_id < 0) {
            return false;
        }
        node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
    }
    return talloc->size_max >= node_size;
}
static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    if (galloc->n_nodes != graph->n_nodes) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
#endif
        return true;
    }

    if (galloc->n_leafs != graph->n_leafs) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
#endif
        return true;
    }

    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];

        if (!ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
#endif
            return true;
        }

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }
            if (!ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
#ifndef NDEBUG
                GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
#endif
                return true;
            }
        }
    }

    return false;
}
bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    if (ggml_gallocr_needs_realloc(galloc, graph)) {
        if (galloc->n_buffers == 1) {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
#endif
            if (!ggml_gallocr_reserve(galloc, graph)) {
                return false;
            }
        } else {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
#endif
            return false;
        }
    }

    // reset buffers
    for (int i = 0; i < galloc->n_buffers; i++) {
        if (galloc->buffers[i] != NULL) {
            ggml_vbuffer_reset(galloc->buffers[i]);
        }
    }

    // allocate the graph tensors from the previously computed layout
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
        ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
    }

    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }
            ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
        }
        ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
    }

    return true;
}
size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) {
    GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);

    if (galloc->buffers[buffer_id] == NULL) {
        return 0;
    }

    for (int i = 0; i < buffer_id; i++) {
        if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
            // this buffer is the same as a previous one due to the same buffer type being used multiple times
            // only return the buffer size the first time it appears to avoid double counting
            return 0;
        }
    }

    return ggml_vbuffer_size(galloc->buffers[buffer_id]);
}
// utils

static void free_buffers(ggml_backend_buffer_t ** buffers, const size_t * n_buffers) {
    for (size_t i = 0; i < *n_buffers; i++) {
        ggml_backend_buffer_free((*buffers)[i]);
    }
    free(*buffers);
}
static bool alloc_tensor_range(struct ggml_context * ctx,
        struct ggml_tensor * first, struct ggml_tensor * last,
        ggml_backend_buffer_type_t buft, size_t size,
        ggml_backend_buffer_t ** buffers, size_t * n_buffers) {

    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size);
    if (buffer == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(buft), size);
        free_buffers(buffers, n_buffers);
        return false;
    }

    *buffers = realloc(*buffers, sizeof(ggml_backend_buffer_t) * (*n_buffers + 1));
    (*buffers)[(*n_buffers)++] = buffer;

    struct ggml_tallocr tallocr = ggml_tallocr_new(buffer);

    for (struct ggml_tensor * t = first; t != last; t = ggml_get_next_tensor(ctx, t)) {
        enum ggml_status status = GGML_STATUS_SUCCESS;
        if (t->data == NULL) {
            if (t->view_src == NULL) {
                status = ggml_tallocr_alloc(&tallocr, t);
            } else if (t->buffer == NULL) {
                status = ggml_backend_view_init(t);
            }
        } else {
            if (t->view_src != NULL && t->buffer == NULL) {
                // view of a pre-allocated tensor
                status = ggml_backend_view_init(t);
            }
        }
        if (status != GGML_STATUS_SUCCESS) {
            GGML_LOG_ERROR("%s: failed to initialize tensor %s\n", __func__, t->name);
            free_buffers(buffers, n_buffers);
            return false;
        }
    }

    return true;
}
static ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft_impl(
        struct ggml_context * ctx, ggml_backend_buffer_type_t buft, size_t * nbytes_total, bool no_alloc) {
    GGML_ASSERT(ggml_get_no_alloc(ctx) == true);

    size_t alignment = ggml_backend_buft_get_alignment(buft);
    size_t max_size = ggml_backend_buft_get_max_size(buft);

    ggml_backend_buffer_t * buffers = NULL;
    size_t n_buffers = 0;
    *nbytes_total = 0;

    size_t cur_buf_size = 0;
    struct ggml_tensor * first = ggml_get_first_tensor(ctx);
    for (struct ggml_tensor * t = first; t != NULL; t = ggml_get_next_tensor(ctx, t)) {
        size_t this_size = 0;
        if (t->data == NULL && t->view_src == NULL) {
            this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment);
        }

        if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) {
            // allocate tensors in the current buffer and start a new one
            if (!no_alloc && !alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
                return NULL;
            }
            first = t;
            *nbytes_total += cur_buf_size;
            cur_buf_size = this_size;
        } else {
            cur_buf_size += this_size;
        }
    }

    // allocate remaining tensors
    if (cur_buf_size > 0) {
        *nbytes_total += cur_buf_size;
        if (!no_alloc && !alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) {
            return NULL;
        }
    }

    if (no_alloc) {
        return NULL;
    }

    if (n_buffers == 0) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
#endif
        GGML_ASSERT(!buffers);
        return NULL;
    }

    ggml_backend_buffer_t buffer;
    if (n_buffers == 1) {
        buffer = buffers[0];
    } else {
        buffer = ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers);
    }
    if (buffers) {
        free(buffers);
    }
    return buffer;
}
size_t ggml_backend_alloc_ctx_tensors_from_buft_size(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) {
    size_t nbytes_total = 0;
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft_impl(ctx, buft, &nbytes_total, true);
    GGML_ASSERT(!buf);
    return nbytes_total;
}

ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) {
    size_t nbytes_total = 0;
    return ggml_backend_alloc_ctx_tensors_from_buft_impl(ctx, buft, &nbytes_total, false);
}

ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) {
    return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend));
}
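// Usage sketch (illustrative): create tensors in a no_alloc context, then
// allocate them all into backend memory in one call.
//
//   struct ggml_init_params params = {
//       /*.mem_size   = */ ggml_tensor_overhead() * 8,
//       /*.mem_buffer = */ NULL,
//       /*.no_alloc   = */ true, // metadata only, data is allocated below
//   };
//   struct ggml_context * ctx = ggml_init(params);
//   struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
//   ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
//   // ... use the tensors, then release everything:
//   ggml_backend_buffer_free(buf);
//   ggml_free(ctx);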