// GGML internal header

#include "ggml.h"

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h> // memcpy

#ifdef __cplusplus
extern "C" {
#endif
// required for mmap as gguf only guarantees 32-byte alignment
#define TENSOR_ALIGNMENT 32

// static_assert should be a #define, but if it's not,
// fall back to the _Static_assert C11 keyword.
// if C99 - static_assert is noop
// ref: https://stackoverflow.com/a/53923785/4039976
#if !defined(__cplusplus) && !defined(static_assert)
    #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
        #define static_assert(cond, msg) _Static_assert(cond, msg)
    #else
        #define static_assert(cond, msg) struct global_scope_noop_trick
    #endif
#endif
static inline int ggml_up32(int n) {
    return (n + 31) & ~31;
}

//static inline int ggml_up64(int n) {
//    return (n + 63) & ~63;
//}

static inline int ggml_up(int n, int m) {
    // assert m is a power of 2
    GGML_ASSERT((m & (m - 1)) == 0);
    return (n + m - 1) & ~(m - 1);
}
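
// Illustrative sketch (not part of the original header): rounding up to a
// power-of-two multiple works by adding m-1 and then masking off the low bits;
// ~(m - 1) clears the log2(m) low-order bits, so the result is the smallest
// multiple of m that is >= n (assuming m is a power of 2 and no overflow).
// For example:
//
//   ggml_up32(1)     == 32    // (1 + 31) & ~31
//   ggml_up32(32)    == 32    // already aligned, unchanged
//   ggml_up(100, 64) == 128   // (100 + 63) & ~63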
//
// logging
//

GGML_ATTRIBUTE_FORMAT(2, 3)
void ggml_log_internal        (enum ggml_log_level level, const char * format, ...);
void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data);
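
// Illustrative note (not part of the original header): GGML_ATTRIBUTE_FORMAT(2, 3)
// tells the compiler that argument 2 is a printf-style format string consumed by
// the varargs starting at argument 3, enabling -Wformat checks at call sites such as:
//
//   ggml_log_internal(GGML_LOG_LEVEL_INFO, "loaded %d tensors\n", n_tensors);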
// tensor params

static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
    GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
    assert(params_size <= GGML_MAX_OP_PARAMS);
    memcpy(tensor->op_params, params, params_size);
}

static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    return ((const int32_t *)(tensor->op_params))[i];
}

static float ggml_get_op_params_f32(const struct ggml_tensor * tensor, uint32_t i) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
    return ((const float *)(tensor->op_params))[i];
}

static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    ((int32_t *)(tensor->op_params))[i] = value;
}

static void ggml_set_op_params_f32(struct ggml_tensor * tensor, uint32_t i, float value) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
    ((float *)(tensor->op_params))[i] = value;
}
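
// Illustrative sketch (not part of the original header): op_params is a small
// fixed-size scratch area inside ggml_tensor where an operator stores its
// scalar arguments. A hypothetical op with two int32 knobs could do:
//
//   int32_t params[] = { axis, n_dims };              // hypothetical values
//   ggml_set_op_params(result, params, sizeof(params));
//   ...
//   int32_t axis = ggml_get_op_params_i32(result, 0); // read it back at compute time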
struct ggml_map_custom1_op_params {
    ggml_custom1_op_t fun;
    int n_tasks;
    void * userdata;
};

struct ggml_map_custom2_op_params {
    ggml_custom2_op_t fun;
    int n_tasks;
    void * userdata;
};

struct ggml_map_custom3_op_params {
    ggml_custom3_op_t fun;
    int n_tasks;
    void * userdata;
};
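
// Illustrative sketch (an assumption about usage, not part of the original
// header): these structs are small enough to fit in op_params, so a custom op
// can stash its callback and task count directly in the result tensor:
//
//   struct ggml_map_custom1_op_params p = {
//       /*.fun      =*/ my_custom_fun,    // hypothetical ggml_custom1_op_t
//       /*.n_tasks  =*/ GGML_N_TASKS_MAX,
//       /*.userdata =*/ NULL,
//   };
//   ggml_set_op_params(result, &p, sizeof(p));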
// bitset

typedef uint32_t ggml_bitset_t;

static_assert(sizeof(ggml_bitset_t) == 4, "bitset_t constants must be updated");
#define BITSET_SHR 5 // log2(sizeof(ggml_bitset_t)*8)
#define BITSET_MASK (sizeof(ggml_bitset_t)*8 - 1)

static size_t ggml_bitset_size(size_t n) {
    return (n + BITSET_MASK) >> BITSET_SHR;
}
static inline bool ggml_bitset_get(const ggml_bitset_t * bitset, size_t i) {
    return !!(bitset[i >> BITSET_SHR] & (1u << (i & BITSET_MASK)));
}

static inline void ggml_bitset_set(ggml_bitset_t * bitset, size_t i) {
    bitset[i >> BITSET_SHR] |= (1u << (i & BITSET_MASK));
}

static inline void ggml_bitset_clear(ggml_bitset_t * bitset, size_t i) {
    bitset[i >> BITSET_SHR] &= ~(1u << (i & BITSET_MASK));
}
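
// Illustrative sketch (not part of the original header): bit i lives in word
// i >> BITSET_SHR (i / 32) at position i & BITSET_MASK (i % 32). For example:
//
//   ggml_bitset_t bits[4] = {0}; // ggml_bitset_size(100) == 4 words for 100 bits
//   ggml_bitset_set(bits, 42);
//   assert(ggml_bitset_get(bits, 42));
//   ggml_bitset_clear(bits, 42);
//   assert(!ggml_bitset_get(bits, 42));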
// hash set

struct ggml_hash_set {
    size_t size;
    ggml_bitset_t * used;       // whether or not the keys are in use i.e. set
    struct ggml_tensor ** keys; // actual tensors in the set, keys[i] is only defined if ggml_bitset_get(used, i)
};

struct ggml_hash_set ggml_hash_set_new(size_t size);
void                 ggml_hash_set_free(struct ggml_hash_set * hash_set);

// returns the minimum size for a hash set that can hold min_sz elements
size_t ggml_hash_size(size_t min_sz);

// remove all elements from the hash set
void ggml_hash_set_reset(struct ggml_hash_set * hash_set);

// returns true if key is in the hash set
static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);

// returns GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);

// returns GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key);

// return index, asserts if table is full
static size_t ggml_hash_find_or_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key);
// hash function for ggml_tensor
static inline size_t ggml_hash(const struct ggml_tensor * p) {
    // the last 4 bits are always zero due to alignment
    return (size_t)(uintptr_t)p >> 4;
}

#define GGML_HASHSET_FULL           ((size_t)-1)
#define GGML_HASHSET_ALREADY_EXISTS ((size_t)-2)
static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
    size_t h = ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    while (ggml_bitset_get(hash_set->used, i) && hash_set->keys[i] != key) {
        i = (i + 1) % hash_set->size;
        if (i == h) {
            // visited all hash table entries -> not found
            return GGML_HASHSET_FULL;
        }
    }
    return i;
}
static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
    size_t i = ggml_hash_find(hash_set, key);
    return i != GGML_HASHSET_FULL && ggml_bitset_get(hash_set->used, i);
}
static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
    size_t h = ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    do {
        if (!ggml_bitset_get(hash_set->used, i)) {
            ggml_bitset_set(hash_set->used, i);
            hash_set->keys[i] = key;
            return i;
        }
        if (hash_set->keys[i] == key) {
            return GGML_HASHSET_ALREADY_EXISTS;
        }
        i = (i + 1) % hash_set->size;
    } while (i != h);

    // visited all hash table entries without finding a free slot -> table is full
    GGML_ABORT("fatal error");
}
static size_t ggml_hash_find_or_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
    size_t h = ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    do {
        if (!ggml_bitset_get(hash_set->used, i)) {
            ggml_bitset_set(hash_set->used, i);
            hash_set->keys[i] = key;
            return i;
        }
        if (hash_set->keys[i] == key) {
            return i;
        }
        i = (i + 1) % hash_set->size;
    } while (i != h);

    // visited all hash table entries without finding a free slot -> table is full
    GGML_ABORT("fatal error");
}
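
// Illustrative usage sketch (not part of the original header): the set is
// open-addressed with linear probing, so capacity is fixed at creation and
// ggml_hash_insert aborts once every slot is used. A typical round trip,
// with graph_size as a hypothetical element count:
//
//   struct ggml_hash_set set = ggml_hash_set_new(graph_size);
//   if (ggml_hash_insert(&set, node) != GGML_HASHSET_ALREADY_EXISTS) {
//       // first visit of this node
//   }
//   assert(ggml_hash_contains(&set, node));
//   ggml_hash_set_free(&set);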
// computation graph

enum ggml_cgraph_eval_order {
    GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
    GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
    GGML_CGRAPH_EVAL_ORDER_COUNT
};

struct ggml_cgraph {
    int size;    // maximum number of nodes/leafs/grads
    int n_nodes; // number of nodes currently in use
    int n_leafs; // number of leafs currently in use

    struct ggml_tensor ** nodes; // tensors with data that can change if the graph is evaluated
    struct ggml_tensor ** grads; // gradients of the nodes
    struct ggml_tensor ** leafs; // tensors with constant data

    struct ggml_hash_set visited_hash_set;

    enum ggml_cgraph_eval_order order;
};
struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph, int i0, int i1);
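
// Illustrative note (an assumption, not part of the original header):
// ggml_graph_view returns a shallow view over the node range [i0, i1) that
// shares the underlying arrays with the original graph, e.g.:
//
//   struct ggml_cgraph first_half = ggml_graph_view(gf, 0, gf->n_nodes / 2);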
// Memory allocation

void * ggml_aligned_malloc(size_t size);
void   ggml_aligned_free(void * ptr, size_t size);
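
// Illustrative usage (not part of the original header): unlike free(), the
// deallocator also takes the allocation size, presumably so platforms that
// back these allocations with mapped memory can release it; callers must
// therefore keep the size around:
//
//   void * buf = ggml_aligned_malloc(n_bytes);
//   ...
//   ggml_aligned_free(buf, n_bytes);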
// TODO: move to threading file

void ggml_critical_section_start(void);
void ggml_critical_section_end(void);
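
// Illustrative usage (not part of the original header): these form a global
// mutual-exclusion pair guarding ggml's shared internal state:
//
//   ggml_critical_section_start();
//   // ... touch shared global state ...
//   ggml_critical_section_end();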
#ifdef __cplusplus
}
#endif