#define _CRT_SECURE_NO_DEPRECATE // Disables "unsafe" warnings on Windows
#define _USE_MATH_DEFINES // For M_PI on MSVC
|
|
| #include "ggml-backend-impl.h" |
| #include "ggml-backend.h" |
| #include "traits.h" |
| #include "ggml-cpu-impl.h" |
| #include "ggml-impl.h" |
| #include "quants.h" |
| #include "ggml-threading.h" |
| #include "unary-ops.h" |
| #include "binary-ops.h" |
| #include "vec.h" |
| #include "ops.h" |
| #include "ggml.h" |
| #include "common.h" |
|
|
| #if defined(_MSC_VER) || defined(__MINGW32__) |
| #include <malloc.h> |
| #elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) |
| #include <alloca.h> |
| #endif |
|
|
| #include <assert.h> |
| #include <errno.h> |
| #include <time.h> |
| #include <math.h> |
| #include <stdlib.h> |
| #include <string.h> |
| #include <stdint.h> |
| #include <inttypes.h> |
| #include <stdio.h> |
| #include <float.h> |
| #include <limits.h> |
| #include <stdarg.h> |
| #include <signal.h> |
| #if defined(__gnu_linux__) |
| #include <syscall.h> |
| #endif |
|
|
| #ifdef GGML_USE_OPENMP |
| #include <omp.h> |
| #endif |
|
|
// the native ARM INT8 mmla / SVE kernels are used instead of llamafile sgemm on these targets
#if defined(__ARM_FEATURE_SVE) || defined(__ARM_FEATURE_MATMUL_INT8)
#undef GGML_USE_LLAMAFILE
#endif
|
|
| #ifdef GGML_USE_LLAMAFILE |
| #include "llamafile/sgemm.h" |
| #endif |
|
|
// Note: once we move threading into a separate C++ file
// will use std::hardware_destructive_interference_size instead of hardcoding it here
// and we'll use C++ attribute syntax
#define GGML_CACHE_LINE 64
|
|
| #if defined(__clang__) || defined(__GNUC__) |
| #define GGML_CACHE_ALIGN __attribute__((aligned(GGML_CACHE_LINE))) |
| #endif |
|
|
| #if defined(__has_feature) |
| #if __has_feature(thread_sanitizer) |
| #define GGML_TSAN_ENABLED 1 |
| #endif |
| #else |
| #if defined(__SANITIZE_THREAD__) |
| #define GGML_TSAN_ENABLED 1 |
| #endif |
| #endif |
|
|
| #define UNUSED GGML_UNUSED |
| #define SWAP(x, y, T) do { T SWAP = x; (x) = y; (y) = SWAP; } while (0) |
|
|
// precomputed f32 table for f16 (256 KB) (simd-mappings.h)
float ggml_table_f32_f16[1 << 16];
|
|
// precomputed f32 table for e8m0 (1 KB) (ggml-impl.h)
float ggml_table_f32_e8m0_half[1 << 8];
|
|
| #if defined(__ARM_ARCH) |
| struct ggml_arm_arch_features_type { |
| int sve_cnt; |
| } ggml_arm_arch_features = { 0 }; |
| #endif |
|
|
| #if defined(__riscv) |
| struct ggml_riscv_arch_features_type { |
| int rvv_vlen; |
| } ggml_riscv_arch_features = { 0 }; |
| #endif |
|
|
| #if defined(_WIN32) |
|
|
| #define WIN32_LEAN_AND_MEAN |
| #ifndef NOMINMAX |
| #define NOMINMAX |
| #endif |
| #include <windows.h> |
|
|
| #if defined(_MSC_VER) && !defined(__clang__) |
| #define GGML_CACHE_ALIGN __declspec(align(GGML_CACHE_LINE)) |
|
|
| typedef volatile LONG atomic_int; |
| typedef atomic_int atomic_bool; |
| typedef atomic_int atomic_flag; |
|
|
| #define ATOMIC_FLAG_INIT 0 |
|
|
| typedef enum { |
| memory_order_relaxed, |
| memory_order_consume, |
| memory_order_acquire, |
| memory_order_release, |
| memory_order_acq_rel, |
| memory_order_seq_cst |
| } memory_order; |
|
|
| static void atomic_store(atomic_int * ptr, LONG val) { |
| InterlockedExchange(ptr, val); |
| } |
static void atomic_store_explicit(atomic_int * ptr, LONG val, memory_order mo) {
    // TODO: add support for explicit memory order
    InterlockedExchange(ptr, val);
}
| static LONG atomic_load(atomic_int * ptr) { |
| return InterlockedCompareExchange(ptr, 0, 0); |
| } |
static LONG atomic_load_explicit(atomic_int * ptr, memory_order mo) {
    // TODO: add support for explicit memory order
    return InterlockedCompareExchange(ptr, 0, 0);
}
| static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) { |
| return InterlockedExchangeAdd(ptr, inc); |
| } |
static LONG atomic_fetch_add_explicit(atomic_int * ptr, LONG inc, memory_order mo) {
    // TODO: add support for explicit memory order
    return InterlockedExchangeAdd(ptr, inc);
}
| static atomic_bool atomic_flag_test_and_set(atomic_flag * ptr) { |
| return InterlockedExchange(ptr, 1); |
| } |
| static void atomic_flag_clear(atomic_flag * ptr) { |
| InterlockedExchange(ptr, 0); |
| } |
| static void atomic_thread_fence(memory_order mo) { |
| MemoryBarrier(); |
| } |
| #else |
| #include <stdatomic.h> |
| #endif |
|
|
| typedef HANDLE pthread_t; |
|
|
| typedef DWORD thread_ret_t; |
| static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) { |
| (void) unused; |
| HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL); |
| if (handle == NULL) |
| { |
| return EAGAIN; |
| } |
|
|
| *out = handle; |
| return 0; |
| } |
|
|
| static int pthread_join(pthread_t thread, void * unused) { |
| (void) unused; |
| int ret = (int) WaitForSingleObject(thread, INFINITE); |
| CloseHandle(thread); |
| return ret; |
| } |
|
|
| static int sched_yield (void) { |
| Sleep (0); |
| return 0; |
| } |
| #else |
|
|
| #include <pthread.h> |
| #include <stdatomic.h> |
| #include <sched.h> |
| #if defined(__FreeBSD__) |
| #include <pthread_np.h> |
| #endif |
|
|
| typedef void * thread_ret_t; |
|
|
| #include <sys/types.h> |
| #include <sys/stat.h> |
| #include <unistd.h> |
|
|
| #endif |
|
|
| typedef pthread_t ggml_thread_t; |
|
|
| #define GGML_THREADPOOL_N_THREADS_MASK (0xffffU) |
| #define GGML_THREADPOOL_N_THREADS_BITS (16) |
|
|
| #if defined(__APPLE__) |
| #include <unistd.h> |
| #include <mach/mach.h> |
| #include <TargetConditionals.h> |
| #endif |
|
|
| static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { |
| [GGML_TYPE_F32] = { |
| .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_fp32, |
| .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32, |
| .vec_dot_type = GGML_TYPE_F32, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_F16] = { |
| .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_fp16, |
| .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16, |
| .vec_dot_type = GGML_TYPE_F16, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_Q4_0] = { |
| .from_float = quantize_row_q4_0, |
| .vec_dot = ggml_vec_dot_q4_0_q8_0, |
| .vec_dot_type = GGML_TYPE_Q8_0, |
| #if defined (__ARM_FEATURE_MATMUL_INT8) |
| .nrows = 2, |
| #else |
| .nrows = 1, |
| #endif |
| }, |
| [GGML_TYPE_Q4_1] = { |
| .from_float = quantize_row_q4_1, |
| .vec_dot = ggml_vec_dot_q4_1_q8_1, |
| .vec_dot_type = GGML_TYPE_Q8_1, |
| #if defined (__ARM_FEATURE_MATMUL_INT8) |
| .nrows = 2, |
| #else |
| .nrows = 1, |
| #endif |
| }, |
| [GGML_TYPE_Q5_0] = { |
| .from_float = quantize_row_q5_0, |
| .vec_dot = ggml_vec_dot_q5_0_q8_0, |
| .vec_dot_type = GGML_TYPE_Q8_0, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_Q5_1] = { |
| .from_float = quantize_row_q5_1, |
| .vec_dot = ggml_vec_dot_q5_1_q8_1, |
| .vec_dot_type = GGML_TYPE_Q8_1, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_Q8_0] = { |
| .from_float = quantize_row_q8_0, |
| .vec_dot = ggml_vec_dot_q8_0_q8_0, |
| .vec_dot_type = GGML_TYPE_Q8_0, |
| #if defined (__ARM_FEATURE_MATMUL_INT8) |
| .nrows = 2, |
| #else |
| .nrows = 1, |
| #endif |
| }, |
| [GGML_TYPE_Q8_1] = { |
| .from_float = quantize_row_q8_1, |
| .vec_dot_type = GGML_TYPE_Q8_1, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_MXFP4] = { |
| .from_float = quantize_row_mxfp4, |
| .vec_dot = ggml_vec_dot_mxfp4_q8_0, |
| .vec_dot_type = GGML_TYPE_Q8_0, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_NVFP4] = { |
| .from_float = quantize_row_nvfp4, |
| .vec_dot = ggml_vec_dot_nvfp4_q8_0, |
| .vec_dot_type = GGML_TYPE_Q8_0, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_Q2_K] = { |
| .from_float = quantize_row_q2_K, |
| .vec_dot = ggml_vec_dot_q2_K_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_Q3_K] = { |
| .from_float = quantize_row_q3_K, |
| .vec_dot = ggml_vec_dot_q3_K_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_Q4_K] = { |
| .from_float = quantize_row_q4_K, |
| .vec_dot = ggml_vec_dot_q4_K_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| #if defined (__ARM_FEATURE_MATMUL_INT8) |
| .nrows = 2, |
| #else |
| .nrows = 1, |
| #endif |
| }, |
| [GGML_TYPE_Q5_K] = { |
| .from_float = quantize_row_q5_K, |
| .vec_dot = ggml_vec_dot_q5_K_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_Q6_K] = { |
| .from_float = quantize_row_q6_K, |
| .vec_dot = ggml_vec_dot_q6_K_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| #if defined (__ARM_FEATURE_MATMUL_INT8) |
| .nrows = 2, |
| #else |
| .nrows = 1, |
| #endif |
| }, |
| [GGML_TYPE_IQ2_XXS] = { |
| .from_float = NULL, |
| .vec_dot = ggml_vec_dot_iq2_xxs_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_IQ2_XS] = { |
| .from_float = NULL, |
| .vec_dot = ggml_vec_dot_iq2_xs_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| .nrows = 1, |
| }, |
    [GGML_TYPE_IQ3_XXS] = {
        // NOTE: from_float for iq3 and iq2_s was removed because these quants require initialization from ggml_quantize_init
        //.from_float = quantize_row_iq3_xxs,
        .vec_dot = ggml_vec_dot_iq3_xxs_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_IQ3_S] = {
        //.from_float = quantize_row_iq3_s,
        .vec_dot = ggml_vec_dot_iq3_s_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_IQ2_S] = {
        //.from_float = quantize_row_iq2_s,
        .vec_dot = ggml_vec_dot_iq2_s_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
| [GGML_TYPE_IQ1_S] = { |
| .from_float = NULL, |
| .vec_dot = ggml_vec_dot_iq1_s_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_IQ1_M] = { |
| .from_float = NULL, |
| .vec_dot = ggml_vec_dot_iq1_m_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_IQ4_NL] = { |
| .from_float = quantize_row_iq4_nl, |
| .vec_dot = ggml_vec_dot_iq4_nl_q8_0, |
| .vec_dot_type = GGML_TYPE_Q8_0, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_IQ4_XS] = { |
| .from_float = quantize_row_iq4_xs, |
| .vec_dot = ggml_vec_dot_iq4_xs_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_Q8_K] = { |
| .from_float = quantize_row_q8_K, |
| }, |
| [GGML_TYPE_BF16] = { |
| .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_bf16, |
| .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16, |
| .vec_dot_type = GGML_TYPE_BF16, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_TQ1_0] = { |
| .from_float = quantize_row_tq1_0, |
| .vec_dot = ggml_vec_dot_tq1_0_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_TQ2_0] = { |
| .from_float = quantize_row_tq2_0, |
| .vec_dot = ggml_vec_dot_tq2_0_q8_K, |
| .vec_dot_type = GGML_TYPE_Q8_K, |
| .nrows = 1, |
| }, |
| [GGML_TYPE_I32] = { |
| .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_i32, |
| }, |
| }; |
|
|
| const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type) { |
| return &type_traits_cpu[type]; |
| } |
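/*
 * Illustrative sketch (not compiled here) of how the traits above fit
 * together for a quantized dot product. `type`, `src0_row`, `src1_f32`, `buf`
 * and `n` are hypothetical caller-provided names; `buf` must hold at least
 * ggml_row_size(tt->vec_dot_type, n) bytes.
 *
 *     const struct ggml_type_traits_cpu * tt = ggml_get_type_traits_cpu(type);
 *
 *     // convert the f32 row into the type expected by this type's vec_dot
 *     tt->from_float(src1_f32, buf, n);
 *
 *     // single row x row dot product (zero strides, nrc = 1)
 *     float result;
 *     tt->vec_dot(n, &result, 0, src0_row, 0, buf, 0, 1);
 */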
|
|
//
// Threading defs
//

typedef pthread_t ggml_thread_t;
|
|
| #if defined(_WIN32) |
|
|
| typedef CONDITION_VARIABLE ggml_cond_t; |
| typedef SRWLOCK ggml_mutex_t; |
|
|
| #define ggml_mutex_init(m) InitializeSRWLock(m) |
| #define ggml_mutex_destroy(m) |
| #define ggml_mutex_lock(m) AcquireSRWLockExclusive(m) |
| #define ggml_mutex_unlock(m) ReleaseSRWLockExclusive(m) |
| #define ggml_mutex_lock_shared(m) AcquireSRWLockShared(m) |
| #define ggml_mutex_unlock_shared(m) ReleaseSRWLockShared(m) |
|
|
| #define ggml_cond_init(c) InitializeConditionVariable(c) |
| #define ggml_cond_destroy(c) |
| #define ggml_cond_wait(c, m) SleepConditionVariableSRW(c, m, INFINITE, CONDITION_VARIABLE_LOCKMODE_SHARED) |
| #define ggml_cond_broadcast(c) WakeAllConditionVariable(c) |
|
|
| #define ggml_thread_create pthread_create |
| #define ggml_thread_join pthread_join |
|
|
| #else |
|
|
| typedef pthread_cond_t ggml_cond_t; |
| typedef pthread_mutex_t ggml_mutex_t; |
|
|
| #define ggml_mutex_init(m) pthread_mutex_init(m, NULL) |
| #define ggml_mutex_destroy(m) pthread_mutex_destroy(m) |
| #define ggml_mutex_lock(m) pthread_mutex_lock(m) |
| #define ggml_mutex_unlock(m) pthread_mutex_unlock(m) |
| #define ggml_mutex_lock_shared(m) pthread_mutex_lock(m) |
| #define ggml_mutex_unlock_shared(m) pthread_mutex_unlock(m) |
|
|
| #define ggml_lock_init(x) UNUSED(x) |
| #define ggml_lock_destroy(x) UNUSED(x) |
| #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64)) |
| #define ggml_lock_lock(x) _mm_pause() |
| #else |
| #define ggml_lock_lock(x) UNUSED(x) |
| #endif |
| #define ggml_lock_unlock(x) UNUSED(x) |
|
|
| #define GGML_LOCK_INITIALIZER 0 |
| #define ggml_cond_init(c) pthread_cond_init(c, NULL) |
| #define ggml_cond_destroy(c) pthread_cond_destroy(c) |
| #define ggml_cond_wait(c, m) pthread_cond_wait(c, m) |
| #define ggml_cond_broadcast(c) pthread_cond_broadcast(c) |
|
|
| #define ggml_thread_create pthread_create |
| #define ggml_thread_join pthread_join |
|
|
| #endif |
|
|
// Threadpool def
struct ggml_threadpool {
    ggml_mutex_t mutex;     // mutex for cond.var
    ggml_cond_t  cond;      // cond.var for waiting for new work

    struct ggml_cgraph * cgraph;
    struct ggml_cplan  * cplan;

    // synchronization primitives
    atomic_int n_graph;     // incremented when there is work to be done (i.e each graph)
    atomic_int GGML_CACHE_ALIGN n_barrier;
    atomic_int GGML_CACHE_ALIGN n_barrier_passed;
    atomic_int GGML_CACHE_ALIGN current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads.

    // these are atomic as an annotation for thread-sanitizer
    atomic_bool stop;       // Used for stopping the threadpool altogether
    atomic_bool pause;      // Used for pausing the threadpool or individual threads
    atomic_int  abort;      // Used for aborting processing of a graph

    struct ggml_compute_state * workers; // per thread state
    int      n_threads;
    int32_t  prio;          // Scheduling priority
    uint32_t poll;          // Polling level (0 - no polling)

    enum ggml_status ec;
};
|
|
// Per-thread state
struct ggml_compute_state {
#ifndef GGML_USE_OPENMP
    ggml_thread_t thrd;
    int  last_graph;
    bool pending;
#endif
    bool cpumask[GGML_MAX_N_THREADS];
    struct ggml_threadpool * threadpool;
    int ith;
};
|
|
// Helpers for polling loops
#if defined(__aarch64__) && ( defined(__clang__) || defined(__GNUC__) )
static inline void ggml_thread_cpu_relax(void) {
    __asm__ volatile("yield" ::: "memory");
}
#elif defined(__x86_64__)
static inline void ggml_thread_cpu_relax(void) {
    _mm_pause();
}
#elif defined(__riscv)
static inline void ggml_thread_cpu_relax(void) {
#ifdef __riscv_zihintpause
    __asm__ __volatile__ ("pause");
#else
    // encoding of the `pause` hint, for toolchains without Zihintpause support
    __asm__ __volatile__ (".4byte 0x100000F");
#endif
}
#else
static inline void ggml_thread_cpu_relax(void) {;}
#endif
|
|
//
// NUMA support
//

#define GGML_NUMA_MAX_NODES 8
#define GGML_NUMA_MAX_CPUS 512
|
|
| struct ggml_numa_node { |
| uint32_t cpus[GGML_NUMA_MAX_CPUS]; |
| uint32_t n_cpus; |
| }; |
|
|
struct ggml_numa_nodes {
    enum ggml_numa_strategy numa_strategy;
    struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
    uint32_t n_nodes;
    uint32_t total_cpus;   // hardware threads on system
    uint32_t current_node; // node on which main process is executing
#if defined(__gnu_linux__)
    cpu_set_t cpuset;      // cpuset from numactl
#else
    uint32_t cpuset;       // no NUMA support outside of Linux at this time; use a portable datatype
#endif
};
|
|
//
// ggml state
//

struct ggml_state {
    struct ggml_numa_nodes numa;
};

static struct ggml_state g_state = {0};
|
|
void ggml_barrier(struct ggml_threadpool * tp) {
    int n_threads = atomic_load_explicit(&tp->n_graph, memory_order_relaxed) & GGML_THREADPOOL_N_THREADS_MASK;
    if (n_threads == 1) {
        return;
    }

#ifdef GGML_USE_OPENMP
    #pragma omp barrier
#else
    int n_passed = atomic_load_explicit(&tp->n_barrier_passed, memory_order_relaxed);

    // enter barrier (full seq-cst fence)
    int n_barrier = atomic_fetch_add_explicit(&tp->n_barrier, 1, memory_order_seq_cst);

    if (n_barrier == (n_threads - 1)) {
        // last thread: reset the arrival counter
        atomic_store_explicit(&tp->n_barrier, 0, memory_order_relaxed);

        // exit barrier (full seq-cst fence)
        atomic_fetch_add_explicit(&tp->n_barrier_passed, 1, memory_order_seq_cst);
        return;
    }

    // wait for other threads
    while (atomic_load_explicit(&tp->n_barrier_passed, memory_order_relaxed) == n_passed) {
        ggml_thread_cpu_relax();
    }

    // exit barrier (full seq-cst fence)
    // TSAN doesn't support standalone fence yet, we use a dummy read-modify-write instead
#ifdef GGML_TSAN_ENABLED
    atomic_fetch_add_explicit(&tp->n_barrier_passed, 0, memory_order_seq_cst);
#else
    atomic_thread_fence(memory_order_seq_cst);
#endif
#endif
}
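/*
 * A minimal usage sketch (illustrative names): every worker calls
 * ggml_barrier() between ops so that no thread reads the results of the
 * previous op before all threads have finished writing them.
 *
 *     compute_op_k(params);             // all threads work on op k
 *     ggml_barrier(params->threadpool); // full sync point
 *     compute_op_k_plus_1(params);      // may now read op k's outputs
 *
 * The non-OpenMP path is a two-counter barrier: n_barrier counts arrivals and
 * n_barrier_passed is a generation counter that waiting threads spin on, which
 * makes the barrier immediately reusable for the next sync point.
 */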
|
|
| void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value) { |
| atomic_store_explicit(&tp->current_chunk, value, memory_order_relaxed); |
| } |
|
|
| int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value) { |
| return atomic_fetch_add_explicit(&tp->current_chunk, value, memory_order_relaxed); |
| } |
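/*
 * Sketch of the work-stealing loop these helpers support (illustrative):
 * after a barrier, thread 0 seeds the counter with nth via
 * ggml_threadpool_chunk_set(), every thread starts on chunk `ith`, and
 * additional chunks are claimed with a relaxed fetch-add until none remain.
 *
 *     int chunk = ith;
 *     while (chunk < n_chunks) {
 *         process_chunk(chunk); // hypothetical per-chunk kernel
 *         chunk = ggml_threadpool_chunk_add(tp, 1);
 *     }
 */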
|
|
| #if defined(__gnu_linux__) |
| static cpu_set_t ggml_get_numa_affinity(void) { |
| cpu_set_t cpuset; |
| pthread_t thread; |
| thread = pthread_self(); |
| CPU_ZERO(&cpuset); |
| pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset); |
| return cpuset; |
| } |
| #else |
| static uint32_t ggml_get_numa_affinity(void) { |
| return 0; |
| } |
| #endif |
|
|
| void ggml_numa_init(enum ggml_numa_strategy numa_flag) { |
| if (g_state.numa.n_nodes > 0) { |
| fprintf(stderr, "ggml_numa_init: NUMA already initialized\n"); |
|
|
| return; |
| } |
|
|
| #if defined(__gnu_linux__) |
| struct stat st; |
| char path[256]; |
| int rv; |
|
|
    // set numa scheme
    g_state.numa.numa_strategy = numa_flag;
|
|
| GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy); |
|
|
| g_state.numa.cpuset = ggml_get_numa_affinity(); |
|
|
    // enumerate nodes
    while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.n_nodes;
    }
|
|
    // enumerate CPUs
    while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.total_cpus;
    }
|
|
| GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus); |
|
|
    // figure out which node we're on
    unsigned int current_cpu;
    int getcpu_ret = 0;
#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 33) || defined(__COSMOPOLITAN__)
    getcpu_ret = getcpu(&current_cpu, &g_state.numa.current_node);
#else
    // old glibc doesn't have a wrapper for this call. Fall back on direct syscall
#   if !defined(SYS_getcpu) && defined(SYS_get_cpu)
#       define SYS_getcpu SYS_get_cpu // some older glibc versions use this name
#   endif
    getcpu_ret = syscall(SYS_getcpu, &current_cpu, &g_state.numa.current_node);
#endif
|
|
| if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) { |
| g_state.numa.n_nodes = 0; |
| return; |
| } |
|
|
| GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu); |
|
|
| for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) { |
| struct ggml_numa_node * node = &g_state.numa.nodes[n]; |
| GGML_PRINT_DEBUG("CPUs on node %u:", n); |
| node->n_cpus = 0; |
| for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) { |
| rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c); |
| GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); |
| if (stat(path, &st) == 0) { |
| node->cpus[node->n_cpus++] = c; |
| GGML_PRINT_DEBUG(" %u", c); |
| } |
| } |
| GGML_PRINT_DEBUG("\n"); |
| } |
|
|
| if (ggml_is_numa()) { |
| FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r"); |
| if (fptr != NULL) { |
| char buf[42]; |
| if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) { |
| GGML_LOG_WARN("/proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n"); |
| } |
| fclose(fptr); |
| } |
| } |
| #else |
    UNUSED(numa_flag);
    // TODO: NUMA detection is not implemented on this platform
| #endif |
| } |
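/*
 * Usage sketch (illustrative): ggml_numa_init() is intended to be called once
 * at startup, before compute threads are created:
 *
 *     ggml_numa_init(GGML_NUMA_STRATEGY_DISTRIBUTE);
 *     if (ggml_is_numa()) {
 *         // more than one node was found - thread affinity will be applied
 *     }
 */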
|
|
| bool ggml_is_numa(void) { |
| return g_state.numa.n_nodes > 1; |
| } |
|
|
| #if defined(__ARM_ARCH) |
| #if defined(__aarch64__) && defined(__ARM_FEATURE_SVE) |
| #include <arm_sve.h> |
| static void ggml_init_arm_arch_features(void) { |
| ggml_arm_arch_features.sve_cnt = svcntb(); |
| } |
| #else |
| static void ggml_init_arm_arch_features(void) {} |
| #endif |
| #endif |
|
|
| #if defined(__riscv) && defined(__riscv_v_intrinsic) |
| #include <riscv_vector.h> |
| static void ggml_init_riscv_arch_features(void) { |
| ggml_riscv_arch_features.rvv_vlen = __riscv_vlenb(); |
| } |
| #else |
| static void ggml_init_riscv_arch_features(void) {} |
| #endif |
|
|
| struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) { |
| GGML_ASSERT(!ggml_get_no_alloc(ctx)); |
|
|
| struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1); |
|
|
| ggml_set_i32(result, value); |
|
|
| return result; |
| } |
|
|
| struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) { |
| GGML_ASSERT(!ggml_get_no_alloc(ctx)); |
|
|
| struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); |
|
|
| ggml_set_f32(result, value); |
|
|
| return result; |
| } |
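/*
 * Illustrative usage of the scalar constructors above, assuming a context set
 * up by the caller (the 16 MB pool size is arbitrary):
 *
 *     struct ggml_init_params ip = { 16*1024*1024, NULL, false };
 *     struct ggml_context * ctx = ggml_init(ip);
 *
 *     struct ggml_tensor * f = ggml_new_f32(ctx, 1.0f); // 1-element F32 tensor
 *     struct ggml_tensor * i = ggml_new_i32(ctx, 42);   // 1-element I32 tensor
 */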
|
|
| struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { |
| const int n = ggml_nrows(tensor); |
| const int nc = tensor->ne[0]; |
| const size_t n1 = tensor->nb[1]; |
|
|
| char * const data = tensor->data; |
|
|
| switch (tensor->type) { |
| case GGML_TYPE_I8: |
| { |
| assert(tensor->nb[0] == sizeof(int8_t)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); |
| } |
| } break; |
| case GGML_TYPE_I16: |
| { |
| assert(tensor->nb[0] == sizeof(int16_t)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); |
| } |
| } break; |
| case GGML_TYPE_I32: |
| { |
| assert(tensor->nb[0] == sizeof(int32_t)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); |
| } |
| } break; |
| case GGML_TYPE_F16: |
| { |
| assert(tensor->nb[0] == sizeof(ggml_fp16_t)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value)); |
| } |
| } break; |
        case GGML_TYPE_BF16:
            {
                assert(tensor->nb[0] == sizeof(ggml_bf16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
                }
            } break;
| case GGML_TYPE_F32: |
| { |
| assert(tensor->nb[0] == sizeof(float)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_f32(nc, (float *)(data + i*n1), value); |
| } |
| } break; |
| default: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| } |
|
|
| return tensor; |
| } |
|
|
| struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { |
| const int n = ggml_nrows(tensor); |
| const int nc = tensor->ne[0]; |
| const size_t n1 = tensor->nb[1]; |
|
|
| char * const data = tensor->data; |
|
|
| switch (tensor->type) { |
| case GGML_TYPE_I8: |
| { |
| assert(tensor->nb[0] == sizeof(int8_t)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); |
| } |
| } break; |
| case GGML_TYPE_I16: |
| { |
| assert(tensor->nb[0] == sizeof(int16_t)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); |
| } |
| } break; |
| case GGML_TYPE_I32: |
| { |
| assert(tensor->nb[0] == sizeof(int32_t)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); |
| } |
| } break; |
| case GGML_TYPE_F16: |
| { |
| assert(tensor->nb[0] == sizeof(ggml_fp16_t)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value)); |
| } |
| } break; |
| case GGML_TYPE_BF16: |
| { |
| assert(tensor->nb[0] == sizeof(ggml_bf16_t)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value)); |
| } |
| } break; |
| case GGML_TYPE_F32: |
| { |
| assert(tensor->nb[0] == sizeof(float)); |
| for (int i = 0; i < n; i++) { |
| ggml_vec_set_f32(nc, (float *)(data + i*n1), value); |
| } |
| } break; |
| default: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| } |
|
|
| return tensor; |
| } |
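/*
 * Note: both setters above walk the tensor row by row using the nb[1] stride,
 * so rows with padding between them are handled as well. A small hedged
 * example (hypothetical context `ctx`):
 *
 *     struct ggml_tensor * t = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 4);
 *     ggml_set_f32(t, 0.5f); // every element of the 8x4 tensor becomes 0.5
 */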
|
|
| int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { |
| if (!ggml_is_contiguous(tensor)) { |
| int64_t id[4] = { 0, 0, 0, 0 }; |
| ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); |
| return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]); |
| } |
| switch (tensor->type) { |
| case GGML_TYPE_I8: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); |
| return ((int8_t *)(tensor->data))[i]; |
| } |
| case GGML_TYPE_I16: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); |
| return ((int16_t *)(tensor->data))[i]; |
| } |
| case GGML_TYPE_I32: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); |
| return ((int32_t *)(tensor->data))[i]; |
| } |
| case GGML_TYPE_F16: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); |
| return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); |
| } |
| case GGML_TYPE_BF16: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t)); |
| return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]); |
| } |
| case GGML_TYPE_F32: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(float)); |
| return ((float *)(tensor->data))[i]; |
| } |
| default: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| } |
| } |
|
|
| void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { |
| if (!ggml_is_contiguous(tensor)) { |
| int64_t id[4] = { 0, 0, 0, 0 }; |
| ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); |
| ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value); |
| return; |
| } |
| switch (tensor->type) { |
| case GGML_TYPE_I8: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); |
| ((int8_t *)(tensor->data))[i] = value; |
| } break; |
| case GGML_TYPE_I16: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); |
| ((int16_t *)(tensor->data))[i] = value; |
| } break; |
| case GGML_TYPE_I32: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); |
| ((int32_t *)(tensor->data))[i] = value; |
| } break; |
| case GGML_TYPE_F16: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); |
| ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value); |
| } break; |
| case GGML_TYPE_BF16: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t)); |
| ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value); |
| } break; |
| case GGML_TYPE_F32: |
| { |
| GGML_ASSERT(tensor->nb[0] == sizeof(float)); |
| ((float *)(tensor->data))[i] = value; |
| } break; |
| default: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| } |
| } |
|
|
| int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) { |
| void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; |
| switch (tensor->type) { |
| case GGML_TYPE_I8: |
| return ((int8_t *) data)[0]; |
| case GGML_TYPE_I16: |
| return ((int16_t *) data)[0]; |
| case GGML_TYPE_I32: |
| return ((int32_t *) data)[0]; |
| case GGML_TYPE_F16: |
| return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); |
| case GGML_TYPE_BF16: |
| return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); |
| case GGML_TYPE_F32: |
| return ((float *) data)[0]; |
| default: |
| GGML_ABORT("fatal error"); |
| } |
| } |
|
|
| void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) { |
| void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; |
| switch (tensor->type) { |
| case GGML_TYPE_I8: |
| { |
| ((int8_t *)(data))[0] = value; |
| } break; |
| case GGML_TYPE_I16: |
| { |
| ((int16_t *)(data))[0] = value; |
| } break; |
| case GGML_TYPE_I32: |
| { |
| ((int32_t *)(data))[0] = value; |
| } break; |
| case GGML_TYPE_F16: |
| { |
| ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value); |
| } break; |
| case GGML_TYPE_BF16: |
| { |
| ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value); |
| } break; |
| case GGML_TYPE_F32: |
| { |
| ((float *)(data))[0] = value; |
| } break; |
| default: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| } |
| } |
|
|
| float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { |
| if (!ggml_is_contiguous(tensor)) { |
| int64_t id[4] = { 0, 0, 0, 0 }; |
| ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); |
| return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]); |
| } |
| switch (tensor->type) { |
| case GGML_TYPE_I8: |
| { |
| return ((int8_t *)(tensor->data))[i]; |
| } |
| case GGML_TYPE_I16: |
| { |
| return ((int16_t *)(tensor->data))[i]; |
| } |
| case GGML_TYPE_I32: |
| { |
| return ((int32_t *)(tensor->data))[i]; |
| } |
| case GGML_TYPE_F16: |
| { |
| return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); |
| } |
| case GGML_TYPE_BF16: |
| { |
| return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]); |
| } |
| case GGML_TYPE_F32: |
| { |
| return ((float *)(tensor->data))[i]; |
| } |
| default: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| } |
| } |
|
|
| void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { |
| if (!ggml_is_contiguous(tensor)) { |
| int64_t id[4] = { 0, 0, 0, 0 }; |
| ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); |
| ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value); |
| return; |
| } |
| switch (tensor->type) { |
| case GGML_TYPE_I8: |
| { |
| ((int8_t *)(tensor->data))[i] = value; |
| } break; |
| case GGML_TYPE_I16: |
| { |
| ((int16_t *)(tensor->data))[i] = value; |
| } break; |
| case GGML_TYPE_I32: |
| { |
| ((int32_t *)(tensor->data))[i] = value; |
| } break; |
| case GGML_TYPE_F16: |
| { |
| ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value); |
| } break; |
| case GGML_TYPE_BF16: |
| { |
| ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value); |
| } break; |
| case GGML_TYPE_F32: |
| { |
| ((float *)(tensor->data))[i] = value; |
| } break; |
| default: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| } |
| } |
|
|
| float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) { |
| void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; |
| switch (tensor->type) { |
| case GGML_TYPE_I8: |
| return ((int8_t *) data)[0]; |
| case GGML_TYPE_I16: |
| return ((int16_t *) data)[0]; |
| case GGML_TYPE_I32: |
| return ((int32_t *) data)[0]; |
| case GGML_TYPE_F16: |
| return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); |
| case GGML_TYPE_BF16: |
| return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); |
| case GGML_TYPE_F32: |
| return ((float *) data)[0]; |
| default: |
| GGML_ABORT("fatal error"); |
| } |
| } |
|
|
| void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) { |
| void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; |
| switch (tensor->type) { |
| case GGML_TYPE_I8: |
| { |
| ((int8_t *)(data))[0] = value; |
| } break; |
| case GGML_TYPE_I16: |
| { |
| ((int16_t *)(data))[0] = value; |
| } break; |
| case GGML_TYPE_I32: |
| { |
| ((int32_t *)(data))[0] = value; |
| } break; |
| case GGML_TYPE_F16: |
| { |
| ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value); |
| } break; |
| case GGML_TYPE_BF16: |
| { |
| ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value); |
| } break; |
| case GGML_TYPE_F32: |
| { |
| ((float *)(data))[0] = value; |
| } break; |
| default: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| } |
| } |
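/*
 * Illustrative round trip through the accessors above: the _1d variants take
 * a flat index (unraveled internally when the tensor is not contiguous), the
 * _nd variants take explicit per-dimension indices.
 *
 *     ggml_set_f32_nd(t, 2, 1, 0, 0, 3.25f);
 *     float a = ggml_get_f32_nd(t, 2, 1, 0, 0);     // 3.25f
 *     float b = ggml_get_f32_1d(t, 2 + 1*t->ne[0]); // same element, if t is contiguous
 */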
|
|
////////////////////////////////////////////////////////////////////////////////

// ggml_compute_forward_mul_mat
|
|
| static void ggml_compute_forward_mul_mat_one_chunk( |
| const struct ggml_compute_params * params, |
| struct ggml_tensor * dst, |
| const enum ggml_type type, |
| const int64_t num_rows_per_vec_dot, |
| const int64_t ir0_start, |
| const int64_t ir0_end, |
| const int64_t ir1_start, |
| const int64_t ir1_end) { |
|
|
| const struct ggml_tensor * src0 = dst->src[0]; |
| const struct ggml_tensor * src1 = dst->src[1]; |
|
|
| GGML_TENSOR_BINARY_OP_LOCALS |
|
|
| const bool src1_cont = ggml_is_contiguous(src1); |
|
|
| ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot; |
| enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type; |
|
|
    // broadcast factors
    const int64_t r2 = ne12 / ne02;
    const int64_t r3 = ne13 / ne03;
|
|
    //printf("ir0_start = %6lld, ir0_end = %6lld, ir1_start = %6lld, ir1_end = %6lld\n", ir0_start, ir0_end, ir1_start, ir1_end);

    // threads with no work simply yield (not sure if it helps)
    if (ir0_start >= ir0_end || ir1_start >= ir1_end) {
        return;
    }
|
|
| const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; |
| const size_t row_size = ggml_row_size(vec_dot_type, ne10); |
|
|
| assert(ne12 % ne02 == 0); |
| assert(ne13 % ne03 == 0); |
|
|
    // block-tiling attempt
    const int64_t blck_0 = 16;
    const int64_t blck_1 = 16;
|
|
| const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11; |
|
|
    // attempt to reduce false-sharing (does not seem to make a difference)
    // 16 * 2, accounting for mmla kernels
    float tmp[32];
|
|
| for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) { |
| for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) { |
| for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ir1 += num_rows_per_vec_dot) { |
| const int64_t i13 = (ir1 / (ne12 * ne1)); |
| const int64_t i12 = (ir1 - i13 * ne12 * ne1) / ne1; |
| const int64_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1); |
|
|
                // broadcast src0 into src1
                const int64_t i03 = i13 / r3;
                const int64_t i02 = i12 / r2;
|
|
| const int64_t i1 = i11; |
| const int64_t i2 = i12; |
| const int64_t i3 = i13; |
|
|
| const char * src0_row = (const char*)src0->data + (0 + i02 * nb02 + i03 * nb03); |
|
|
                // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
                //       if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
                //       the original src1 data pointer, so we should index using the indices directly
                // TODO: this is a bit of a hack, we should probably have a better way to handle this
                const char * src1_col = (const char*)wdata +
                    (src1_cont || src1->type != vec_dot_type
                     ? (i11 + i12 * ne11 + i13 * ne12 * ne11) * row_size
                     : (i11 * nb11 + i12 * nb12 + i13 * nb13));
| float * dst_col = (float*)((char*)dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); |
|
|
                // compute blck_0 output values, num_rows_per_vec_dot rows per call
                // (mmla kernels write two interleaved result rows into tmp, hence the tmp stride of 16)
|
|
| for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ir0 += num_rows_per_vec_dot) { |
| vec_dot(ne00, &tmp[ir0 - iir0], (num_rows_per_vec_dot > 1 ? 16 : 0), src0_row + ir0 * nb01, (num_rows_per_vec_dot > 1 ? nb01 : 0), src1_col, (num_rows_per_vec_dot > 1 ? src1_col_stride : 0), num_rows_per_vec_dot); |
| } |
|
|
| for (int cn = 0; cn < num_rows_per_vec_dot; ++cn) { |
| memcpy(&dst_col[iir0 + cn * nb1 / nb0], tmp + (cn * 16), (MIN(iir0 + blck_0, ir0_end) - iir0) * sizeof(float)); |
| } |
| } |
| } |
| } |
| } |
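/*
 * Reading aid with assumed shapes (not a measurement): each iteration of the
 * two outer loops above covers one 16x16 tile of dst. The flattened row index
 * ir1 decomposes back into batch coordinates as in this worked example for
 * ne1 = 32, ne12 = 4:
 *
 *     // ir1 = 200: i13 = 200/(4*32) = 1, i12 = (200 - 128)/32 = 2, i11 = 8
 */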
|
|
| void ggml_compute_forward_mul_mat( |
| const struct ggml_compute_params * params, |
| struct ggml_tensor * dst) { |
|
|
| const struct ggml_tensor * src0 = dst->src[0]; |
| const struct ggml_tensor * src1 = dst->src[1]; |
|
|
| GGML_TENSOR_BINARY_OP_LOCALS |
|
|
| const int ith = params->ith; |
| const int nth = params->nth; |
|
|
| enum ggml_type const vec_dot_type = type_traits_cpu[src0->type].vec_dot_type; |
| ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float; |
| int64_t const vec_dot_num_rows = type_traits_cpu[src0->type].nrows; |
|
|
| GGML_ASSERT(ne0 == ne01); |
| GGML_ASSERT(ne1 == ne11); |
| GGML_ASSERT(ne2 == ne12); |
| GGML_ASSERT(ne3 == ne13); |
|
|
    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(src0->type));
    GGML_ASSERT(nb10 == ggml_type_size(src1->type));
|
|
    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);
|
|
    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

    // TODO: extract to "extra_op"
#if GGML_USE_LLAMAFILE
    // broadcast factors
    const int64_t r2 = ne12 / ne02;
    const int64_t r3 = ne13 / ne03;
|
|
| const bool src1_cont = ggml_is_contiguous(src1); |
|
|
| if (src1_cont) { |
| for (int64_t i13 = 0; i13 < ne13; i13++) |
| for (int64_t i12 = 0; i12 < ne12; i12++) |
| if (!llamafile_sgemm(params, |
| ne01, ne11, ne00/ggml_blck_size(src0->type), |
| (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, |
| nb01/ggml_type_size(src0->type), |
| (const char *)src1->data + i12*nb12 + i13*nb13, |
| nb11/ggml_type_size(src1->type), |
| (char *)dst->data + i12*nb2 + i13*nb3, |
| nb1/ggml_type_size(dst->type), |
| src0->type, |
| src1->type, |
| dst->type)) |
| goto UseGgmlGemm1; |
| return; |
| } |
| UseGgmlGemm1:; |
| #endif |
|
|
| if (src1->type != vec_dot_type) { |
| char * wdata = params->wdata; |
|
|
| const size_t nbw0 = ggml_type_size(vec_dot_type); |
| const size_t nbw1 = ggml_row_size(vec_dot_type, ne10); |
| const size_t nbw2 = nbw1*ne11; |
| const size_t nbw3 = nbw2*ne12; |
|
|
| assert(params->wsize >= ne13*nbw3); |
| GGML_ASSERT(src1->type == GGML_TYPE_F32); |
|
|
| #if 0 |
| for (int64_t i13 = 0; i13 < ne13; ++i13) { |
| for (int64_t i12 = 0; i12 < ne12; ++i12) { |
| for (int64_t i11 = ith; i11 < ne11; i11 += nth) { |
| from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), |
| (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1), |
| ne10); |
| } |
| } |
| } |
| #else |
| for (int64_t i13 = 0; i13 < ne13; ++i13) { |
| for (int64_t i12 = 0; i12 < ne12; ++i12) { |
| for (int64_t i11 = 0; i11 < ne11; ++i11) { |
| size_t bs = ggml_blck_size(vec_dot_type); |
| int64_t ne10_block_start = (ith * ne10/bs) / nth; |
| int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth; |
| from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10), |
| (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0), |
| (ne10_block_end - ne10_block_start) * bs); |
| } |
| } |
| } |
| #endif |
| } |
|
|
    if (ith == 0) {
        // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
        atomic_store_explicit(&params->threadpool->current_chunk, nth, memory_order_relaxed);
    }
|
|
| ggml_barrier(params->threadpool); |
|
|
| #if GGML_USE_LLAMAFILE |
| if (src1->type != vec_dot_type) { |
| const void* wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; |
| const size_t row_size = ggml_row_size(vec_dot_type, ne10); |
|
|
| for (int64_t i13 = 0; i13 < ne13; i13++) |
| for (int64_t i12 = 0; i12 < ne12; i12++) |
| if (!llamafile_sgemm(params, |
| ne01, ne11, ne00/ggml_blck_size(src0->type), |
| (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, |
| nb01/ggml_type_size(src0->type), |
| (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size, |
| row_size/ggml_type_size(vec_dot_type), |
| (char *)dst->data + i12*nb2 + i13*nb3, |
| nb1/ggml_type_size(dst->type), |
| src0->type, |
| vec_dot_type, |
| dst->type)) |
| goto UseGgmlGemm2; |
| return; |
| } |
| UseGgmlGemm2:; |
| #endif |
|
|
    // This is the size of the first dimension of the result, so we can iterate that way. (see the ASSERT above, these are the same numbers)
    const int64_t nr0 = ne0;

    // This is the size of the rest of the dimensions of the result
    const int64_t nr1 = ne1 * ne2 * ne3;
|
|
    // Now select a reasonable chunk size.
    int chunk_size = 16;

    // We need to step up the size if it's small
    if (nr0 == 1 || nr1 == 1) {
        chunk_size = 64;
    }
|
|
    // distribute the work across the inner or outer loop based on which one is larger
    // The number of chunks in the 0/1 dim.
    // CEIL(nr0/chunk_size)
    int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
    int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
|
|
    // If the chunking is poor for the number of threads on this setup, scrap the whole plan and re-chunk it by thread.
    //   Also, chunking by thread was measured to perform better on NUMA systems. See https://github.com/ggml-org/llama.cpp/pull/6915
    //   In theory, chunking should be just as useful on NUMA and non-NUMA systems, but testing disagreed with that.
    if (nchunk0 * nchunk1 < nth * 4 || ggml_is_numa()) {
        // distribute the thread work across the inner or outer loop based on which one is larger
        nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
        nchunk1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
    }
|
|
    // The number of elements in each chunk
    const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
    const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;
|
|
    // The first chunk comes from our thread_id, the rest will get auto-assigned.
    int current_chunk = ith;
|
|
| while (current_chunk < nchunk0 * nchunk1) { |
| const int64_t ith0 = current_chunk % nchunk0; |
| const int64_t ith1 = current_chunk / nchunk0; |
|
|
| const int64_t ir0_start = dr0 * ith0; |
| const int64_t ir0_end = MIN(ir0_start + dr0, nr0); |
|
|
| const int64_t ir1_start = dr1 * ith1; |
| const int64_t ir1_end = MIN(ir1_start + dr1, nr1); |
|
|
        // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols
        int64_t num_rows_per_vec_dot = vec_dot_num_rows;

        // these checks are needed to avoid crossing dim1 boundaries
        // can be optimized, but the logic would become more complicated, so keeping it like this for simplicity
        if ((nr0 % 2 != 0) || (ne11 % 2 != 0) || ((ir0_end - ir0_start) % 2 != 0) || ((ir1_end - ir1_start) % 2 != 0)) {
            num_rows_per_vec_dot = 1;
        }
| ggml_compute_forward_mul_mat_one_chunk(params, dst, src0->type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end); |
|
|
| if (nth >= nchunk0 * nchunk1) { |
| break; |
| } |
|
|
| current_chunk = atomic_fetch_add_explicit(¶ms->threadpool->current_chunk, 1, memory_order_relaxed); |
| } |
| } |
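/*
 * Worked example of the chunking math above (assumed shapes): nr0 = 4096,
 * nr1 = 512 and chunk_size = 16 give nchunk0 = 256 and nchunk1 = 32, i.e.
 * 8192 chunks. With nth = 8 threads this is well above nth * 4 = 32, so the
 * plan is kept and threads keep pulling chunk ids from
 * threadpool->current_chunk until all 8192 chunks are done.
 */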
|
|
// ggml_compute_forward_mul_mat_id

#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ids->ne[0]*ids->ne[1] + (i1)]
|
|
| struct mmid_row_mapping { |
| int32_t i1; |
| int32_t i2; |
| }; |
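/*
 * Layout sketch: MMID_MATRIX_ROW(e, r) indexes a conceptual
 * [n_as][ids->ne[0]*ids->ne[1]] table stored flat in matrix_rows, where e is
 * the expert (src0 matrix) id and r enumerates the (expert slot, token) pairs
 * routed to that expert. For an assumed ids tensor with ne[0] = 2 experts
 * used per token and ne[1] = 4 tokens, each expert can receive up to 8 rows.
 */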
|
|
| static void ggml_compute_forward_mul_mat_id_one_chunk( |
| struct ggml_tensor * dst, |
| const struct ggml_tensor * src0, |
| const struct ggml_tensor * src1, |
| const struct ggml_tensor * ids, |
| const int64_t cur_a, |
| const int64_t ir0_start, |
| const int64_t ir0_end, |
| const int64_t ir1_start, |
| const int64_t ir1_end, |
| const char * src0_cur, |
| const struct mmid_row_mapping * matrix_rows, |
| const size_t row_size, |
| const bool src1_cont, |
| const void * wdata) { |
|
|
| GGML_TENSOR_BINARY_OP_LOCALS |
|
|
| const enum ggml_type type = src0->type; |
|
|
| ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot; |
| enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type; |
|
|
| const int64_t blck_0 = 16; |
| const int64_t blck_1 = 16; |
|
|
| float tmp[16]; |
|
|
| for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) { |
| for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) { |
| for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ++ir1) { |
| const int64_t _i12 = ir1; |
|
|
| struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12); |
| const int id = row_mapping.i1; |
|
|
| const int64_t i11 = id % ne11; |
| const int64_t i12 = row_mapping.i2; |
|
|
| const int64_t i1 = id; |
| const int64_t i2 = i12; |
|
|
                // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
                //       if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
                //       the original src1 data pointer, so we should index using the indices directly
                // TODO: this is a bit of a hack, we should probably have a better way to handle this
                const char * src1_col = (const char *) wdata +
                    (src1_cont || src1->type != vec_dot_type
                     ? (i11 + i12*ne11)*row_size
                     : (i11*nb11 + i12*nb12));
|
|
| float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2)); |
|
|
| for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) { |
| vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1); |
| } |
|
|
| memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir0_end) - iir0)*sizeof(float)); |
| } |
| } |
| } |
| } |
|
|
| static void * incr_ptr_aligned(void ** p, size_t size, size_t align) { |
|
|
| void * ptr = *p; |
| ptr = (void *) GGML_PAD((uintptr_t) ptr, align); |
| *p = (void *) ((char *) ptr + size); |
| return ptr; |
| } |
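/*
 * Usage sketch (mirrors the carving of params->wdata below): successive calls
 * hand out aligned, non-overlapping sub-buffers from a single arena pointer.
 *
 *     void * cur = wdata; // hypothetical arena start
 *     int64_t * counts = incr_ptr_aligned(&cur, 8*sizeof(int64_t), sizeof(int64_t));
 *     char    * blob   = incr_ptr_aligned(&cur, 256, 64); // 64-byte aligned
 *     // (char *) cur - (char *) wdata == total bytes consumed so far
 */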
|
|
| static void ggml_compute_forward_mul_mat_id( |
| const struct ggml_compute_params * params, |
| struct ggml_tensor * dst) { |
|
|
| const struct ggml_tensor * src0 = dst->src[0]; |
| const struct ggml_tensor * src1 = dst->src[1]; |
| const struct ggml_tensor * ids = dst->src[2]; |
|
|
| GGML_TENSOR_BINARY_OP_LOCALS |
|
|
| const int ith = params->ith; |
| const int nth = params->nth; |
|
|
| const enum ggml_type type = src0->type; |
|
|
| const bool src1_cont = ggml_is_contiguous(src1); |
|
|
| enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type; |
| ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float; |
|
|
    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == ggml_type_size(src1->type));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);
|
|
    // row groups
    const int n_ids = ids->ne[0]; // n_expert_used
    const int n_as  = ne02;       // n_expert
|
|
| void * wdata_cur = params->wdata; |
|
|
| if (src1->type != vec_dot_type) { |
| incr_ptr_aligned(&wdata_cur, ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t)); |
| } |
|
|
| int64_t * matrix_row_counts = |
| incr_ptr_aligned(&wdata_cur, n_as*sizeof(int64_t), sizeof(int64_t)); |
|
|
| struct mmid_row_mapping * matrix_rows = |
| incr_ptr_aligned(&wdata_cur, n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping), sizeof(int64_t)); |
|
|
| char (*atomic_current_chunk)[CACHE_LINE_SIZE] = |
| incr_ptr_aligned(&wdata_cur, CACHE_LINE_SIZE * n_as, CACHE_LINE_SIZE); |
|
|
| GGML_ASSERT(params->wsize >= (size_t)((char *) wdata_cur - (char *) params->wdata)); |
|
|
| if (src1->type != vec_dot_type) { |
| char * wdata = params->wdata; |
|
|
| const size_t nbw0 = ggml_type_size(vec_dot_type); |
| const size_t nbw1 = ggml_row_size(vec_dot_type, ne10); |
| const size_t nbw2 = nbw1*ne11; |
| const size_t nbw3 = nbw2*ne12; |
|
|
| assert(params->wsize >= ne13*nbw3); |
| GGML_ASSERT(src1->type == GGML_TYPE_F32); |
|
|
| #if 0 |
| for (int64_t i13 = 0; i13 < ne13; ++i13) { |
| for (int64_t i12 = ith; i12 < ne12; i12 += nth) { |
| for (int64_t i11 = 0; i11 < ne11; ++i11) { |
| from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), |
| (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1), |
| ne10); |
| } |
| } |
| } |
| #else |
| for (int64_t i13 = 0; i13 < ne13; ++i13) { |
| for (int64_t i12 = 0; i12 < ne12; ++i12) { |
| for (int64_t i11 = 0; i11 < ne11; ++i11) { |
| size_t bs = ggml_blck_size(vec_dot_type); |
| int64_t ne10_block_start = (ith * ne10/bs) / nth; |
| int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth; |
| from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10), |
| (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0), |
| (ne10_block_end - ne10_block_start) * bs); |
| } |
| } |
| } |
| #endif |
| } |
|
|
    if (ith == 0) {
        // initialize matrix_row_counts
        memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
|
|
        // group rows by src0 matrix
        for (int64_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) {
| for (int id = 0; id < n_ids; ++id) { |
| const int32_t i02 = *(const int32_t *) ((const char *) ids->data + iid1*ids->nb[1] + id*ids->nb[0]); |
|
|
| assert(i02 >= 0 && i02 < n_as); |
|
|
| MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = (struct mmid_row_mapping) {id, iid1}; |
| matrix_row_counts[i02] += 1; |
| } |
| } |
| } |
|
|
    // initialize the per-expert chunk counters (threads start on chunks [0, nth))
    for (int cur_a = ith; cur_a < n_as; cur_a += nth) {
        atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
        *current_chunk_ctr = nth;
    }
|
|
| ggml_barrier(params->threadpool); |
|
|
| for (int cur_a = 0; cur_a < n_as; ++cur_a) { |
| const int64_t cne1 = matrix_row_counts[cur_a]; |
|
|
| if (cne1 == 0) { |
| continue; |
| } |
|
|
| const char * src0_cur = (const char *) src0->data + cur_a * nb02; |
| const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; |
| const size_t row_size = ggml_row_size(vec_dot_type, ne10); |
|
|
| const int64_t nr0 = ne01; |
| const int64_t nr1 = cne1; |
|
|
| int chunk_size = 16; |
| if (nr0 == 1 || nr1 == 1) { |
| chunk_size = 64; |
| } |
|
|
        // disable for NUMA
        const bool disable_chunking = ggml_is_numa();
|
|
| int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size; |
| int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size; |
|
|
| if (nchunk0 * nchunk1 < nth * 4 || disable_chunking) { |
| nchunk0 = nr0 > nr1 ? nth : 1; |
| nchunk1 = nr0 > nr1 ? 1 : nth; |
| } |
|
|
| const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; |
| const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; |
|
|
| int current_chunk = ith; |
|
|
| atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a); |
|
|
| while (current_chunk < nchunk0 * nchunk1) { |
| const int64_t ith0 = current_chunk % nchunk0; |
| const int64_t ith1 = current_chunk / nchunk0; |
|
|
| const int64_t ir0_start = dr0 * ith0; |
| const int64_t ir0_end = MIN(ir0_start + dr0, nr0); |
|
|
| const int64_t ir1_start = dr1 * ith1; |
| const int64_t ir1_end = MIN(ir1_start + dr1, nr1); |
|
|
| ggml_compute_forward_mul_mat_id_one_chunk( |
| dst, src0, src1, ids, cur_a, |
| ir0_start, ir0_end, ir1_start, ir1_end, |
| src0_cur, matrix_rows, row_size, src1_cont, wdata |
| ); |
|
|
| if (nth >= nchunk0 * nchunk1) { |
| break; |
| } |
|
|
| current_chunk = atomic_fetch_add_explicit(current_chunk_ctr, 1, memory_order_relaxed); |
| } |
| } |
| } |
|
|
/////////////////////////////////

static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
| GGML_ASSERT(params); |
|
|
| if (tensor->op == GGML_OP_NONE || ggml_is_empty(tensor)) { |
| return; |
| } |
|
|
    // extra_buffer op?
    if (ggml_cpu_extra_compute_forward(params, tensor)) {
        return;
    }
|
|
| switch (tensor->op) { |
| case GGML_OP_DUP: |
| { |
| ggml_compute_forward_dup(params, tensor); |
| } break; |
| case GGML_OP_ADD: |
| { |
| ggml_compute_forward_add(params, tensor); |
| } break; |
| case GGML_OP_ADD_ID: |
| { |
| ggml_compute_forward_add_id(params, tensor); |
| } break; |
| case GGML_OP_ADD1: |
| { |
| ggml_compute_forward_add1(params, tensor); |
| } break; |
| case GGML_OP_ACC: |
| { |
| ggml_compute_forward_acc(params, tensor); |
| } break; |
| case GGML_OP_SUB: |
| { |
| ggml_compute_forward_sub(params, tensor); |
| } break; |
| case GGML_OP_MUL: |
| { |
| ggml_compute_forward_mul(params, tensor); |
| } break; |
| case GGML_OP_DIV: |
| { |
| ggml_compute_forward_div(params, tensor); |
| } break; |
| case GGML_OP_SQR: |
| { |
| ggml_compute_forward_sqr(params, tensor); |
| } break; |
| case GGML_OP_SQRT: |
| { |
| ggml_compute_forward_sqrt(params, tensor); |
| } break; |
| case GGML_OP_LOG: |
| { |
| ggml_compute_forward_log(params, tensor); |
| } break; |
| case GGML_OP_SIN: |
| { |
| ggml_compute_forward_sin(params, tensor); |
| } break; |
| case GGML_OP_COS: |
| { |
| ggml_compute_forward_cos(params, tensor); |
| } break; |
| case GGML_OP_SUM: |
| { |
| ggml_compute_forward_sum(params, tensor); |
| } break; |
| case GGML_OP_SUM_ROWS: |
| { |
| ggml_compute_forward_sum_rows(params, tensor); |
| } break; |
| case GGML_OP_CUMSUM: |
| { |
| ggml_compute_forward_cumsum(params, tensor); |
| } break; |
| case GGML_OP_MEAN: |
| { |
| ggml_compute_forward_mean(params, tensor); |
| } break; |
| case GGML_OP_ARGMAX: |
| { |
| ggml_compute_forward_argmax(params, tensor); |
| } break; |
| case GGML_OP_COUNT_EQUAL: |
| { |
| ggml_compute_forward_count_equal(params, tensor); |
| } break; |
| case GGML_OP_REPEAT: |
| { |
| ggml_compute_forward_repeat(params, tensor); |
| } break; |
| case GGML_OP_REPEAT_BACK: |
| { |
| ggml_compute_forward_repeat_back(params, tensor); |
| } break; |
| case GGML_OP_CONCAT: |
| { |
| ggml_compute_forward_concat(params, tensor); |
| } break; |
| case GGML_OP_SILU_BACK: |
| { |
| ggml_compute_forward_silu_back(params, tensor); |
| } break; |
| case GGML_OP_NORM: |
| { |
| ggml_compute_forward_norm(params, tensor); |
| } break; |
| case GGML_OP_RMS_NORM: |
| { |
| ggml_compute_forward_rms_norm(params, tensor); |
| } break; |
| case GGML_OP_RMS_NORM_BACK: |
| { |
| ggml_compute_forward_rms_norm_back(params, tensor); |
| } break; |
| case GGML_OP_GROUP_NORM: |
| { |
| ggml_compute_forward_group_norm(params, tensor); |
| } break; |
| case GGML_OP_L2_NORM: |
| { |
| ggml_compute_forward_l2_norm(params, tensor); |
| } break; |
| case GGML_OP_MUL_MAT: |
| { |
| ggml_compute_forward_mul_mat(params, tensor); |
| } break; |
| case GGML_OP_MUL_MAT_ID: |
| { |
| ggml_compute_forward_mul_mat_id(params, tensor); |
| } break; |
| case GGML_OP_OUT_PROD: |
| { |
| ggml_compute_forward_out_prod(params, tensor); |
| } break; |
| case GGML_OP_SCALE: |
| { |
| ggml_compute_forward_scale(params, tensor); |
| } break; |
| case GGML_OP_SET: |
| { |
| ggml_compute_forward_set(params, tensor); |
| } break; |
| case GGML_OP_CPY: |
| { |
| ggml_compute_forward_cpy(params, tensor); |
| } break; |
| case GGML_OP_CONT: |
| { |
| ggml_compute_forward_cont(params, tensor); |
| } break; |
| case GGML_OP_GET_ROWS: |
| { |
| ggml_compute_forward_get_rows(params, tensor); |
| } break; |
| case GGML_OP_GET_ROWS_BACK: |
| { |
| ggml_compute_forward_get_rows_back(params, tensor); |
| } break; |
| case GGML_OP_SET_ROWS: |
| { |
| ggml_compute_forward_set_rows(params, tensor); |
| } break; |
| case GGML_OP_DIAG: |
| { |
| ggml_compute_forward_diag(params, tensor); |
| } break; |
| case GGML_OP_DIAG_MASK_INF: |
| { |
| ggml_compute_forward_diag_mask_inf(params, tensor); |
| } break; |
| case GGML_OP_DIAG_MASK_ZERO: |
| { |
| ggml_compute_forward_diag_mask_zero(params, tensor); |
| } break; |
| case GGML_OP_SOFT_MAX: |
| { |
| ggml_compute_forward_soft_max(params, tensor); |
| } break; |
| case GGML_OP_SOFT_MAX_BACK: |
| { |
| ggml_compute_forward_soft_max_ext_back(params, tensor); |
| } break; |
| case GGML_OP_ROPE: |
| { |
| ggml_compute_forward_rope(params, tensor); |
| } break; |
| case GGML_OP_ROPE_BACK: |
| { |
| ggml_compute_forward_rope_back(params, tensor); |
| } break; |
| case GGML_OP_CLAMP: |
| { |
| ggml_compute_forward_clamp(params, tensor); |
| } break; |
| case GGML_OP_CONV_TRANSPOSE_1D: |
| { |
| ggml_compute_forward_conv_transpose_1d(params, tensor); |
| } break; |
| case GGML_OP_IM2COL: |
| { |
| ggml_compute_forward_im2col(params, tensor); |
| } break; |
| case GGML_OP_IM2COL_BACK: |
| { |
| ggml_compute_forward_im2col_back_f32(params, tensor); |
| } break; |
| case GGML_OP_IM2COL_3D: |
| { |
| ggml_compute_forward_im2col_3d(params, tensor); |
| } break; |
| case GGML_OP_CONV_2D: |
| { |
| ggml_compute_forward_conv_2d(params, tensor); |
| } break; |
| case GGML_OP_CONV_3D: |
| { |
| ggml_compute_forward_conv_3d(params, tensor); |
| } break; |
| case GGML_OP_CONV_2D_DW: |
| { |
| ggml_compute_forward_conv_2d_dw(params, tensor); |
| } break; |
| case GGML_OP_CONV_TRANSPOSE_2D: |
| { |
| ggml_compute_forward_conv_transpose_2d(params, tensor); |
| } break; |
| case GGML_OP_POOL_1D: |
| { |
| ggml_compute_forward_pool_1d(params, tensor); |
| } break; |
| case GGML_OP_POOL_2D: |
| { |
| ggml_compute_forward_pool_2d(params, tensor); |
| } break; |
| case GGML_OP_POOL_2D_BACK: |
| { |
| ggml_compute_forward_pool_2d_back(params, tensor); |
| } break; |
| case GGML_OP_UPSCALE: |
| { |
| ggml_compute_forward_upscale(params, tensor); |
| } break; |
| case GGML_OP_PAD: |
| { |
| ggml_compute_forward_pad(params, tensor); |
| } break; |
| case GGML_OP_PAD_REFLECT_1D: |
| { |
| ggml_compute_forward_pad_reflect_1d(params, tensor); |
| } break; |
| case GGML_OP_ROLL: |
| { |
| ggml_compute_forward_roll(params, tensor); |
| } break; |
| case GGML_OP_ARANGE: |
| { |
| ggml_compute_forward_arange(params, tensor); |
| } break; |
| case GGML_OP_TIMESTEP_EMBEDDING: |
| { |
| ggml_compute_forward_timestep_embedding(params, tensor); |
| } break; |
| case GGML_OP_ARGSORT: |
| { |
| ggml_compute_forward_argsort(params, tensor); |
| } break; |
| case GGML_OP_TOP_K: |
| { |
| ggml_compute_forward_top_k(params, tensor); |
| } break; |
| case GGML_OP_LEAKY_RELU: |
| { |
| ggml_compute_forward_leaky_relu(params, tensor); |
| } break; |
| case GGML_OP_TRI: |
| { |
| ggml_compute_forward_tri(params, tensor); |
| } break; |
| case GGML_OP_FILL: |
| { |
| ggml_compute_forward_fill(params, tensor); |
| } break; |
| case GGML_OP_FLASH_ATTN_EXT: |
| { |
| ggml_compute_forward_flash_attn_ext(params, tensor); |
| } break; |
| case GGML_OP_FLASH_ATTN_BACK: |
| { |
| int32_t t = ggml_get_op_params_i32(tensor, 0); |
| GGML_ASSERT(t == 0 || t == 1); |
| bool masked = t != 0; |
| ggml_compute_forward_flash_attn_back(params, masked, tensor); |
| } break; |
| case GGML_OP_SSM_CONV: |
| { |
| ggml_compute_forward_ssm_conv(params, tensor); |
| } break; |
| case GGML_OP_SSM_SCAN: |
| { |
| ggml_compute_forward_ssm_scan(params, tensor); |
| } break; |
| case GGML_OP_WIN_PART: |
| { |
| ggml_compute_forward_win_part(params, tensor); |
| } break; |
| case GGML_OP_WIN_UNPART: |
| { |
| ggml_compute_forward_win_unpart(params, tensor); |
| } break; |
| case GGML_OP_UNARY: |
| { |
| ggml_compute_forward_unary(params, tensor); |
| } break; |
| case GGML_OP_GLU: |
| { |
| ggml_compute_forward_glu(params, tensor); |
| } break; |
| case GGML_OP_GET_REL_POS: |
| { |
| ggml_compute_forward_get_rel_pos(params, tensor); |
| } break; |
| case GGML_OP_ADD_REL_POS: |
| { |
| ggml_compute_forward_add_rel_pos(params, tensor); |
| } break; |
| case GGML_OP_RWKV_WKV6: |
| { |
| ggml_compute_forward_rwkv_wkv6(params, tensor); |
| } break; |
| case GGML_OP_GATED_LINEAR_ATTN: |
| { |
| ggml_compute_forward_gla(params, tensor); |
| } break; |
| case GGML_OP_RWKV_WKV7: |
| { |
| ggml_compute_forward_rwkv_wkv7(params, tensor); |
| } break; |
| case GGML_OP_SOLVE_TRI: |
| { |
| ggml_compute_forward_solve_tri(params, tensor); |
| } break; |
| case GGML_OP_GATED_DELTA_NET: |
| { |
| ggml_compute_forward_gated_delta_net(params, tensor); |
| } break; |
| case GGML_OP_MAP_CUSTOM1: |
| { |
| ggml_compute_forward_map_custom1(params, tensor); |
| } |
| break; |
| case GGML_OP_MAP_CUSTOM2: |
| { |
| ggml_compute_forward_map_custom2(params, tensor); |
| } |
| break; |
| case GGML_OP_MAP_CUSTOM3: |
| { |
| ggml_compute_forward_map_custom3(params, tensor); |
| } |
| break; |
| case GGML_OP_CUSTOM: |
| { |
| ggml_compute_forward_custom(params, tensor); |
| } |
| break; |
| case GGML_OP_CROSS_ENTROPY_LOSS: |
| { |
| ggml_compute_forward_cross_entropy_loss(params, tensor); |
| } |
| break; |
| case GGML_OP_CROSS_ENTROPY_LOSS_BACK: |
| { |
| ggml_compute_forward_cross_entropy_loss_back(params, tensor); |
| } |
| break; |
| case GGML_OP_OPT_STEP_ADAMW: |
| { |
| ggml_compute_forward_opt_step_adamw(params, tensor); |
| } |
| break; |
| case GGML_OP_OPT_STEP_SGD: |
| { |
| ggml_compute_forward_opt_step_sgd(params, tensor); |
| } |
| break; |
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_RESHAPE:
            {
                // nop
            } break;
        case GGML_OP_PERMUTE:
            {
                // nop
            } break;
        case GGML_OP_VIEW:
            {
                // nop
            } break;
        case GGML_OP_TRANSPOSE:
            {
                // nop
            } break;
| case GGML_OP_COUNT: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| } |
| } |
|
|
// Android's libc implementation "bionic" does not support setting affinity
#if defined(__gnu_linux__)
static void set_numa_thread_affinity(int thread_n) {
| if (!ggml_is_numa()) { |
| return; |
| } |
|
|
| int node_num; |
| int rv; |
| size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); |
|
|
    switch(g_state.numa.numa_strategy) {
        case GGML_NUMA_STRATEGY_DISTRIBUTE:
            // run thread on node_num thread_n / (threads per node)
            node_num = thread_n % g_state.numa.n_nodes;
            break;
        case GGML_NUMA_STRATEGY_ISOLATE:
            // run thread on current_node
            node_num = g_state.numa.current_node;
            break;
        case GGML_NUMA_STRATEGY_NUMACTL:
            // use the cpuset that numactl gave us
            rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset);
            if (rv) {
                fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
            }
            return;
        default:
            return;
    }
|
|
| struct ggml_numa_node * node = &g_state.numa.nodes[node_num]; |
|
|
| cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus); |
| CPU_ZERO_S(setsize, cpus); |
| for (size_t i = 0; i < node->n_cpus; ++i) { |
| CPU_SET_S(node->cpus[i], setsize, cpus); |
| } |
|
|
| rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); |
| if (rv) { |
| fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv)); |
| } |
|
|
| CPU_FREE(cpus); |
| } |
|
|
| static void clear_numa_thread_affinity(void) { |
| if (!ggml_is_numa()) { |
| return; |
| } |
|
|
| size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); |
|
|
| cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus); |
| CPU_ZERO_S(setsize, cpus); |
| for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) { |
| CPU_SET_S(i, setsize, cpus); |
| } |
|
|
| int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); |
| if (rv) { |
| fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv)); |
| } |
|
|
| CPU_FREE(cpus); |
| } |
| #else |
| |
| // NUMA affinity control is only implemented for Linux above; no-op elsewhere |
| static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); } |
| static void clear_numa_thread_affinity(void) {} |
| #endif |
|
|
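| // returns how many threads should cooperate on this node; 1 means the op |
| // runs single-threaded (on thread 0 only); the result is later clamped |
| // against the actual thread count by the graph planner (see ggml_graph_plan) |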
| static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { |
| int n_tasks = 0; |
|
|
| if (ggml_is_empty(node)) { |
| // no need to multi-thread a no-op |
| n_tasks = 1; |
| return n_tasks; |
| } |
|
|
| switch (node->op) { |
| case GGML_OP_CPY: |
| case GGML_OP_DUP: |
| case GGML_OP_CONT: |
| case GGML_OP_ADD: |
| case GGML_OP_ADD_ID: |
| case GGML_OP_ADD1: |
| case GGML_OP_ACC: |
| case GGML_OP_CUMSUM: |
| case GGML_OP_TRI: |
| case GGML_OP_FILL: |
| { |
| n_tasks = n_threads; |
| } break; |
| case GGML_OP_SUB: |
| case GGML_OP_SQR: |
| case GGML_OP_SQRT: |
| case GGML_OP_LOG: |
| case GGML_OP_SIN: |
| case GGML_OP_COS: |
| case GGML_OP_SUM: |
| case GGML_OP_SUM_ROWS: |
| case GGML_OP_MEAN: |
| case GGML_OP_ARGMAX: |
| { |
| n_tasks = 1; |
| } break; |
| case GGML_OP_COUNT_EQUAL: |
| case GGML_OP_SOLVE_TRI: |
| case GGML_OP_GATED_DELTA_NET: |
| { |
| n_tasks = n_threads; |
| } break; |
| case GGML_OP_REPEAT: |
| case GGML_OP_REPEAT_BACK: |
| case GGML_OP_LEAKY_RELU: |
| { |
| n_tasks = 1; |
| } break; |
| case GGML_OP_UNARY: |
| switch (ggml_get_unary_op(node)) { |
| case GGML_UNARY_OP_ABS: |
| case GGML_UNARY_OP_SGN: |
| case GGML_UNARY_OP_NEG: |
| case GGML_UNARY_OP_STEP: |
| case GGML_UNARY_OP_TANH: |
| case GGML_UNARY_OP_ELU: |
| case GGML_UNARY_OP_RELU: |
| case GGML_UNARY_OP_SIGMOID: |
| case GGML_UNARY_OP_HARDSWISH: |
| case GGML_UNARY_OP_HARDSIGMOID: |
| case GGML_UNARY_OP_EXP: |
| case GGML_UNARY_OP_SOFTPLUS: |
| case GGML_UNARY_OP_EXPM1: |
| case GGML_UNARY_OP_FLOOR: |
| case GGML_UNARY_OP_CEIL: |
| case GGML_UNARY_OP_ROUND: |
| case GGML_UNARY_OP_TRUNC: |
| { |
| n_tasks = 1; |
| } break; |
|
|
| case GGML_UNARY_OP_GELU: |
| case GGML_UNARY_OP_GELU_ERF: |
| case GGML_UNARY_OP_GELU_QUICK: |
| case GGML_UNARY_OP_SILU: |
| case GGML_UNARY_OP_XIELU: |
| { |
| n_tasks = n_threads; |
| } break; |
| default: |
| GGML_ABORT("fatal error"); |
| } |
| break; |
| case GGML_OP_GLU: |
| switch (ggml_get_glu_op(node)) { |
| case GGML_GLU_OP_REGLU: |
| case GGML_GLU_OP_GEGLU: |
| case GGML_GLU_OP_SWIGLU: |
| case GGML_GLU_OP_SWIGLU_OAI: |
| case GGML_GLU_OP_GEGLU_ERF: |
| case GGML_GLU_OP_GEGLU_QUICK: |
| { |
| n_tasks = n_threads; |
| } break; |
| default: |
| GGML_ABORT("fatal error"); |
| } |
| break; |
| case GGML_OP_SILU_BACK: |
| case GGML_OP_MUL: |
| case GGML_OP_DIV: |
| case GGML_OP_NORM: |
| case GGML_OP_RMS_NORM: |
| case GGML_OP_RMS_NORM_BACK: |
| case GGML_OP_L2_NORM: |
| case GGML_OP_GROUP_NORM: |
| case GGML_OP_CONCAT: |
| case GGML_OP_MUL_MAT: |
| case GGML_OP_MUL_MAT_ID: |
| case GGML_OP_OUT_PROD: |
| { |
| n_tasks = n_threads; |
| } break; |
| case GGML_OP_GET_ROWS: |
| case GGML_OP_SET_ROWS: |
| { |
| |
| |
| |
| n_tasks = 1; |
| } break; |
| case GGML_OP_SCALE: |
| case GGML_OP_SET: |
| case GGML_OP_RESHAPE: |
| case GGML_OP_VIEW: |
| case GGML_OP_PERMUTE: |
| case GGML_OP_TRANSPOSE: |
| case GGML_OP_GET_ROWS_BACK: |
| case GGML_OP_DIAG: |
| { |
| n_tasks = 1; |
| } break; |
| case GGML_OP_DIAG_MASK_ZERO: |
| case GGML_OP_DIAG_MASK_INF: |
| case GGML_OP_SOFT_MAX_BACK: |
| case GGML_OP_ROPE: |
| case GGML_OP_ROPE_BACK: |
| case GGML_OP_ADD_REL_POS: |
| { |
| n_tasks = n_threads; |
| } break; |
| case GGML_OP_CLAMP: |
| { |
| n_tasks = 1; |
| } break; |
| case GGML_OP_SOFT_MAX: |
| { |
| n_tasks = MIN(n_threads, ggml_nrows(node->src[0])); |
| } break; |
| case GGML_OP_IM2COL: |
| case GGML_OP_IM2COL_BACK: |
| case GGML_OP_IM2COL_3D: |
| case GGML_OP_CONV_2D: |
| case GGML_OP_CONV_3D: |
| case GGML_OP_CONV_2D_DW: |
| case GGML_OP_CONV_TRANSPOSE_1D: |
| case GGML_OP_CONV_TRANSPOSE_2D: |
| { |
| n_tasks = n_threads; |
| } break; |
| case GGML_OP_POOL_1D: |
| case GGML_OP_POOL_2D: |
| case GGML_OP_POOL_2D_BACK: |
| { |
| n_tasks = 1; |
| } break; |
| case GGML_OP_UPSCALE: |
| case GGML_OP_PAD: |
| case GGML_OP_PAD_REFLECT_1D: |
| case GGML_OP_ROLL: |
| case GGML_OP_ARANGE: |
| case GGML_OP_TIMESTEP_EMBEDDING: |
| case GGML_OP_ARGSORT: |
| case GGML_OP_TOP_K: |
| case GGML_OP_FLASH_ATTN_EXT: |
| case GGML_OP_FLASH_ATTN_BACK: |
| case GGML_OP_SSM_CONV: |
| case GGML_OP_SSM_SCAN: |
| case GGML_OP_RWKV_WKV6: |
| case GGML_OP_GATED_LINEAR_ATTN: |
| case GGML_OP_RWKV_WKV7: |
| { |
| n_tasks = n_threads; |
| } break; |
| case GGML_OP_WIN_PART: |
| case GGML_OP_WIN_UNPART: |
| case GGML_OP_GET_REL_POS: |
| { |
| n_tasks = 1; |
| } break; |
| case GGML_OP_MAP_CUSTOM1: |
| { |
| struct ggml_map_custom1_op_params p; |
| memcpy(&p, node->op_params, sizeof(p)); |
| if (p.n_tasks == GGML_N_TASKS_MAX) { |
| n_tasks = n_threads; |
| } else { |
| n_tasks = MIN(p.n_tasks, n_threads); |
| } |
| } break; |
| case GGML_OP_MAP_CUSTOM2: |
| { |
| struct ggml_map_custom2_op_params p; |
| memcpy(&p, node->op_params, sizeof(p)); |
| if (p.n_tasks == GGML_N_TASKS_MAX) { |
| n_tasks = n_threads; |
| } else { |
| n_tasks = MIN(p.n_tasks, n_threads); |
| } |
| } break; |
| case GGML_OP_MAP_CUSTOM3: |
| { |
| struct ggml_map_custom3_op_params p; |
| memcpy(&p, node->op_params, sizeof(p)); |
| if (p.n_tasks == GGML_N_TASKS_MAX) { |
| n_tasks = n_threads; |
| } else { |
| n_tasks = MIN(p.n_tasks, n_threads); |
| } |
| } break; |
| case GGML_OP_CUSTOM: |
| { |
| struct ggml_custom_op_params p; |
| memcpy(&p, node->op_params, sizeof(p)); |
| if (p.n_tasks == GGML_N_TASKS_MAX) { |
| n_tasks = n_threads; |
| } else { |
| n_tasks = MIN(p.n_tasks, n_threads); |
| } |
| } break; |
| case GGML_OP_CROSS_ENTROPY_LOSS: |
| case GGML_OP_CROSS_ENTROPY_LOSS_BACK: |
| case GGML_OP_OPT_STEP_ADAMW: |
| case GGML_OP_OPT_STEP_SGD: |
| { |
| n_tasks = n_threads; |
| } break; |
| case GGML_OP_NONE: |
| { |
| n_tasks = 1; |
| } break; |
| case GGML_OP_COUNT: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| default: |
| { |
| fprintf(stderr, "%s: op not implemented: ", __func__); |
| if (node->op < GGML_OP_COUNT) { |
| fprintf(stderr, "%s\n", ggml_op_name(node->op)); |
| } else { |
| fprintf(stderr, "%d\n", node->op); |
| } |
| GGML_ABORT("fatal error"); |
| } |
| } |
|
|
| assert(n_tasks > 0); |
|
|
| return n_tasks; |
| } |
|
|
| static thread_ret_t ggml_graph_compute_secondary_thread(void* data); |
|
|
| #if defined(_WIN32) |
| #include "windows.h" |
|
|
| |
| static bool ggml_thread_apply_affinity(const bool * mask) { |
| HANDLE h = GetCurrentThread(); |
| uint64_t bitmask = 0ULL; |
|
|
| assert(GGML_MAX_N_THREADS >= 64); |
|
|
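| // pack the first 64 entries of the bool mask into a 64-bit bitmask; |
| // SetThreadAffinityMask() only addresses the 64 logical CPUs of the |
| // thread's current processor group |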
| for (int32_t i = 0; i < 8; i++) { |
| int32_t idx = i * 8; |
| uint8_t val = 0; |
| val |= mask[idx + 0] << 0; |
| val |= mask[idx + 1] << 1; |
| val |= mask[idx + 2] << 2; |
| val |= mask[idx + 3] << 3; |
| val |= mask[idx + 4] << 4; |
| val |= mask[idx + 5] << 5; |
| val |= mask[idx + 6] << 6; |
| val |= mask[idx + 7] << 7; |
| bitmask |= (uint64_t)val << idx; |
| } |
|
|
| for (int32_t i = 64; i < GGML_MAX_N_THREADS; i++) { |
| if (mask[i]) { |
| fprintf(stderr, "warn: setting thread-affinity for > 64 CPUs isn't supported on windows!\n"); |
| break; |
| } |
| } |
|
|
| DWORD_PTR m = (DWORD_PTR)bitmask; |
|
|
| m = SetThreadAffinityMask(h, m); |
|
|
| return m != 0; |
| } |
|
|
| static bool ggml_thread_apply_priority(int32_t prio) { |
| // note: on Windows the process priority class caps the effective thread |
| // priority; updating the class is left to the application |
| DWORD p = THREAD_PRIORITY_NORMAL; |
| switch (prio) { |
| case GGML_SCHED_PRIO_LOW: p = THREAD_PRIORITY_BELOW_NORMAL; break; |
| case GGML_SCHED_PRIO_NORMAL: p = THREAD_PRIORITY_NORMAL; break; |
| case GGML_SCHED_PRIO_MEDIUM: p = THREAD_PRIORITY_ABOVE_NORMAL; break; |
| case GGML_SCHED_PRIO_HIGH: p = THREAD_PRIORITY_HIGHEST; break; |
| case GGML_SCHED_PRIO_REALTIME: p = THREAD_PRIORITY_TIME_CRITICAL; break; |
| } |
|
|
| if (prio != GGML_SCHED_PRIO_LOW) { |
| // tell Windows not to power-throttle this thread (EcoQoS); without this |
| // the thread may be parked on efficiency cores or clocked down |
| // (only available since Windows 8 / _WIN32_WINNT 0x0602) |
| #if _WIN32_WINNT >= 0x0602 |
| THREAD_POWER_THROTTLING_STATE t; |
| ZeroMemory(&t, sizeof(t)); |
| t.Version = THREAD_POWER_THROTTLING_CURRENT_VERSION; |
| t.ControlMask = THREAD_POWER_THROTTLING_EXECUTION_SPEED; |
| t.StateMask = 0; |
|
|
| if (!SetThreadInformation(GetCurrentThread(), ThreadPowerThrottling, &t, sizeof(t))) { |
| GGML_LOG_DEBUG("failed to disable thread power throttling %d : (%d)\n", prio, (int) GetLastError()); |
| return false; |
| } |
| #endif |
| } |
|
|
| if (prio == GGML_SCHED_PRIO_NORMAL) { |
| // keep the default (inherited) priority |
| return true; |
| } |
|
|
| if (!SetThreadPriority(GetCurrentThread(), p)) { |
| fprintf(stderr, "warn: failed to set thread priority %d : (%d)\n", prio, (int) GetLastError()); |
| return false; |
| } |
|
|
| return true; |
| } |
|
|
| #elif defined(__APPLE__) |
| #include <sys/types.h> |
| #include <sys/resource.h> |
|
|
| static bool ggml_thread_apply_affinity(const bool * mask) { |
| |
| UNUSED(mask); |
| return true; |
| } |
|
|
| static bool ggml_thread_apply_priority(int32_t prio) { |
| struct sched_param p; |
| int32_t policy = SCHED_OTHER; |
| switch (prio) { |
| // macOS has no SCHED_BATCH, so LOW maps to the default policy |
| case GGML_SCHED_PRIO_LOW: policy = SCHED_OTHER; p.sched_priority = 0; break; |
| case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break; |
| case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break; |
| case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break; |
| case GGML_SCHED_PRIO_REALTIME: policy = SCHED_FIFO; p.sched_priority = 90; break; |
| } |
|
|
| if (prio == GGML_SCHED_PRIO_NORMAL) { |
| // keep the default (inherited) priority |
| return true; |
| } |
|
|
| int32_t err = pthread_setschedparam(pthread_self(), policy, &p); |
| if (err != 0) { |
| fprintf(stderr, "warn: failed to set thread priority %d : %s (%d)\n", prio, strerror(err), err); |
| return false; |
| } |
|
|
| return true; |
| } |
|
|
| #elif defined(__gnu_linux__) |
| |
|
|
| static bool ggml_thread_apply_affinity(const bool * mask) { |
| cpu_set_t cpuset; |
| int err; |
|
|
| CPU_ZERO(&cpuset); |
|
|
| for (uint32_t i = 0; i < GGML_MAX_N_THREADS; i++) { |
| if (mask[i]) { |
| GGML_PRINT_DEBUG("Thread %lx: adding %d to cpuset\n", pthread_self(), i); |
| CPU_SET(i, &cpuset); |
| } |
| } |
|
|
| #ifdef __ANDROID__ |
| err = sched_setaffinity(0, sizeof(cpuset), &cpuset); |
| if (err < 0) { |
| err = errno; |
| } |
| #else |
| err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); |
| #endif |
| if (err != 0) { |
| fprintf(stderr, "warn: failed to set affinity for thread %lx : %s (%d)\n", pthread_self(), strerror(err), err); |
| return false; |
| } |
|
|
| return true; |
| } |
|
|
| static bool ggml_thread_apply_priority(int32_t prio) { |
| struct sched_param p; |
| int32_t policy = SCHED_OTHER; |
| switch (prio) { |
| case GGML_SCHED_PRIO_LOW: policy = SCHED_BATCH; p.sched_priority = 0; break; |
| case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break; |
| case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break; |
| case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break; |
| case GGML_SCHED_PRIO_REALTIME: policy = SCHED_FIFO; p.sched_priority = 90; break; |
| } |
|
|
| if (prio == GGML_SCHED_PRIO_NORMAL) { |
| // keep the default (inherited) priority |
| return true; |
| } |
|
|
| int32_t err = pthread_setschedparam(pthread_self(), policy, &p); |
| if (err != 0) { |
| fprintf(stderr, "warn: failed to set thread priority %d : %s (%d)\n", prio, strerror(err), err); |
| return false; |
| } |
|
|
| return true; |
| } |
|
|
| #else |
|
|
| static bool ggml_thread_apply_affinity(const bool * mask) { |
| UNUSED(mask); |
| return true; |
| } |
|
|
| static bool ggml_thread_apply_priority(int32_t prio) { |
| UNUSED(prio); |
| return true; |
| } |
|
|
| #endif |
|
|
| static bool ggml_thread_cpumask_is_valid(const bool * mask) { |
| for (int i = 0; i < GGML_MAX_N_THREADS; i++) { |
| if (mask[i]) { return true; } |
| } |
| return false; |
| } |
|
|
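| // select the cpumask for the next worker thread; |
| // strict == false : every worker gets the full global mask |
| // strict == true : each worker is pinned to the next enabled CPU, e.g. with |
| // global_mask = {1,1,1,0,...} successive calls produce |
| // {1,0,0,...}, {0,1,0,...}, {0,0,1,...} (wrapping around) |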
| static void ggml_thread_cpumask_next(const bool * global_mask, bool * local_mask, bool strict, int32_t * iter) { |
| if (!strict) { |
| memcpy(local_mask, global_mask, GGML_MAX_N_THREADS); |
| return; |
| } |
| |
| memset(local_mask, 0, GGML_MAX_N_THREADS); |
| int32_t base_idx = *iter; |
| for (int32_t i = 0; i < GGML_MAX_N_THREADS; i++) { |
| int32_t idx = base_idx + i; |
| if (idx >= GGML_MAX_N_THREADS) { |
| // wrap around |
| idx -= GGML_MAX_N_THREADS; |
| } |
| if (global_mask[idx]) { |
| local_mask[idx] = 1; |
| *iter = idx + 1; |
| return; |
| } |
| } |
| } |
|
|
| void ggml_threadpool_free(struct ggml_threadpool * threadpool) { |
| if (!threadpool) return; |
|
|
| const int n_threads = threadpool->n_threads; |
|
|
| #ifndef GGML_USE_OPENMP |
| struct ggml_compute_state* workers = threadpool->workers; |
|
|
| ggml_mutex_lock(&threadpool->mutex); |
|
|
| threadpool->stop = true; |
| threadpool->pause = false; |
|
|
| ggml_cond_broadcast(&threadpool->cond); |
| ggml_mutex_unlock(&threadpool->mutex); |
|
|
| for (int j = 1; j < n_threads; j++) { |
| int32_t rc = ggml_thread_join(workers[j].thrd, NULL); |
| GGML_ASSERT(rc == GGML_EXIT_SUCCESS || rc == GGML_EXIT_ABORTED); |
| UNUSED(rc); |
| } |
|
|
| ggml_mutex_destroy(&threadpool->mutex); |
| ggml_cond_destroy(&threadpool->cond); |
| #endif |
|
|
| const size_t workers_size = sizeof(struct ggml_compute_state) * n_threads; |
| ggml_aligned_free(threadpool->workers, workers_size); |
| ggml_aligned_free(threadpool, sizeof(struct ggml_threadpool)); |
| } |
|
|
| #ifndef GGML_USE_OPENMP |
| |
| static void ggml_threadpool_pause_locked(struct ggml_threadpool * threadpool) { |
| GGML_PRINT_DEBUG("Pausing threadpool\n"); |
| threadpool->pause = true; |
| ggml_cond_broadcast(&threadpool->cond); |
| } |
|
|
| static void ggml_threadpool_resume_locked(struct ggml_threadpool * threadpool) { |
| GGML_PRINT_DEBUG("Resuming threadpool\n"); |
| threadpool->pause = false; |
| ggml_cond_broadcast(&threadpool->cond); |
| } |
| #endif |
|
|
| void ggml_threadpool_pause(struct ggml_threadpool * threadpool) { |
| #ifndef GGML_USE_OPENMP |
| ggml_mutex_lock(&threadpool->mutex); |
| if (!threadpool->pause) { |
| ggml_threadpool_pause_locked(threadpool); |
| } |
| ggml_mutex_unlock(&threadpool->mutex); |
| #else |
| UNUSED(threadpool); |
| #endif |
| } |
|
|
| void ggml_threadpool_resume(struct ggml_threadpool * threadpool) { |
| #ifndef GGML_USE_OPENMP |
| ggml_mutex_lock(&threadpool->mutex); |
| if (threadpool->pause) { |
| ggml_threadpool_resume_locked(threadpool); |
| } |
| ggml_mutex_unlock(&threadpool->mutex); |
| #else |
| UNUSED(threadpool); |
| #endif |
| } |
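| // Illustrative lifecycle sketch (not part of this file), using only the |
| // public API defined here: |
| // |
| // struct ggml_threadpool_params tpp = ggml_threadpool_params_default(8); |
| // struct ggml_threadpool * tp = ggml_threadpool_new(&tpp); |
| // ggml_threadpool_pause(tp); // workers block on the condvar |
| // ggml_threadpool_resume(tp); // workers go back to poll/wait for work |
| // ggml_threadpool_free(tp); // stops and joins the workers |
| // |
| // with GGML_USE_OPENMP, pause/resume are no-ops (see above) |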
|
|
| struct ggml_cplan ggml_graph_plan( |
| const struct ggml_cgraph * cgraph, |
| int n_threads, |
| struct ggml_threadpool * threadpool) { |
|
|
| if (threadpool == NULL) { |
| // no threadpool given; ggml_graph_compute() will create a disposable one |
| } |
| if (n_threads <= 0) { |
| n_threads = threadpool ? threadpool->n_threads : GGML_DEFAULT_N_THREADS; |
| } |
|
|
| #if defined(__EMSCRIPTEN__) && !defined(__EMSCRIPTEN_PTHREADS__) |
| // Emscripten without pthreads support is strictly single-threaded |
| n_threads = 1; |
| #endif |
|
|
| size_t work_size = 0; |
|
|
| struct ggml_cplan cplan; |
| memset(&cplan, 0, sizeof(struct ggml_cplan)); |
|
|
| int max_tasks = 1; |
|
|
| // thread scheduling for the different operations + work buffer size estimation |
| for (int i = 0; i < cgraph->n_nodes; i++) { |
| struct ggml_tensor * node = cgraph->nodes[i]; |
|
|
| const int n_tasks = ggml_get_n_tasks(node, n_threads); |
|
|
| max_tasks = MAX(max_tasks, n_tasks); |
|
|
| size_t cur = 0; |
|
|
| if (!ggml_cpu_extra_work_size(n_threads, node, &cur)) { |
| switch (node->op) { |
| case GGML_OP_CPY: |
| case GGML_OP_DUP: |
| { |
| if (ggml_is_quantized(node->type) || |
| // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32 |
| (node->src[0]->type == GGML_TYPE_F16 && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) || |
| (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16) || |
| // F32 <-> I32 copies also go through an intermediate F32 row |
| (node->src[0]->type == GGML_TYPE_F32 && node->src[1] && node->src[1]->type == GGML_TYPE_I32) || |
| (node->src[0]->type == GGML_TYPE_I32 && node->src[1] && node->src[1]->type == GGML_TYPE_F32)) { |
| cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks; |
| } |
| } break; |
| case GGML_OP_ADD: |
| case GGML_OP_ADD_ID: |
| case GGML_OP_ADD1: |
| { |
| if (ggml_is_quantized(node->src[0]->type)) { |
| cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; |
| } |
| } break; |
| case GGML_OP_ACC: |
| { |
| if (ggml_is_quantized(node->src[0]->type)) { |
| cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks; |
| } |
| } break; |
| case GGML_OP_COUNT_EQUAL: |
| { |
| cur = ggml_type_size(node->type)*n_tasks; |
| } break; |
| case GGML_OP_MUL_MAT: |
| { |
| const enum ggml_type vec_dot_type = type_traits_cpu[node->src[0]->type].vec_dot_type; |
|
|
| if (node->src[1]->type != vec_dot_type) { |
| cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1])); |
| } |
| } break; |
| case GGML_OP_MUL_MAT_ID: |
| { |
| cur = 0; |
| const struct ggml_tensor * src0 = node->src[0]; |
| const struct ggml_tensor * src1 = node->src[1]; |
| const struct ggml_tensor * ids = node->src[2]; |
| const enum ggml_type vec_dot_type = type_traits_cpu[src0->type].vec_dot_type; |
| const int n_as = src0->ne[2]; |
| // src1 rows converted to vec_dot_type (plus alignment slack) |
| if (src1->type != vec_dot_type) { |
| cur += ggml_row_size(vec_dot_type, ggml_nelements(src1)) + sizeof(int64_t); |
| } |
| // matrix_row_counts: one counter per expert (plus alignment slack) |
| cur += n_as * sizeof(int64_t) + sizeof(int64_t); |
| // matrix_rows: row mappings per expert (plus alignment slack) |
| cur += n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping) + sizeof(int64_t); |
| // cache-line padding between the per-expert regions |
| cur += CACHE_LINE_SIZE*n_as + CACHE_LINE_SIZE; |
| } break; |
| case GGML_OP_OUT_PROD: |
| { |
| if (ggml_is_quantized(node->src[0]->type)) { |
| cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; |
| } |
| } break; |
| case GGML_OP_SOFT_MAX: |
| case GGML_OP_ROPE: |
| case GGML_OP_ROPE_BACK: |
| { |
| cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks; |
| } break; |
| case GGML_OP_CONV_TRANSPOSE_1D: |
| { |
| GGML_ASSERT(node->src[0]->ne[3] == 1); |
| GGML_ASSERT(node->src[1]->ne[2] == 1); |
| GGML_ASSERT(node->src[1]->ne[3] == 1); |
|
|
| const int64_t ne00 = node->src[0]->ne[0]; |
| const int64_t ne01 = node->src[0]->ne[1]; |
| const int64_t ne02 = node->src[0]->ne[2]; |
| const int64_t ne10 = node->src[1]->ne[0]; |
| const int64_t ne11 = node->src[1]->ne[1]; |
|
|
| if ((node->src[0]->type == GGML_TYPE_F16 || |
| node->src[0]->type == GGML_TYPE_BF16) && |
| node->src[1]->type == GGML_TYPE_F32) { |
| cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02; |
| cur += sizeof(ggml_fp16_t)*ne10*ne11; |
| } else if (node->src[0]->type == GGML_TYPE_F32 && |
| node->src[1]->type == GGML_TYPE_F32) { |
| cur += sizeof(float)*ne00*ne01*ne02; |
| cur += sizeof(float)*ne10*ne11; |
| } else { |
| GGML_ABORT("fatal error"); |
| } |
| } break; |
| case GGML_OP_CONV_2D: |
| case GGML_OP_CONV_3D: |
| { |
| cur = GGML_IM2COL_WORK_SIZE; |
| } break; |
| case GGML_OP_CONV_TRANSPOSE_2D: |
| { |
| const int64_t ne00 = node->src[0]->ne[0]; |
| const int64_t ne01 = node->src[0]->ne[1]; |
| const int64_t ne02 = node->src[0]->ne[2]; |
| const int64_t ne03 = node->src[0]->ne[3]; |
|
|
| const int64_t ne10 = node->src[1]->ne[0]; |
| const int64_t ne11 = node->src[1]->ne[1]; |
| const int64_t ne12 = node->src[1]->ne[2]; |
|
|
| cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03; |
| cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12; |
| } break; |
| case GGML_OP_TOP_K: |
| { |
| cur += sizeof(int32_t)*node->src[0]->ne[0]*n_tasks; |
| } break; |
| case GGML_OP_FLASH_ATTN_EXT: |
| { |
| const int64_t neq2 = node->src[0]->ne[2]; |
| const int64_t DK = node->src[1]->ne[0]; |
| const int64_t DV = node->src[2]->ne[0]; |
|
|
| // per-thread tile workspace for the batched (prefill) path |
| size_t prefill = sizeof(float)*(GGML_FA_TILE_Q*DK + 2*GGML_FA_TILE_Q*GGML_FA_TILE_KV + GGML_FA_TILE_Q*DV + GGML_FA_TILE_KV*DV + GGML_FA_TILE_KV*DK)*n_tasks; |
| |
| // workspace for the split-KV (decode) path: per-chunk partial results |
| // plus per-thread accumulators |
| size_t n_chunks = n_tasks; |
| size_t decode = sizeof(float)*(neq2*n_chunks*(2+DV) + n_tasks*(DK + 2*DV)); |
|
|
| cur += MAX(prefill, decode); |
| } break; |
| case GGML_OP_FLASH_ATTN_BACK: |
| { |
| const int64_t D = node->src[0]->ne[0]; |
| const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL); |
| const int64_t mxDn = MAX(D, ne11) * 2; |
| if (node->src[1]->type == GGML_TYPE_F32 || |
| node->src[1]->type == GGML_TYPE_F16 || |
| node->src[1]->type == GGML_TYPE_BF16) { |
| cur = sizeof(float)*mxDn*n_tasks; |
| cur += sizeof(float)*mxDn*n_tasks; |
| } |
| } break; |
|
|
| case GGML_OP_CROSS_ENTROPY_LOSS: |
| { |
| cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks); |
| } break; |
| case GGML_OP_GATED_DELTA_NET: |
| { |
| const int64_t S_v = node->src[2]->ne[0]; |
| cur = S_v * sizeof(float) * n_tasks; |
| } break; |
| case GGML_OP_COUNT: |
| { |
| GGML_ABORT("fatal error"); |
| } |
| default: |
| break; |
| } |
| } |
|
|
| work_size = MAX(work_size, cur); |
| } |
|
|
| if (work_size > 0) { |
| work_size += CACHE_LINE_SIZE*(n_threads); |
| } |
|
|
| cplan.threadpool = threadpool; |
| cplan.n_threads = MIN(max_tasks, n_threads); |
| cplan.work_size = work_size; |
| cplan.work_data = NULL; |
|
|
| return cplan; |
| } |
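| // Typical usage of the plan/compute split (illustrative sketch; mirrors |
| // ggml_graph_compute_with_ctx below, but with a caller-owned buffer): |
| // |
| // struct ggml_cplan cplan = ggml_graph_plan(graph, n_threads, /*threadpool=*/NULL); |
| // uint8_t * buf = malloc(cplan.work_size); // may be 0 bytes |
| // cplan.work_data = buf; |
| // ggml_graph_compute(graph, &cplan); |
| // free(buf); |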
|
|
| static thread_ret_t ggml_graph_compute_thread(void * data) { |
| struct ggml_compute_state * state = (struct ggml_compute_state *) data; |
| struct ggml_threadpool * tp = state->threadpool; |
|
|
| const struct ggml_cgraph * cgraph = tp->cgraph; |
| const struct ggml_cplan * cplan = tp->cplan; |
|
|
| set_numa_thread_affinity(state->ith); |
|
|
| struct ggml_compute_params params = { |
| state->ith, |
| // nth: unpacked from the low bits of the packed n_graph counter |
| atomic_load_explicit(&tp->n_graph, memory_order_relaxed) & GGML_THREADPOOL_N_THREADS_MASK, |
| cplan->work_size, |
| cplan->work_data, |
| tp, |
| cplan->use_ref, |
| }; |
|
|
| #ifdef GGML_USE_OPENMP |
| GGML_PRINT_DEBUG("thread #%d compute-start cplan %p\n", state->ith, (const void *)cplan); |
| #else |
| GGML_PRINT_DEBUG("thread #%d compute-start cplan %p last-graph %d\n", state->ith, (const void *)cplan, state->last_graph); |
| #endif |
|
|
| for (int node_n = 0; node_n < cgraph->n_nodes && atomic_load_explicit(&tp->abort, memory_order_relaxed) != node_n; node_n++) { |
| struct ggml_tensor * node = cgraph->nodes[node_n]; |
|
|
| if (ggml_op_is_empty(node->op)) { |
| // empty ops need neither compute nor a barrier |
| continue; |
| } |
|
|
| if ((node->flags & GGML_TENSOR_FLAG_COMPUTE) == 0) { |
| continue; |
| } |
|
|
| ggml_compute_forward(¶ms, node); |
|
|
| if (state->ith == 0 && cplan->abort_callback && |
| cplan->abort_callback(cplan->abort_callback_data)) { |
| atomic_store_explicit(&tp->abort, node_n + 1, memory_order_relaxed); |
| tp->ec = GGML_STATUS_ABORTED; |
| } |
|
|
| if (node_n + 1 < cgraph->n_nodes) { |
| ggml_barrier(state->threadpool); |
| } |
| } |
|
|
| #ifdef GGML_USE_OPENMP |
| GGML_PRINT_DEBUG("thread #%d compute-done cplan %p\n", state->ith, (const void *)cplan); |
| #else |
| GGML_PRINT_DEBUG("thread #%d compute-done cplan %p last-graph %d\n", state->ith, (const void *)cplan, state->last_graph); |
| #endif |
|
|
| ggml_barrier(state->threadpool); |
|
|
| return 0; |
| } |
|
|
| #ifndef GGML_USE_OPENMP |
|
|
| // returns true when the worker has (or may have) something to do: |
| // a new graph was kicked off, or the pool is stopping/pausing |
| static inline bool ggml_graph_compute_thread_ready(struct ggml_compute_state * state) { |
| struct ggml_threadpool * threadpool = state->threadpool; |
|
|
| if (state->pending || threadpool->stop || threadpool->pause) { return true; } |
|
|
| // n_graph encodes the graph generation in the high bits and the number of |
| // active threads in the low bits (see ggml_graph_compute_kickoff) |
| int n_graph = atomic_load_explicit(&threadpool->n_graph, memory_order_relaxed); |
| int n_threads = n_graph & GGML_THREADPOOL_N_THREADS_MASK; |
| if (n_graph != state->last_graph) { |
| state->pending = (state->ith < n_threads); |
| state->last_graph = n_graph; |
| return true; |
| } |
|
|
| return false; |
| } |
|
|
| // sync thread state after polling |
| static inline void ggml_graph_compute_thread_sync(struct ggml_compute_state * state) { |
| // TSAN does not support standalone fences, so use a dummy read-modify-write instead |
| #ifdef GGML_TSAN_ENABLED |
| atomic_fetch_add_explicit(&state->threadpool->n_graph, 0, memory_order_seq_cst); |
| #else |
| atomic_thread_fence(memory_order_seq_cst); |
| #endif |
| UNUSED(state); |
| } |
|
|
| static inline bool ggml_graph_compute_poll_for_work(struct ggml_compute_state * state) { |
| struct ggml_threadpool * threadpool = state->threadpool; |
|
|
| // the polling budget scales with the configured poll level; |
| // poll == 0 disables polling and the thread goes straight to sleep |
| const uint64_t n_rounds = 1024UL * 128 * threadpool->poll; |
|
|
| for (uint64_t i=0; !ggml_graph_compute_thread_ready(state) && i < n_rounds; i++) { |
| |
| ggml_thread_cpu_relax(); |
| } |
|
|
| return state->pending; |
| } |
|
|
| static inline bool ggml_graph_compute_check_for_work(struct ggml_compute_state * state) { |
| struct ggml_threadpool * threadpool = state->threadpool; |
|
|
| if (ggml_graph_compute_poll_for_work(state)) { |
| ggml_graph_compute_thread_sync(state); |
| return state->pending; |
| } |
|
|
| ggml_mutex_lock_shared(&threadpool->mutex); |
| while (!ggml_graph_compute_thread_ready(state)) { |
| |
| GGML_PRINT_DEBUG("thread #%d waiting for work (sleeping)\n", state->ith); |
| ggml_cond_wait(&threadpool->cond, &threadpool->mutex); |
| } |
| ggml_mutex_unlock_shared(&threadpool->mutex); |
|
|
| return state->pending; |
| } |
|
|
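| // Secondary worker loop. Workers use a hybrid poll/sleep scheme: they spin |
| // for up to n_rounds iterations (ggml_graph_compute_poll_for_work) and then |
| // block on the condvar until the main thread kicks off a new graph, pauses, |
| // or stops the pool. |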
| static thread_ret_t ggml_graph_compute_secondary_thread(void* data) { |
| struct ggml_compute_state * state = (struct ggml_compute_state *) data; |
| struct ggml_threadpool * threadpool = state->threadpool; |
|
|
| ggml_thread_apply_priority(threadpool->prio); |
| if (ggml_thread_cpumask_is_valid(state->cpumask)) { |
| ggml_thread_apply_affinity(state->cpumask); |
| } |
|
|
| while (true) { |
| // sleep while the threadpool is paused |
| while (threadpool->pause) { |
| GGML_PRINT_DEBUG("thread #%d inside pause loop\n", state->ith); |
| ggml_mutex_lock_shared(&threadpool->mutex); |
| if (threadpool->pause) { |
| ggml_cond_wait(&threadpool->cond, &threadpool->mutex); |
| } |
| GGML_PRINT_DEBUG("thread #%d resuming after wait\n", state->ith); |
| ggml_mutex_unlock_shared(&threadpool->mutex); |
| } |
|
|
| // this needs to be re-checked after the cond_wait above |
| if (threadpool->stop) break; |
| |
| // check if there is new work; |
| // only the main thread can dispatch new work |
| ggml_graph_compute_check_for_work(state); |
| if (state->pending) { |
| state->pending = false; |
| ggml_graph_compute_thread(state); |
| } |
| } |
|
|
| return (thread_ret_t) 0; |
| } |
|
|
| // start processing a new graph |
| static void ggml_graph_compute_kickoff(struct ggml_threadpool * threadpool, int n_threads) { |
| // always take the mutex here because the workers use a hybrid poll/wait scheme |
| ggml_mutex_lock(&threadpool->mutex); |
| |
| // bump the graph generation and pack the active thread count into the low bits |
| int n_graph = atomic_load_explicit(&threadpool->n_graph, memory_order_relaxed) >> GGML_THREADPOOL_N_THREADS_BITS; |
| n_graph = ((n_graph + 1) << GGML_THREADPOOL_N_THREADS_BITS) | (n_threads & GGML_THREADPOOL_N_THREADS_MASK); |
|
|
| GGML_PRINT_DEBUG("compute-kickoff: n_threads %d n_graph %d\n", n_threads, n_graph); |
|
|
| // publish the new graph; the seq-cst store pairs with the seq-cst |
| // fence / RMW in ggml_graph_compute_thread_sync |
| atomic_store_explicit(&threadpool->n_graph, n_graph, memory_order_seq_cst); |
|
|
| if (threadpool->pause) { |
| // update main thread prio and affinity to match the threadpool settings |
| ggml_thread_apply_priority(threadpool->prio); |
| if (ggml_thread_cpumask_is_valid(threadpool->workers[0].cpumask)) { |
| ggml_thread_apply_affinity(threadpool->workers[0].cpumask); |
| } |
| |
| // resume does the cond broadcast |
| ggml_threadpool_resume_locked(threadpool); |
| } else { |
| ggml_cond_broadcast(&threadpool->cond); |
| } |
|
|
| ggml_mutex_unlock(&threadpool->mutex); |
| } |
|
|
| #endif |
|
|
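| // create a threadpool; worker 0 is the calling thread, so only n_threads - 1 |
| // secondary threads are actually spawned (none at all with GGML_USE_OPENMP, |
| // where the OpenMP runtime owns the threads) |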
| static struct ggml_threadpool * ggml_threadpool_new_impl( |
| struct ggml_threadpool_params * tpp, |
| struct ggml_cgraph * cgraph, |
| struct ggml_cplan * cplan) { |
|
|
| struct ggml_threadpool * threadpool = |
| ggml_aligned_malloc(sizeof(struct ggml_threadpool)); |
| { |
| threadpool->cgraph = cgraph; |
| threadpool->cplan = cplan; |
| threadpool->n_graph = 0; |
| threadpool->n_barrier = 0; |
| threadpool->n_barrier_passed = 0; |
| threadpool->current_chunk = 0; |
| threadpool->stop = false; |
| threadpool->pause = tpp->paused; |
| threadpool->abort = -1; |
| threadpool->workers = NULL; |
| threadpool->n_threads = tpp->n_threads; |
| threadpool->poll = tpp->poll; |
| threadpool->prio = tpp->prio; |
| threadpool->ec = GGML_STATUS_SUCCESS; |
| } |
|
|
| // allocate and initialize the workers state |
| const size_t workers_size = sizeof(struct ggml_compute_state) * tpp->n_threads; |
| struct ggml_compute_state * workers = ggml_aligned_malloc(workers_size); |
|
|
| memset(workers, 0, workers_size); |
| for (int j = 0; j < tpp->n_threads; j++) { |
| workers[j].threadpool = threadpool; |
| workers[j].ith = j; |
| } |
|
|
| threadpool->workers = workers; |
|
|
| #ifdef GGML_USE_OPENMP |
| int32_t cpumask_iter = 0; |
|
|
| // with OpenMP no threads are spawned here; just precompute each worker's cpumask |
| for (int j = 0; j < tpp->n_threads; j++) { |
| ggml_thread_cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter); |
| } |
| #else |
| ggml_mutex_init(&threadpool->mutex); |
| ggml_cond_init(&threadpool->cond); |
|
| // spin up all secondary workers; the main thread (worker 0) takes the |
| // next cpumask slot afterwards |
| int32_t cpumask_iter = 0; |
| |
| for (int j = 1; j < tpp->n_threads; j++) { |
| ggml_thread_cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter); |
|
|
| int32_t rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_secondary_thread, &workers[j]); |
| GGML_ASSERT(rc == 0); |
| } |
|
|
| ggml_thread_cpumask_next(tpp->cpumask, workers[0].cpumask, tpp->strict_cpu, &cpumask_iter); |
|
|
| if (!threadpool->pause) { |
| // update the main thread prio and affinity here; otherwise it happens on resume |
| ggml_thread_apply_priority(threadpool->prio); |
| if (ggml_thread_cpumask_is_valid(threadpool->workers[0].cpumask)) { |
| ggml_thread_apply_affinity(threadpool->workers[0].cpumask); |
| } |
| } |
| #endif |
|
|
| return threadpool; |
| } |
|
|
| struct ggml_threadpool * ggml_threadpool_new(struct ggml_threadpool_params * tpp) { |
| return ggml_threadpool_new_impl(tpp, NULL, NULL); |
| } |
|
|
| enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { |
| ggml_cpu_init(); |
|
|
| GGML_ASSERT(cplan); |
| GGML_ASSERT(cplan->n_threads > 0); |
| GGML_ASSERT(cplan->work_size == 0 || cplan->work_data != NULL); |
|
|
| int n_threads = cplan->n_threads; |
| struct ggml_threadpool * threadpool = cplan->threadpool; |
|
|
| bool disposable_threadpool = false; |
|
|
| if (threadpool == NULL) { |
| // create a disposable threadpool for this graph |
| disposable_threadpool = true; |
| |
| struct ggml_threadpool_params ttp = ggml_threadpool_params_default(n_threads); |
| threadpool = ggml_threadpool_new_impl(&ttp, cgraph, cplan); |
| } else { |
| // reset the per-run state; no worker threads should be |
| // accessing these fields at this point |
| threadpool->cgraph = cgraph; |
| threadpool->cplan = cplan; |
| threadpool->current_chunk = 0; |
| threadpool->abort = -1; |
| threadpool->ec = GGML_STATUS_SUCCESS; |
| } |
|
|
| #ifdef GGML_USE_OPENMP |
| if (n_threads > 1) { |
| #pragma omp parallel num_threads(n_threads) |
| { |
| #pragma omp single |
| { |
| // update n_threads with the number of threads OpenMP actually granted |
| n_threads = omp_get_num_threads(); |
| atomic_store_explicit(&threadpool->n_graph, n_threads, memory_order_relaxed); |
| } |
|
|
| |
| int ith = omp_get_thread_num(); |
|
|
| ggml_thread_apply_priority(threadpool->prio); |
| if (ggml_thread_cpumask_is_valid(threadpool->workers[ith].cpumask)) { |
| ggml_thread_apply_affinity(threadpool->workers[ith].cpumask); |
| } |
| ggml_graph_compute_thread(&threadpool->workers[ith]); |
| } |
| } else { |
| atomic_store_explicit(&threadpool->n_graph, 1, memory_order_relaxed); |
| ggml_graph_compute_thread(&threadpool->workers[0]); |
| } |
| #else |
| if (n_threads > threadpool->n_threads) { |
| GGML_LOG_WARN("cplan requested more threads (%d) than available (%d)\n", n_threads, threadpool->n_threads); |
| n_threads = threadpool->n_threads; |
| } |
|
|
| // kick all the threads to start the new graph |
| ggml_graph_compute_kickoff(threadpool, n_threads); |
| |
| // the main thread is a work thread too |
| ggml_graph_compute_thread(&threadpool->workers[0]); |
| #endif |
|
|
| // don't leave affinity set on the main thread |
| clear_numa_thread_affinity(); |
|
|
| enum ggml_status ret = threadpool->ec; |
|
|
| if (disposable_threadpool) { |
| ggml_threadpool_free(threadpool); |
| } |
|
|
| return ret; |
| } |
|
|
| enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) { |
| struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads, NULL); |
|
|
| cplan.work_data = (uint8_t *)ggml_new_buffer(ctx, cplan.work_size); |
|
|
| return ggml_graph_compute(cgraph, &cplan); |
| } |
|
|
| void ggml_cpu_fp32_to_fp32(const float * x, float * y, int64_t n) { |
| memcpy(y, x, n * sizeof(float)); |
| } |
|
|
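| // the conversion helpers below share one pattern: an optional SIMD fast path |
| // (selected at compile time) converts full vectors, and a scalar tail loop |
| // finishes the remaining elements |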
| void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) { |
| int64_t i = 0; |
| #if defined(__F16C__) |
| #if defined(__AVX512F__) |
| for (; i + 15 < n; i += 16) { |
| __m512 x_vec = _mm512_loadu_ps(x + i); |
| __m256i y_vec = _mm512_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); |
| _mm256_storeu_si256((__m256i *)(y + i), y_vec); |
| } |
| #endif |
| for (; i + 7 < n; i += 8) { |
| __m256 x_vec = _mm256_loadu_ps(x + i); |
| __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); |
| _mm_storeu_si128((__m128i *)(y + i), y_vec); |
| } |
| for (; i + 3 < n; i += 4) { |
| __m128 x_vec = _mm_loadu_ps(x + i); |
| __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); |
| _mm_storel_epi64((__m128i *)(y + i), y_vec); |
| } |
| #elif defined(__riscv_zvfh) |
| for (int vl; i < n; i += vl) { |
| vl = __riscv_vsetvl_e32m2(n - i); |
| vfloat32m2_t vx = __riscv_vle32_v_f32m2(&x[i], vl); |
| vfloat16m1_t vy = __riscv_vfncvt_f_f_w_f16m1(vx, vl); |
| __riscv_vse16_v_f16m1((_Float16 *)&y[i], vy, vl); |
| } |
| #endif |
| for (; i < n; ++i) { |
| y[i] = GGML_CPU_FP32_TO_FP16(x[i]); |
| } |
| } |
|
|
| void ggml_cpu_fp16_to_fp32(const ggml_fp16_t * x, float * y, int64_t n) { |
| int64_t i = 0; |
| #if defined(__F16C__) |
| #if defined(__AVX512F__) |
| for (; i + 15 < n; i += 16) { |
| __m256i x_vec = _mm256_loadu_si256((const __m256i *)(x + i)); |
| __m512 y_vec = _mm512_cvtph_ps(x_vec); |
| _mm512_storeu_ps(y + i, y_vec); |
| } |
| #endif |
| for (; i + 7 < n; i += 8) { |
| __m128i x_vec = _mm_loadu_si128((const __m128i *)(x + i)); |
| __m256 y_vec = _mm256_cvtph_ps(x_vec); |
| _mm256_storeu_ps(y + i, y_vec); |
| } |
| for (; i + 3 < n; i += 4) { |
| __m128i x_vec = _mm_loadl_epi64((const __m128i *)(x + i)); |
| __m128 y_vec = _mm_cvtph_ps(x_vec); |
| _mm_storeu_ps(y + i, y_vec); |
| } |
|
|
| #elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfhmin) |
| |
| const int epr = __riscv_vsetvlmax_e16m2(); |
| const int step = epr * 2; |
| const int64_t np = (n & ~(int64_t)(step - 1)); // avoid truncation for large n |
|
|
| |
| for (; i < np; i += step) { |
| vfloat16m2_t ax0 = __riscv_vle16_v_f16m2((const _Float16*)x + i, epr); |
| vfloat32m4_t ay0 = __riscv_vfwcvt_f_f_v_f32m4(ax0, epr); |
| __riscv_vse32_v_f32m4(y + i, ay0, epr); |
|
|
| vfloat16m2_t ax1 = __riscv_vle16_v_f16m2((const _Float16*)x + i + epr, epr); |
| vfloat32m4_t ay1 = __riscv_vfwcvt_f_f_v_f32m4(ax1, epr); |
| __riscv_vse32_v_f32m4(y + i + epr, ay1, epr); |
| } |
|
|
| |
| int vl; |
| for (i = np; i < n; i += vl) { |
| vl = __riscv_vsetvl_e16m2(n - i); |
| vfloat16m2_t ax0 = __riscv_vle16_v_f16m2((const _Float16*)x + i, vl); |
| vfloat32m4_t ay0 = __riscv_vfwcvt_f_f_v_f32m4(ax0, vl); |
| __riscv_vse32_v_f32m4(y + i, ay0, vl); |
| } |
|
|
| #endif |
|
|
| for (; i < n; ++i) { |
| y[i] = GGML_CPU_FP16_TO_FP32(x[i]); |
| } |
| } |
|
|
| void ggml_cpu_fp32_to_bf16(const float * x, ggml_bf16_t * y, int64_t n) { |
| int64_t i = 0; |
| for (; i < n; ++i) { |
| y[i] = GGML_FP32_TO_BF16(x[i]); |
| } |
| } |
|
|
| void ggml_cpu_fp32_to_i32(const float * x, int32_t * y, int64_t n) { |
| int64_t i = 0; |
| for (; i < n; ++i) { |
| y[i] = x[i]; |
| } |
| } |
|
|
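| // bf16 is the upper 16 bits of an IEEE fp32 value, so widening is a plain |
| // 16-bit left shift of the bit pattern; a scalar sketch of what the SIMD |
| // paths below do (assuming the usual union-based bit cast): |
| // |
| // union { float f; uint32_t u; } b; |
| // b.u = (uint32_t) bits16 << 16; // bits16: the raw 16-bit bf16 payload |
| // y[i] = b.f; |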
| void ggml_cpu_bf16_to_fp32(const ggml_bf16_t * x, float * y, int64_t n) { |
| int64_t i = 0; |
| #if defined(__AVX2__) |
| #if defined(__AVX512F__) |
| for (; i + 15 < n; i += 16) { |
| _mm512_storeu_ps(y + i, |
| _mm512_castsi512_ps( |
| _mm512_slli_epi32( |
| _mm512_cvtepu16_epi32( |
| _mm256_loadu_si256( |
| (const __m256i *)(x + i))), |
| 16))); |
| } |
| #endif |
| for (; i + 7 < n; i += 8) { |
| _mm256_storeu_ps(y + i, |
| _mm256_castsi256_ps( |
| _mm256_slli_epi32( |
| _mm256_cvtepu16_epi32( |
| _mm_loadu_si128( |
| (const __m128i *)(x + i))), |
| 16))); |
| } |
| #elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfbfmin) |
| |
| const int epr = __riscv_vsetvlmax_e16m2(); |
| const int step = epr * 2; |
| const int64_t np = (n & ~(int64_t)(step - 1)); // avoid truncation for large n |
|
|
| |
| for (; i < np; i += step) { |
| vbfloat16m2_t ax0 = __riscv_vle16_v_bf16m2((const __bf16*)x + i, epr); |
| vfloat32m4_t ay0 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax0, epr); |
| __riscv_vse32_v_f32m4(y + i, ay0, epr); |
|
|
| vbfloat16m2_t ax1 = __riscv_vle16_v_bf16m2((const __bf16*)x + i + epr, epr); |
| vfloat32m4_t ay1 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax1, epr); |
| __riscv_vse32_v_f32m4(y + i + epr, ay1, epr); |
| } |
|
|
| |
| int vl; |
| for (i = np; i < n; i += vl) { |
| vl = __riscv_vsetvl_e16m2(n - i); |
| vbfloat16m2_t ax0 = __riscv_vle16_v_bf16m2((const __bf16*)x + i, vl); |
| vfloat32m4_t ay0 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax0, vl); |
| __riscv_vse32_v_f32m4(y + i, ay0, vl); |
| } |
| #endif |
| for (; i < n; i++) { |
| y[i] = GGML_BF16_TO_FP32(x[i]); |
| } |
| } |
|
|
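| // NOTE: the ggml_cpu_has_* helpers below report compile-time capabilities, |
| // i.e. the instruction sets this translation unit was built with, not what |
| // the CPU supports at runtime |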
| int ggml_cpu_has_avx(void) { |
| #if defined(__AVX__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_avx_vnni(void) { |
| #if defined(__AVXVNNI__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_avx2(void) { |
| #if defined(__AVX2__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_avx512(void) { |
| #if defined(__AVX512F__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_avx512_vbmi(void) { |
| #if defined(__AVX512VBMI__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_avx512_vnni(void) { |
| #if defined(__AVX512VNNI__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_avx512_bf16(void) { |
| #if defined(__AVX512BF16__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_amx_int8(void) { |
| #if defined(__AMX_INT8__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_bmi2(void) { |
| #if defined(__BMI2__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_fma(void) { |
| #if defined(__FMA__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_arm_fma(void) { |
| #if defined(__ARM_FEATURE_FMA) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_riscv_v(void) { |
| #if defined(__riscv_v_intrinsic) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_get_rvv_vlen(void) { |
| #if defined(__riscv) && defined(__riscv_v_intrinsic) |
| return ggml_riscv_arch_features.rvv_vlen; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_f16c(void) { |
| #if defined(__F16C__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_fp16_va(void) { |
| #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_wasm_simd(void) { |
| #if defined(__wasm_simd128__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_llamafile(void) { |
| #if defined(GGML_USE_LLAMAFILE) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_sse3(void) { |
| #if defined(__SSE3__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_ssse3(void) { |
| #if defined(__SSSE3__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_vsx(void) { |
| #if defined(__POWER9_VECTOR__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_vxe(void) { |
| #if defined(__VXE__) || defined(__VXE2__) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_neon(void) { |
| #if defined(__ARM_ARCH) && defined(__ARM_NEON) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_dotprod(void) { |
| #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_DOTPROD) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_sve(void) { |
| #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_matmul_int8(void) { |
| #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_MATMUL_INT8) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_get_sve_cnt(void) { |
| #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) |
| return ggml_arm_arch_features.sve_cnt; |
| #else |
| return 0; |
| #endif |
| } |
|
|
| int ggml_cpu_has_sme(void) { |
| #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SME) |
| return 1; |
| #else |
| return 0; |
| #endif |
| } |
|
|
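| // one-time process-wide initialization; safe to call repeatedly and from |
| // multiple threads: the table setup runs once, guarded by the critical |
| // section and the is_first_call flag |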
| void ggml_cpu_init(void) { |
| // needed to initialize ggml_time |
| { |
| struct ggml_init_params params = { 0, NULL, false }; |
| struct ggml_context * ctx = ggml_init(params); |
| ggml_free(ctx); |
| } |
|
|
| ggml_critical_section_start(); |
|
|
| static bool is_first_call = true; |
|
|
| if (is_first_call) { |
| // initialize the FP16 and GELU lookup tables |
| { |
| const uint64_t t_start = ggml_time_us(); UNUSED(t_start); |
|
|
| for (int i = 0; i < (1 << 16); ++i) { |
| union { |
| uint16_t u16; |
| ggml_fp16_t fp16; |
| } u = {i}; |
| float f = GGML_COMPUTE_FP16_TO_FP32(u.fp16); |
| ggml_table_f32_f16[i] = f; |
| ggml_table_gelu_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_f32(f)); |
| ggml_table_gelu_quick_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_quick_f32(f)); |
| } |
|
|
| // initialize the E8M0-to-FP32 table |
| for (int i = 0; i < (1 << 8); ++i) { |
| ggml_table_f32_e8m0_half[i] = GGML_E8M0_TO_FP32_HALF(i); |
| } |
|
|
| const uint64_t t_end = ggml_time_us(); UNUSED(t_end); |
|
|
| GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0); |
|
|
| #ifdef GGML_USE_OPENMP |
| // KMP_BLOCKTIME controls how long (in ms) Intel's OpenMP runtime keeps |
| // worker threads spinning after a parallel region before putting them to |
| // sleep; keeping them hot avoids wake-up latency between graph computes |
| if (!getenv("KMP_BLOCKTIME")) { |
| // default to 200 ms, but never override a user-provided value |
| #ifdef _WIN32 |
| _putenv_s("KMP_BLOCKTIME", "200"); |
| #else |
| setenv("KMP_BLOCKTIME", "200", 0); |
| #endif |
| } |
| #endif |
| } |
|
|
| #if defined(__ARM_ARCH) |
| ggml_init_arm_arch_features(); |
| #endif |
|
|
| #if defined(__riscv) |
| ggml_init_riscv_arch_features(); |
| #endif |
|
|
| is_first_call = false; |
| } |
|
|
| ggml_critical_section_end(); |
| } |
|
|