| // for rocblas_initialize() | |
// define this if you want to always fall back to MMQ kernels and not use cuBLAS for matrix multiplication
| // on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant | |
| // for large computational tasks. the drawback is that this requires some extra amount of VRAM: | |
| // - 7B quantum model: +100-200 MB | |
| // - 13B quantum model: +200-400 MB | |
| // | |
| //#define GGML_CUDA_FORCE_MMQ | |
| // TODO: improve this to be correct for more hardware | |
| // for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores | |
| // max batch size to use MMQ kernels when tensor cores are available | |
| typedef int8_t int8x4_t __attribute__((ext_vector_type(4))); | |
| static __device__ __forceinline__ int __vsubss4(const int a, const int b) { | |
| const int8x4_t va = reinterpret_cast<const int8x4_t&>(a); | |
| const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b); | |
#if __has_builtin(__builtin_elementwise_sub_sat)
const int8x4_t c = __builtin_elementwise_sub_sat(va, vb);
return reinterpret_cast<const int &>(c);
#else
int8x4_t c;
| int16_t tmp; | |
| for (int i = 0; i < 4; i++) { | |
| tmp = va[i] - vb[i]; | |
| if(tmp > std::numeric_limits<int8_t>::max()) tmp = std::numeric_limits<int8_t>::max(); | |
| if(tmp < std::numeric_limits<int8_t>::min()) tmp = std::numeric_limits<int8_t>::min(); | |
| c[i] = tmp; | |
| } | |
return reinterpret_cast<int &>(c);
#endif // __has_builtin(__builtin_elementwise_sub_sat)
| } | |
| static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) { | |
#if defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx1030__)
c = __builtin_amdgcn_sdot4(a, b, c, false);
#elif defined(RDNA3)
c = __builtin_amdgcn_sudot4( true, a, true, b, c, false);
#elif defined(__gfx1010__) || defined(__gfx900__)
int tmp1;
int tmp2;
| asm("\n \ | |
| v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 \n \ | |
| v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1 \n \ | |
| v_add3_u32 %0, %1, %2, %0 \n \ | |
| v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2 \n \ | |
| v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3 \n \ | |
| v_add3_u32 %0, %1, %2, %0 \n \ | |
| " | |
| : "+v"(c), "=&v"(tmp1), "=&v"(tmp2) | |
| : "v"(a), "v"(b) | |
| ); | |
#else
const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
c += va[0] * vb[0] + va[1] * vb[1] + va[2] * vb[2] + va[3] * vb[3];
#endif
| return c; | |
| } | |
| static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); | |
| [[noreturn]] | |
| static void ggml_cuda_error(const char * stmt, const char * func, const char * file, const int line, const char * msg) { | |
| int id = -1; // in case cudaGetDevice fails | |
| cudaGetDevice(&id); | |
| fprintf(stderr, "CUDA error: %s\n", msg); | |
| fprintf(stderr, " current device: %d, in function %s at %s:%d\n", id, func, file, line); | |
| fprintf(stderr, " %s\n", stmt); | |
| // abort with GGML_ASSERT to get a stack trace | |
| GGML_ASSERT(!"CUDA error"); | |
| } | |
#if CUDART_VERSION >= 12000
static const char * cublas_get_error_str(const cublasStatus_t err) {
| return cublasGetStatusString(err); | |
| } | |
#else
static const char * cublas_get_error_str(const cublasStatus_t err) {
| switch (err) { | |
| case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; | |
| case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; | |
| case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; | |
| case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; | |
| case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; | |
| case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; | |
| case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; | |
| case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; | |
| case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; | |
| default: return "unknown error"; | |
| } | |
}
#endif // CUDART_VERSION >= 12000
| static const char * cu_get_error_str(CUresult err) { | |
| const char * err_str; | |
| cuGetErrorString(err, &err_str); | |
| return err_str; | |
| } | |
#ifdef GGML_CUDA_F16
typedef half dfloat; // dequantize float
typedef half2 dfloat2;
#else
typedef float dfloat; // dequantize float
typedef float2 dfloat2;
#endif // GGML_CUDA_F16
| static __device__ __forceinline__ int get_int_from_int8(const int8_t * x8, const int & i32) { | |
| const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment | |
| int x32 = 0; | |
| x32 |= x16[0] << 0; | |
| x32 |= x16[1] << 16; | |
| return x32; | |
| } | |
| static __device__ __forceinline__ int get_int_from_uint8(const uint8_t * x8, const int & i32) { | |
| const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment | |
| int x32 = 0; | |
| x32 |= x16[0] << 0; | |
| x32 |= x16[1] << 16; | |
| return x32; | |
| } | |
| static __device__ __forceinline__ int get_int_from_int8_aligned(const int8_t * x8, const int & i32) { | |
| return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment | |
| } | |
| static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * x8, const int & i32) { | |
| return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment | |
| } | |
| template<typename T> | |
| using to_t_cuda_t = void (*)(const void * __restrict__ x, T * __restrict__ y, int k, cudaStream_t stream); | |
| typedef to_t_cuda_t<float> to_fp32_cuda_t; | |
| typedef to_t_cuda_t<half> to_fp16_cuda_t; | |
| typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v); | |
| typedef void (*dot_kernel_k_t)(const void * __restrict__ vx, const int ib, const int iqs, const float * __restrict__ y, float & v); | |
| typedef void (*cpy_kernel_t)(const char * cx, char * cdst); | |
| typedef void (*ggml_cuda_func_t)(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); | |
| typedef void (*ggml_cuda_op_mul_mat_t)( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, | |
| const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, | |
| const int64_t src1_padded_row_size, cudaStream_t stream); | |
| typedef void (*ggml_cuda_op_flatten_t)( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream); | |
| // QK = number of values after dequantization | |
| // QR = QK / number of values before dequantization | |
| // QI = number of 32 bit integers before dequantization | |
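// example: for Q4_0 blocks (see block_q4_0 just below) QK4_0 = 32, QR4_0 = 2 (each quantized byte
// expands to two values) and QI4_0 = QK4_0/(4*QR4_0) = 4 32-bit integers of quant data per block.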
| typedef struct { | |
| half d; // delta | |
| uint8_t qs[QK4_0 / 2]; // nibbles / quants | |
| } block_q4_0; | |
| static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding"); | |
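// Illustrative addition (not part of the upstream file): a host-side reference expansion of one
// block_q4_0 into QK4_0 floats, matching the per-pair logic of dequantize_q4_0() further below.
// Assumes QK4_0 == 32 and that cuda_fp16.h's host-callable __half2float() is available.
static void dequantize_block_q4_0_ref(const block_q4_0 * b, float * out) {
    const float d = __half2float(b->d);            // per-block scale (delta)
    for (int j = 0; j < QK4_0/2; ++j) {
        const int q = b->qs[j];
        out[j]           = ((q & 0xF) - 8) * d;    // low nibble -> first half of the block
        out[j + QK4_0/2] = ((q >>  4) - 8) * d;    // high nibble -> second half of the block
    }
}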
| typedef struct { | |
| half2 dm; // dm.x = delta, dm.y = min | |
| uint8_t qs[QK4_1 / 2]; // nibbles / quants | |
| } block_q4_1; | |
| static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding"); | |
| typedef struct { | |
| half d; // delta | |
| uint8_t qh[4]; // 5-th bit of quants | |
| uint8_t qs[QK5_0 / 2]; // nibbles / quants | |
| } block_q5_0; | |
| static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); | |
| typedef struct { | |
| half2 dm; // dm.x = delta, dm.y = min | |
| uint8_t qh[4]; // 5-th bit of quants | |
| uint8_t qs[QK5_1 / 2]; // nibbles / quants | |
| } block_q5_1; | |
| static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); | |
| typedef struct { | |
| half d; // delta | |
| int8_t qs[QK8_0]; // quants | |
| } block_q8_0; | |
| static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding"); | |
| typedef struct { | |
| half2 ds; // ds.x = delta, ds.y = sum | |
| int8_t qs[QK8_0]; // quants | |
| } block_q8_1; | |
| static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_fp16_t) + QK8_0, "wrong q8_1 block size/padding"); | |
| typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs); | |
| typedef void (*allocate_tiles_cuda_t)(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc); | |
| typedef void (*load_tiles_cuda_t)( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row); | |
| typedef float (*vec_dot_q_mul_mat_cuda_t)( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ms, const int & i, const int & j, const int & k); | |
| //================================= k-quants | |
| typedef struct { | |
| uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits | |
| uint8_t qs[QK_K/4]; // quants | |
| half2 dm; // super-block scale for quantized scales/mins | |
| } block_q2_K; | |
| static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding"); | |
| typedef struct { | |
| uint8_t hmask[QK_K/8]; // quants - high bit | |
| uint8_t qs[QK_K/4]; // quants - low 2 bits | |
#ifdef GGML_QKK_64
uint8_t scales[2]; // scales, quantized with 8 bits
#else
uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits
#endif
| half d; // super-block scale | |
| } block_q3_K; | |
| //static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + K_SCALE_SIZE, "wrong q3_K block size/padding"); | |
#ifdef GGML_QKK_64
typedef struct {
| half dm[2]; // super-block scales/mins | |
| uint8_t scales[2]; // 4-bit block scales/mins | |
| uint8_t qs[QK_K/2]; // 4--bit quants | |
| } block_q4_K; | |
| static_assert(sizeof(block_q4_K) == sizeof(half2) + QK_K/2 + 2, "wrong q4_K block size/padding"); | |
#else
typedef struct {
| half2 dm; // super-block scale for quantized scales/mins | |
| uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits | |
| uint8_t qs[QK_K/2]; // 4--bit quants | |
| } block_q4_K; | |
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2, "wrong q4_K block size/padding");
#endif
#ifdef GGML_QKK_64
typedef struct {
| half d; // super-block scale | |
| int8_t scales[QK_K/16]; // block scales | |
| uint8_t qh[QK_K/8]; // quants, high bit | |
| uint8_t qs[QK_K/2]; // quants, low 4 bits | |
| } block_q5_K; | |
| static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding"); | |
#else
typedef struct {
| half2 dm; // super-block scale for quantized scales/mins | |
| uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits | |
| uint8_t qh[QK_K/8]; // quants, high bit | |
| uint8_t qs[QK_K/2]; // quants, low 4 bits | |
| } block_q5_K; | |
static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
#endif
| typedef struct { | |
| uint8_t ql[QK_K/2]; // quants, lower 4 bits | |
| uint8_t qh[QK_K/4]; // quants, upper 2 bits | |
| int8_t scales[QK_K/16]; // scales | |
| half d; // delta | |
| } block_q6_K; | |
| static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_K block size/padding"); | |
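// q6_K stores each 6-bit quant split across ql (low 4 bits) and qh (upper 2 bits); see
// dequantize_block_q6_K below for how the two halves are recombined.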
| typedef struct { | |
| half d; | |
| uint16_t qs[QK_K/8]; | |
| } block_iq2_xxs; | |
| static_assert(sizeof(block_iq2_xxs) == sizeof(ggml_fp16_t) + QK_K/8*sizeof(uint16_t), "wrong iq2_xxs block size/padding"); | |
| typedef struct { | |
| half d; | |
| uint16_t qs[QK_K/8]; | |
| uint8_t scales[QK_K/32]; | |
| } block_iq2_xs; | |
| static_assert(sizeof(block_iq2_xs) == sizeof(ggml_fp16_t) + QK_K/8*sizeof(uint16_t) + QK_K/32, "wrong iq2_xs block size/padding"); | |
| // dmmv = dequantize_mul_mat_vec | |
| static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2"); | |
| static cudaStream_t g_cudaStreams[GGML_CUDA_MAX_DEVICES][MAX_STREAMS] = { { nullptr } }; | |
| struct ggml_tensor_extra_gpu { | |
| void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors | |
| cudaEvent_t events[GGML_CUDA_MAX_DEVICES][MAX_STREAMS]; // events for synchronizing multiple GPUs | |
| }; | |
| // this is faster on Windows | |
| // probably because the Windows CUDA libraries forget to make this check before invoking the drivers | |
| static void ggml_cuda_set_device(const int device) { | |
| int current_device; | |
CUDA_CHECK(cudaGetDevice(&current_device));
| if (device == current_device) { | |
| return; | |
| } | |
| CUDA_CHECK(cudaSetDevice(device)); | |
| } | |
| static int g_device_count = -1; | |
| static int g_main_device = 0; | |
| static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0}; | |
| struct cuda_device_capabilities { | |
| int cc; // compute capability | |
| size_t smpb; // max. shared memory per block | |
| bool vmm; // virtual memory support | |
| size_t vmm_granularity; // granularity of virtual memory | |
| }; | |
| static cuda_device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, 0, false, 0} }; | |
| static void * g_scratch_buffer = nullptr; | |
| static size_t g_scratch_size = 0; // disabled by default | |
| static size_t g_scratch_offset = 0; | |
| static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr}; | |
| [[noreturn]] | |
| static __device__ void bad_arch() { | |
| printf("ERROR: ggml-cuda was compiled without support for the current GPU architecture.\n"); | |
| __trap(); | |
| (void) bad_arch; // suppress unused function warning | |
| } | |
| static __device__ __forceinline__ float warp_reduce_sum(float x) { | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| x += __shfl_xor_sync(0xffffffff, x, mask, 32); | |
| } | |
| return x; | |
| } | |
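// note: the XOR-shuffle above implements a butterfly reduction - after the 5 steps (mask 16,8,4,2,1)
// every lane of the 32-thread warp holds the sum over the whole warp, with no shared memory needed.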
| static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) { | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| a.x += __shfl_xor_sync(0xffffffff, a.x, mask, 32); | |
| a.y += __shfl_xor_sync(0xffffffff, a.y, mask, 32); | |
| } | |
| return a; | |
| } | |
| static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) { | |
#if __CUDA_ARCH__ < CC_PASCAL || (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
(void) a;
bad_arch();
#else
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, mask, 32));
}
return a;
#endif // __CUDA_ARCH__ < CC_PASCAL || (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
| } | |
| static __device__ __forceinline__ float warp_reduce_max(float x) { | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, mask, 32)); | |
| } | |
| return x; | |
| } | |
| static __device__ __forceinline__ half2 warp_reduce_max(half2 x) { | |
#if __CUDA_ARCH__ < CC_PASCAL || (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
(void) x;
bad_arch();
#else
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
x = __hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
}
return x;
#endif // __CUDA_ARCH__ < CC_PASCAL || (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
| } | |
| static __device__ __forceinline__ float op_repeat(const float a, const float b) { | |
| return b; | |
| GGML_UNUSED(a); | |
| } | |
| static __device__ __forceinline__ float op_add(const float a, const float b) { | |
| return a + b; | |
| } | |
| static __device__ __forceinline__ float op_mul(const float a, const float b) { | |
| return a * b; | |
| } | |
| static __device__ __forceinline__ float op_div(const float a, const float b) { | |
| return a / b; | |
| } | |
| template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t> | |
| static __global__ void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst, | |
| int ne0, int ne1, int ne2, int ne3, | |
| int ne10, int ne11, int ne12, int ne13, | |
| /*int s0, */ int s1, int s2, int s3, | |
| /*int s10,*/ int s11, int s12, int s13) { | |
| const int i0s = blockDim.x*blockIdx.x + threadIdx.x; | |
| const int i1 = (blockDim.y*blockIdx.y + threadIdx.y); | |
| const int i2 = (blockDim.z*blockIdx.z + threadIdx.z) / ne3; | |
| const int i3 = (blockDim.z*blockIdx.z + threadIdx.z) % ne3; | |
| if (i0s >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { | |
| return; | |
| } | |
| const int i11 = i1 % ne11; | |
| const int i12 = i2 % ne12; | |
| const int i13 = i3 % ne13; | |
| const size_t i_src0 = i3*s3 + i2*s2 + i1*s1; | |
| const size_t i_src1 = i13*s13 + i12*s12 + i11*s11; | |
| const size_t i_dst = i_src0; | |
| const src0_t * src0_row = src0 + i_src0; | |
| const src1_t * src1_row = src1 + i_src1; | |
| dst_t * dst_row = dst + i_dst; | |
| for (int i0 = i0s; i0 < ne0; i0 += blockDim.x*gridDim.x) { | |
| const int i10 = i0 % ne10; | |
| dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]); | |
| } | |
| } | |
| template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t> | |
| static __global__ void k_bin_bcast_unravel(const src0_t * src0, const src1_t * src1, dst_t * dst, | |
| int ne0, int ne1, int ne2, int ne3, | |
| int ne10, int ne11, int ne12, int ne13, | |
| /*int s0, */ int s1, int s2, int s3, | |
| /*int s10,*/ int s11, int s12, int s13) { | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| const int i3 = i/(ne2*ne1*ne0); | |
| const int i2 = (i/(ne1*ne0)) % ne2; | |
| const int i1 = (i/ne0) % ne1; | |
| const int i0 = i % ne0; | |
| if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { | |
| return; | |
| } | |
| const int i11 = i1 % ne11; | |
| const int i12 = i2 % ne12; | |
| const int i13 = i3 % ne13; | |
| const size_t i_src0 = i3*s3 + i2*s2 + i1*s1; | |
| const size_t i_src1 = i13*s13 + i12*s12 + i11*s11; | |
| const size_t i_dst = i_src0; | |
| const src0_t * src0_row = src0 + i_src0; | |
| const src1_t * src1_row = src1 + i_src1; | |
| dst_t * dst_row = dst + i_dst; | |
| const int i10 = i0 % ne10; | |
| dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]); | |
| } | |
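// note on k_bin_bcast_unravel above: unlike k_bin_bcast it maps the entire output onto a flat 1-D
// grid; a launcher can fall back to it when the collapsed z dimension of k_bin_bcast would exceed
// the CUDA grid-size limit (the exact dispatch logic lives in the launch helpers, not shown here).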
| static __global__ void acc_f32(const float * x, const float * y, float * dst, const int ne, | |
| const int ne10, const int ne11, const int ne12, | |
| const int nb1, const int nb2, int offset) { | |
| const int i = blockDim.x * blockIdx.x + threadIdx.x; | |
| if (i >= ne) { | |
| return; | |
| } | |
| int src1_idx = i - offset; | |
| int oz = src1_idx / nb2; | |
| int oy = (src1_idx - (oz * nb2)) / nb1; | |
| int ox = src1_idx % nb1; | |
| if (src1_idx >= 0 && ox < ne10 && oy < ne11 && oz < ne12) { | |
| dst[i] = x[i] + y[ox + oy * ne10 + oz * ne10 * ne11]; | |
| } else { | |
| dst[i] = x[i]; | |
| } | |
| } | |
| static __global__ void gelu_f32(const float * x, float * dst, const int k) { | |
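// tanh approximation of GELU: f(x) ~= 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))),
// written below with the cubic term factored as xi*(1 + GELU_COEF_A*xi*xi).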
| const float GELU_COEF_A = 0.044715f; | |
| const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= k) { | |
| return; | |
| } | |
| float xi = x[i]; | |
| dst[i] = 0.5f*xi*(1.0f + tanhf(SQRT_2_OVER_PI*xi*(1.0f + GELU_COEF_A*xi*xi))); | |
| } | |
| static __global__ void silu_f32(const float * x, float * dst, const int k) { | |
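// SiLU (a.k.a. swish): f(x) = x * sigmoid(x) = x / (1 + exp(-x))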
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= k) { | |
| return; | |
| } | |
| dst[i] = x[i] / (1.0f + expf(-x[i])); | |
| } | |
| static __global__ void gelu_quick_f32(const float * x, float * dst, int k) { | |
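// "quick" GELU approximation: f(x) = x * sigmoid(1.702 * x)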
| const float GELU_QUICK_COEF = -1.702f; | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= k) { | |
| return; | |
| } | |
| dst[i] = x[i] * (1.0f / (1.0f + expf(GELU_QUICK_COEF * x[i]))); | |
| } | |
| static __global__ void tanh_f32(const float * x, float * dst, int k) { | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= k) { | |
| return; | |
| } | |
| dst[i] = tanhf(x[i]); | |
| } | |
| static __global__ void relu_f32(const float * x, float * dst, const int k) { | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= k) { | |
| return; | |
| } | |
| dst[i] = fmaxf(x[i], 0); | |
| } | |
| static __global__ void leaky_relu_f32(const float * x, float * dst, const int k, const float negative_slope) { | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= k) { | |
| return; | |
| } | |
| dst[i] = fmaxf(x[i], 0) + fminf(x[i], 0.0f) * negative_slope; | |
| } | |
| static __global__ void sqr_f32(const float * x, float * dst, const int k) { | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= k) { | |
| return; | |
| } | |
| dst[i] = x[i] * x[i]; | |
| } | |
| template <int block_size> | |
| static __global__ void norm_f32(const float * x, float * dst, const int ncols, const float eps) { | |
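// per-row normalization (layer norm without scale/bias): dst = (x - mean(x)) * rsqrt(var(x) + eps),
// computed in one pass that accumulates sum(x) and sum(x^2) per row.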
| const int row = blockIdx.x*blockDim.y + threadIdx.y; | |
| const int tid = threadIdx.x; | |
| float2 mean_var = make_float2(0.f, 0.f); | |
| for (int col = tid; col < ncols; col += block_size) { | |
| const float xi = x[row*ncols + col]; | |
| mean_var.x += xi; | |
| mean_var.y += xi * xi; | |
| } | |
| // sum up partial sums | |
| mean_var = warp_reduce_sum(mean_var); | |
| if (block_size > WARP_SIZE) { | |
| __shared__ float2 s_sum[32]; | |
| int warp_id = threadIdx.x / WARP_SIZE; | |
| int lane_id = threadIdx.x % WARP_SIZE; | |
| if (lane_id == 0) { | |
| s_sum[warp_id] = mean_var; | |
| } | |
| __syncthreads(); | |
| mean_var = s_sum[lane_id]; | |
| mean_var = warp_reduce_sum(mean_var); | |
| } | |
| const float mean = mean_var.x / ncols; | |
| const float var = mean_var.y / ncols - mean * mean; | |
| const float inv_std = rsqrtf(var + eps); | |
| for (int col = tid; col < ncols; col += block_size) { | |
| dst[row*ncols + col] = (x[row*ncols + col] - mean) * inv_std; | |
| } | |
| } | |
| static __global__ void concat_f32(const float * x,const float * y, float * dst, const int ne0, const int ne02) { | |
| int nidx = threadIdx.x + blockIdx.x * blockDim.x; | |
| if (nidx >= ne0) { | |
| return; | |
| } | |
| // operation | |
| int offset_dst = | |
| nidx + | |
| blockIdx.y * ne0 + | |
| blockIdx.z * ne0 * gridDim.y; | |
| if (blockIdx.z < ne02) { // src0 | |
| int offset_src = | |
| nidx + | |
| blockIdx.y * ne0 + | |
| blockIdx.z * ne0 * gridDim.y; | |
| dst[offset_dst] = x[offset_src]; | |
| } else { | |
| int offset_src = | |
| nidx + | |
| blockIdx.y * ne0 + | |
| (blockIdx.z - ne02) * ne0 * gridDim.y; | |
| dst[offset_dst] = y[offset_src]; | |
| } | |
| } | |
| static __global__ void upscale_f32(const float * x, float * dst, const int ne00, const int nb02, const int scale_factor) { | |
| int ne0 = ne00 * scale_factor; | |
| int nidx = threadIdx.x + blockIdx.x * blockDim.x; | |
| if (nidx >= ne0) { | |
| return; | |
| } | |
| // operation | |
| int i00 = nidx / scale_factor; | |
| int i01 = blockIdx.y / scale_factor; | |
| int offset_src = | |
| i00 + | |
| i01 * ne00 + | |
| blockIdx.z * nb02; | |
| int offset_dst = | |
| nidx + | |
| blockIdx.y * ne0 + | |
| blockIdx.z * ne0 * gridDim.y; | |
| dst[offset_dst] = x[offset_src]; | |
| } | |
| static __global__ void pad_f32(const float * x, float * dst, const int ne0, const int ne00, const int ne01, const int ne02) { | |
| int nidx = threadIdx.x + blockIdx.x * blockDim.x; | |
| if (nidx >= ne0) { | |
| return; | |
| } | |
| // operation | |
| int offset_dst = | |
| nidx + | |
| blockIdx.y * ne0 + | |
| blockIdx.z * ne0 * gridDim.y; | |
| if (nidx < ne00 && blockIdx.y < ne01 && blockIdx.z < ne02) { | |
| int offset_src = | |
| nidx + | |
| blockIdx.y * ne00 + | |
| blockIdx.z * ne00 * ne01; | |
| dst[offset_dst] = x[offset_src]; | |
| } else { | |
| dst[offset_dst] = 0.0f; | |
| } | |
| } | |
| template <int block_size> | |
| static __global__ void group_norm_f32(const float * x, float * dst, const int group_size, const int ne_elements, const float eps) { | |
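// group norm: each block normalizes one contiguous group of group_size elements to zero mean and
// unit variance, dst = (x - mean) * rsqrt(var + eps), using two reduction passes.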
| int start = blockIdx.x * group_size; | |
| int end = start + group_size; | |
| start += threadIdx.x; | |
| if (end >= ne_elements) { | |
| end = ne_elements; | |
| } | |
| float tmp = 0.0f; // partial sum for thread in warp | |
| for (int j = start; j < end; j += block_size) { | |
| tmp += x[j]; | |
| } | |
| tmp = warp_reduce_sum(tmp); | |
| if (block_size > WARP_SIZE) { | |
| __shared__ float s_sum[32]; | |
| int warp_id = threadIdx.x / WARP_SIZE; | |
| int lane_id = threadIdx.x % WARP_SIZE; | |
| if (lane_id == 0) { | |
| s_sum[warp_id] = tmp; | |
| } | |
| __syncthreads(); | |
| tmp = s_sum[lane_id]; | |
| tmp = warp_reduce_sum(tmp); | |
| } | |
| float mean = tmp / group_size; | |
| tmp = 0.0f; | |
| for (int j = start; j < end; j += block_size) { | |
| float xi = x[j] - mean; | |
| dst[j] = xi; | |
| tmp += xi * xi; | |
| } | |
| tmp = warp_reduce_sum(tmp); | |
| if (block_size > WARP_SIZE) { | |
| __shared__ float s_sum[32]; | |
| int warp_id = threadIdx.x / WARP_SIZE; | |
| int lane_id = threadIdx.x % WARP_SIZE; | |
| if (lane_id == 0) { | |
| s_sum[warp_id] = tmp; | |
| } | |
| __syncthreads(); | |
| tmp = s_sum[lane_id]; | |
| tmp = warp_reduce_sum(tmp); | |
| } | |
| float variance = tmp / group_size; | |
| float scale = rsqrtf(variance + eps); | |
| for (int j = start; j < end; j += block_size) { | |
| dst[j] *= scale; | |
| } | |
| } | |
| template <int block_size> | |
| static __global__ void rms_norm_f32(const float * x, float * dst, const int ncols, const float eps) { | |
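// RMS norm: dst = x * rsqrt(mean(x^2) + eps); unlike norm_f32 above, no mean is subtracted.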
| const int row = blockIdx.x*blockDim.y + threadIdx.y; | |
| const int tid = threadIdx.x; | |
| float tmp = 0.0f; // partial sum for thread in warp | |
| for (int col = tid; col < ncols; col += block_size) { | |
| const float xi = x[row*ncols + col]; | |
| tmp += xi * xi; | |
| } | |
| // sum up partial sums | |
| tmp = warp_reduce_sum(tmp); | |
| if (block_size > WARP_SIZE) { | |
| __shared__ float s_sum[32]; | |
| int warp_id = threadIdx.x / WARP_SIZE; | |
| int lane_id = threadIdx.x % WARP_SIZE; | |
| if (lane_id == 0) { | |
| s_sum[warp_id] = tmp; | |
| } | |
| __syncthreads(); | |
| tmp = s_sum[lane_id]; | |
| tmp = warp_reduce_sum(tmp); | |
| } | |
| const float mean = tmp / ncols; | |
| const float scale = rsqrtf(mean + eps); | |
| for (int col = tid; col < ncols; col += block_size) { | |
| dst[row*ncols + col] = scale * x[row*ncols + col]; | |
| } | |
| } | |
| static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ | |
| const block_q4_0 * x = (const block_q4_0 *) vx; | |
| const dfloat d = x[ib].d; | |
| const int vui = x[ib].qs[iqs]; | |
| v.x = vui & 0xF; | |
| v.y = vui >> 4; | |
#ifdef GGML_CUDA_F16
v = __hsub2(v, {8.0f, 8.0f});
v = __hmul2(v, {d, d});
#else
v.x = (v.x - 8.0f) * d;
v.y = (v.y - 8.0f) * d;
#endif // GGML_CUDA_F16
| } | |
| static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, dfloat2 & v){ | |
| const block_q4_1 * x = (const block_q4_1 *) vx; | |
| const dfloat d = __low2half(x[ib].dm); | |
| const dfloat m = __high2half(x[ib].dm); | |
| const int vui = x[ib].qs[iqs]; | |
| v.x = vui & 0xF; | |
| v.y = vui >> 4; | |
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
v = __hadd2(v, {m, m});
#else
v.x = (v.x * d) + m;
v.y = (v.y * d) + m;
#endif // GGML_CUDA_F16
| } | |
| static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ | |
| const block_q5_0 * x = (const block_q5_0 *) vx; | |
| const dfloat d = x[ib].d; | |
| uint32_t qh; | |
| memcpy(&qh, x[ib].qh, sizeof(qh)); | |
| const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; | |
| const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; | |
| v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); | |
| v.y = ((x[ib].qs[iqs] >> 4) | xh_1); | |
#ifdef GGML_CUDA_F16
v = __hsub2(v, {16.0f, 16.0f});
v = __hmul2(v, {d, d});
#else
v.x = (v.x - 16.0f) * d;
v.y = (v.y - 16.0f) * d;
#endif // GGML_CUDA_F16
| } | |
| static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, dfloat2 & v){ | |
| const block_q5_1 * x = (const block_q5_1 *) vx; | |
| const dfloat d = __low2half(x[ib].dm); | |
| const dfloat m = __high2half(x[ib].dm); | |
| uint32_t qh; | |
| memcpy(&qh, x[ib].qh, sizeof(qh)); | |
| const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; | |
| const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; | |
| v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); | |
| v.y = ((x[ib].qs[iqs] >> 4) | xh_1); | |
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
v = __hadd2(v, {m, m});
#else
v.x = (v.x * d) + m;
v.y = (v.y * d) + m;
#endif // GGML_CUDA_F16
| } | |
| static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ | |
| const block_q8_0 * x = (const block_q8_0 *) vx; | |
| const dfloat d = x[ib].d; | |
| v.x = x[ib].qs[iqs + 0]; | |
| v.y = x[ib].qs[iqs + 1]; | |
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
#else
v.x *= d;
v.y *= d;
#endif // GGML_CUDA_F16
| } | |
| //================================== k-quants | |
| template<typename dst_t> | |
| static __global__ void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { | |
| const int i = blockIdx.x; | |
| const block_q2_K * x = (const block_q2_K *) vx; | |
| const int tid = threadIdx.x; | |
#if QK_K == 256
const int n = tid/32;
| const int l = tid - 32*n; | |
| const int is = 8*n + l/16; | |
| const uint8_t q = x[i].qs[32*n + l]; | |
| dst_t * y = yy + i*QK_K + 128*n; | |
| float dall = __low2half(x[i].dm); | |
| float dmin = __high2half(x[i].dm); | |
| y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); | |
| y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4); | |
| y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4); | |
| y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4); | |
#else
const int is = tid/16; // 0 or 1
| const int il = tid%16; // 0...15 | |
| const uint8_t q = x[i].qs[il] >> (2*is); | |
| dst_t * y = yy + i*QK_K + 16*is + il; | |
| float dall = __low2half(x[i].dm); | |
| float dmin = __high2half(x[i].dm); | |
| y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); | |
y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4);
#endif // QK_K == 256
| } | |
| template<typename dst_t> | |
| static __global__ void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { | |
| const int i = blockIdx.x; | |
| const block_q3_K * x = (const block_q3_K *) vx; | |
#if QK_K == 256
const int r = threadIdx.x/4;
| const int tid = r/2; | |
| const int is0 = r%2; | |
| const int l0 = 16*is0 + 4*(threadIdx.x%4); | |
| const int n = tid / 4; | |
| const int j = tid - 4*n; | |
| uint8_t m = 1 << (4*n + j); | |
| int is = 8*n + 2*j + is0; | |
| int shift = 2*j; | |
| int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) : | |
| is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) : | |
| is < 12 ? (x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) : | |
| (x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4); | |
| float d_all = x[i].d; | |
| float dl = d_all * (us - 32); | |
| dst_t * y = yy + i*QK_K + 128*n + 32*j; | |
| const uint8_t * q = x[i].qs + 32*n; | |
| const uint8_t * hm = x[i].hmask; | |
| for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4)); | |
#else
const int tid = threadIdx.x;
| const int is = tid/16; // 0 or 1 | |
| const int il = tid%16; // 0...15 | |
| const int im = il/8; // 0...1 | |
| const int in = il%8; // 0...7 | |
| dst_t * y = yy + i*QK_K + 16*is + il; | |
| const uint8_t q = x[i].qs[il] >> (2*is); | |
| const uint8_t h = x[i].hmask[in] >> (2*is + im); | |
| const float d = (float)x[i].d; | |
| if (is == 0) { | |
| y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); | |
| y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); | |
| } else { | |
| y[ 0] = d * ((x[i].scales[0] >> 4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); | |
| y[32] = d * ((x[i].scales[1] >> 4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); | |
}
#endif // QK_K == 256
| } | |
| static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) { | |
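// unpack the 6-bit scale (d) and 6-bit min (m) of sub-block j from the packed q4_K/q5_K scales:
// entries 0..3 use the low 6 bits of q[j] and q[j+4]; entries 4..7 are reassembled from the
// remaining high bits.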
| if (j < 4) { | |
| d = q[j] & 63; m = q[j + 4] & 63; | |
| } else { | |
| d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); | |
| m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); | |
| } | |
| } | |
| template<typename dst_t> | |
| static __global__ void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { | |
| const block_q4_K * x = (const block_q4_K *) vx; | |
| const int i = blockIdx.x; | |
#if QK_K == 256
// assume 32 threads
| const int tid = threadIdx.x; | |
| const int il = tid/8; | |
| const int ir = tid%8; | |
| const int is = 2*il; | |
| const int n = 4; | |
| dst_t * y = yy + i*QK_K + 64*il + n*ir; | |
| const float dall = __low2half(x[i].dm); | |
| const float dmin = __high2half(x[i].dm); | |
| const uint8_t * q = x[i].qs + 32*il + n*ir; | |
| uint8_t sc, m; | |
| get_scale_min_k4(is + 0, x[i].scales, sc, m); | |
| const float d1 = dall * sc; const float m1 = dmin * m; | |
| get_scale_min_k4(is + 1, x[i].scales, sc, m); | |
| const float d2 = dall * sc; const float m2 = dmin * m; | |
| for (int l = 0; l < n; ++l) { | |
| y[l + 0] = d1 * (q[l] & 0xF) - m1; | |
| y[l +32] = d2 * (q[l] >> 4) - m2; | |
| } | |
#else
const int tid = threadIdx.x;
| const uint8_t * q = x[i].qs; | |
| dst_t * y = yy + i*QK_K; | |
| const float d = (float)x[i].dm[0]; | |
| const float m = (float)x[i].dm[1]; | |
| y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4); | |
y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >> 4) - m * (x[i].scales[1] >> 4);
#endif // QK_K == 256
| } | |
| template<typename dst_t> | |
| static __global__ void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { | |
| const block_q5_K * x = (const block_q5_K *) vx; | |
| const int i = blockIdx.x; | |
#if QK_K == 256
// assume 64 threads - this is very slightly better than the one below
| const int tid = threadIdx.x; | |
| const int il = tid/16; // il is in 0...3 | |
| const int ir = tid%16; // ir is in 0...15 | |
| const int is = 2*il; // is is in 0...6 | |
| dst_t * y = yy + i*QK_K + 64*il + 2*ir; | |
| const float dall = __low2half(x[i].dm); | |
| const float dmin = __high2half(x[i].dm); | |
| const uint8_t * ql = x[i].qs + 32*il + 2*ir; | |
| const uint8_t * qh = x[i].qh + 2*ir; | |
| uint8_t sc, m; | |
| get_scale_min_k4(is + 0, x[i].scales, sc, m); | |
| const float d1 = dall * sc; const float m1 = dmin * m; | |
| get_scale_min_k4(is + 1, x[i].scales, sc, m); | |
| const float d2 = dall * sc; const float m2 = dmin * m; | |
| uint8_t hm = 1 << (2*il); | |
| y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1; | |
| y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1; | |
| hm <<= 1; | |
| y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2; | |
| y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2; | |
#else
const int tid = threadIdx.x;
| const uint8_t q = x[i].qs[tid]; | |
| const int im = tid/8; // 0...3 | |
| const int in = tid%8; // 0...7 | |
| const int is = tid/16; // 0 or 1 | |
| const uint8_t h = x[i].qh[in] >> im; | |
| const float d = x[i].d; | |
| dst_t * y = yy + i*QK_K + tid; | |
| y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16)); | |
y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 0 : 16));
#endif // QK_K == 256
| } | |
| template<typename dst_t> | |
| static __global__ void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { | |
| const block_q6_K * x = (const block_q6_K *) vx; | |
| const int i = blockIdx.x; | |
#if QK_K == 256
// assume 64 threads - this is very slightly better than the one below
| const int tid = threadIdx.x; | |
| const int ip = tid/32; // ip is 0 or 1 | |
| const int il = tid - 32*ip; // 0...32 | |
| const int is = 8*ip + il/16; | |
| dst_t * y = yy + i*QK_K + 128*ip + il; | |
| const float d = x[i].d; | |
| const uint8_t * ql = x[i].ql + 64*ip + il; | |
| const uint8_t qh = x[i].qh[32*ip + il]; | |
| const int8_t * sc = x[i].scales + is; | |
| y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32); | |
| y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32); | |
| y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32); | |
| y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32); | |
#else
// assume 32 threads
| const int tid = threadIdx.x; | |
| const int ip = tid/16; // 0 or 1 | |
| const int il = tid - 16*ip; // 0...15 | |
| dst_t * y = yy + i*QK_K + 16*ip + il; | |
| const float d = x[i].d; | |
| const uint8_t ql = x[i].ql[16*ip + il]; | |
| const uint8_t qh = x[i].qh[il] >> (2*ip); | |
| const int8_t * sc = x[i].scales; | |
| y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32); | |
y[32] = d * sc[ip+2] * ((int8_t)((ql >> 4) | (((qh >> 4) & 3) << 4)) - 32);
#endif // QK_K == 256
| } | |
| static const __device__ uint64_t iq2xxs_grid[256] = { | |
| 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, | |
| 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x08080808082b0808, | |
| 0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819, | |
| 0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819, | |
| 0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b, | |
| 0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808, | |
| 0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08, | |
| 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b, | |
| 0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819, | |
| 0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08, | |
| 0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, | |
| 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08, | |
| 0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808, | |
| 0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808, | |
| 0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919, | |
| 0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819, | |
| 0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08, | |
| 0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908, | |
| 0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819, | |
| 0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808, | |
| 0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808, | |
| 0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908, | |
| 0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808, | |
| 0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08, | |
| 0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819, | |
| 0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819, | |
| 0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819, | |
| 0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908, | |
| 0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19, | |
| 0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819, | |
| 0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b, | |
| 0x082b191908080808, 0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808, | |
| 0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908, | |
| 0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08, | |
| 0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08, | |
| 0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908, | |
| 0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819, | |
| 0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808, | |
| 0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808, | |
| 0x1908190808082b08, 0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19, | |
| 0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819, | |
| 0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, | |
| 0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b, | |
| 0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08, | |
| 0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808, | |
| 0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 0x191908192b2b1908, | |
| 0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b, | |
| 0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819, | |
| 0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08, | |
| 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08, | |
| 0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808, | |
| 0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b, | |
| 0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b, | |
| 0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908, | |
| 0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819, | |
| 0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808, | |
| 0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908, | |
| 0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b, | |
| 0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808, | |
| 0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b, | |
| 0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b, | |
| 0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808, | |
| 0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19, | |
| 0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908, | |
| }; | |
| static const __device__ uint64_t iq2xs_grid[512] = { | |
| 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, | |
| 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b, | |
| 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919, | |
| 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b, | |
| 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919, | |
| 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808, | |
| 0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819, | |
| 0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819, | |
| 0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, | |
| 0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x0808081908192b2b, | |
| 0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b, | |
| 0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908, | |
| 0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908, | |
| 0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919, | |
| 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808, | |
| 0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919, | |
| 0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908, | |
| 0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b, | |
| 0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, | |
| 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08, | |
| 0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808, | |
| 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808, | |
| 0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819, | |
| 0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908, | |
| 0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819, | |
| 0x0808192b08081908, 0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808, | |
| 0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b, | |
| 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819, | |
| 0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819, | |
| 0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808, | |
| 0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908, | |
| 0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19, | |
| 0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b, | |
| 0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b, | |
| 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919, | |
| 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808, | |
| 0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819, | |
| 0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819, | |
| 0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b, | |
| 0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908, | |
| 0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808, | |
| 0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819, | |
| 0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808, | |
| 0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, | |
| 0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808, | |
| 0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808, | |
| 0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908, | |
| 0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908, | |
| 0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808, | |
| 0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b, | |
| 0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819, | |
| 0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, | |
| 0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908, | |
| 0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808, | |
| 0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908, | |
| 0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919, | |
| 0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08, | |
| 0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19, | |
| 0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b, | |
| 0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b, | |
| 0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808, | |
| 0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08, | |
| 0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b, | |
| 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908, | |
| 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b, | |
| 0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908, | |
| 0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, | |
| 0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808, | |
| 0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808, | |
| 0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08, | |
| 0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819, | |
| 0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919, | |
| 0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808, | |
| 0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808, | |
| 0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819, | |
| 0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819, | |
| 0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908, | |
| 0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908, | |
| 0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b, | |
| 0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908, | |
| 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908, | |
| 0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908, | |
| 0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808, | |
| 0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, | |
| 0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819, | |
| 0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819, | |
| 0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808, | |
| 0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b, | |
| 0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819, | |
| 0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819, | |
| 0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08, | |
| 0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808, | |
| 0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19, | |
| 0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919, | |
| 0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, | |
| 0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19, | |
| 0x192b081908080808, 0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b, | |
| 0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808, | |
| 0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b, | |
| 0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b, | |
| 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, | |
| 0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b, | |
| 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808, | |
| 0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819, | |
| 0x2b08081908081908, 0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808, | |
| 0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808, | |
| 0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08, | |
| 0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b, | |
| 0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19, | |
| 0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08, | |
| 0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919, | |
| 0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08, | |
| 0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08, | |
| 0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 0x2b19080808081908, | |
| 0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908, | |
| 0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b, | |
| 0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908, | |
| 0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808, | |
| 0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b, | |
| 0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808, | |
| 0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808, | |
| 0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19, | |
| 0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08, | |
| 0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808, | |
| 0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b, | |
| 0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808, | |
| 0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b, | |
| 0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b, | |
| }; | |
| static const __device__ uint8_t ksigns_iq2xs[128] = { | |
| 0, 129, 130, 3, 132, 5, 6, 135, 136, 9, 10, 139, 12, 141, 142, 15, | |
| 144, 17, 18, 147, 20, 149, 150, 23, 24, 153, 154, 27, 156, 29, 30, 159, | |
| 160, 33, 34, 163, 36, 165, 166, 39, 40, 169, 170, 43, 172, 45, 46, 175, | |
| 48, 177, 178, 51, 180, 53, 54, 183, 184, 57, 58, 187, 60, 189, 190, 63, | |
| 192, 65, 66, 195, 68, 197, 198, 71, 72, 201, 202, 75, 204, 77, 78, 207, | |
| 80, 209, 210, 83, 212, 85, 86, 215, 216, 89, 90, 219, 92, 221, 222, 95, | |
| 96, 225, 226, 99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111, | |
| 240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255, | |
| }; | |
| static const __device__ uint8_t kmask_iq2xs[8] = {1, 2, 4, 8, 16, 32, 64, 128}; | |
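// note: ksigns_iq2xs[i] equals i with bit 7 set whenever i has odd parity, so every entry has an
// even number of set bits; kmask_iq2xs selects the individual sign bits during dequantization.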
| inline bool ggml_cuda_supports_mmq(enum ggml_type type) { | |
| switch (type) { | |
| case GGML_TYPE_Q4_0: | |
| case GGML_TYPE_Q4_1: | |
| case GGML_TYPE_Q5_0: | |
| case GGML_TYPE_Q5_1: | |
| case GGML_TYPE_Q8_0: | |
| case GGML_TYPE_Q2_K: | |
| case GGML_TYPE_Q3_K: | |
| case GGML_TYPE_Q4_K: | |
| case GGML_TYPE_Q5_K: | |
| case GGML_TYPE_Q6_K: | |
| return true; | |
| default: | |
| return false; | |
| } | |
| } | |
| template<typename dst_t> | |
| static __global__ void dequantize_block_iq2_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy) { | |
| const int i = blockIdx.x; | |
| const block_iq2_xxs * x = (const block_iq2_xxs *) vx; | |
| const int tid = threadIdx.x; | |
#if QK_K == 256
const int il = tid/8; // 0...3
| const int ib = tid%8; // 0...7 | |
| dst_t * y = yy + i*QK_K + 32*ib + 8*il; | |
| const uint16_t * q2 = x[i].qs + 4*ib; | |
| const uint8_t * aux8 = (const uint8_t *)q2; | |
| const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[il]); | |
| const uint32_t aux32 = q2[2] | (q2[3] << 16); | |
| const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.25f; | |
| const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127]; | |
| for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); | |
#else
assert(false);
#endif // QK_K == 256
| } | |
| template<typename dst_t> | |
| static __global__ void dequantize_block_iq2_xs(const void * __restrict__ vx, dst_t * __restrict__ yy) { | |
| const int i = blockIdx.x; | |
| const block_iq2_xs * x = (const block_iq2_xs *) vx; | |
| const int tid = threadIdx.x; | |
#if QK_K == 256
const int il = tid/8; // 0...3
| const int ib = tid%8; // 0...7 | |
| dst_t * y = yy + i*QK_K + 32*ib + 8*il; | |
| const uint16_t * q2 = x[i].qs + 4*ib; | |
| const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[il] & 511)); | |
| const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f; | |
| const uint8_t signs = ksigns_iq2xs[q2[il] >> 9]; | |
| for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); | |
#else
assert(false);
#endif // QK_K == 256
| } | |
| static __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { | |
| static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); | |
| const int row = blockIdx.x*blockDim.y + threadIdx.y; | |
| if (row > nrows) return; | |
| const int num_blocks_per_row = ncols / QK_K; | |
| const int ib0 = row*num_blocks_per_row; | |
| const block_q2_K * x = (const block_q2_K *)vx + ib0; | |
| float tmp = 0; // partial sum for thread in warp | |
| #if QK_K == 256 | |
| const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15 | |
| const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 | |
| const int step = 16/K_QUANTS_PER_ITERATION; | |
| const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... | |
| const int in = tid - step*im; // 0...15 or 0...7 | |
| const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2 | |
| const int q_offset = 32*im + l0; | |
| const int s_offset = 8*im; | |
| const int y_offset = 128*im + l0; | |
| uint32_t aux[4]; | |
| const uint8_t * d = (const uint8_t *)aux; | |
| const uint8_t * m = (const uint8_t *)(aux + 2); | |
| for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { | |
| const float * y = yy + i * QK_K + y_offset; | |
| const uint8_t * q = x[i].qs + q_offset; | |
| const float dall = __low2half(x[i].dm); | |
| const float dmin = __high2half(x[i].dm); | |
| const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset); | |
| aux[0] = a[0] & 0x0f0f0f0f; | |
| aux[1] = a[1] & 0x0f0f0f0f; | |
| aux[2] = (a[0] >> 4) & 0x0f0f0f0f; | |
| aux[3] = (a[1] >> 4) & 0x0f0f0f0f; | |
| float sum1 = 0, sum2 = 0; | |
| for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { | |
| sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3) | |
| + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3) | |
| + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3) | |
| + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3) | |
| + y[l+16] * d[1] * ((q[l+16] >> 0) & 3) | |
| + y[l+48] * d[3] * ((q[l+16] >> 2) & 3) | |
| + y[l+80] * d[5] * ((q[l+16] >> 4) & 3) | |
| +y[l+112] * d[7] * ((q[l+16] >> 6) & 3); | |
| sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[ l+96] * m[6] | |
| + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7]; | |
| } | |
| tmp += dall * sum1 - dmin * sum2; | |
| } | |
| #else | |
| const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 | |
| const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 | |
| const int offset = tid * K_QUANTS_PER_ITERATION; | |
| uint32_t uaux[2]; | |
| const uint8_t * d = (const uint8_t *)uaux; | |
| for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { | |
| const float * y = yy + i * QK_K + offset; | |
| const uint8_t * q = x[i].qs + offset; | |
| const uint32_t * s = (const uint32_t *)x[i].scales; | |
| uaux[0] = s[0] & 0x0f0f0f0f; | |
| uaux[1] = (s[0] >> 4) & 0x0f0f0f0f; | |
| const float2 dall = __half22float2(x[i].dm); | |
| float sum1 = 0, sum2 = 0; | |
| for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { | |
| const uint8_t ql = q[l]; | |
| sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3) | |
| + y[l+16] * d[1] * ((ql >> 2) & 3) | |
| + y[l+32] * d[2] * ((ql >> 4) & 3) | |
| + y[l+48] * d[3] * ((ql >> 6) & 3); | |
| sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7]; | |
| } | |
| tmp += dall.x * sum1 - dall.y * sum2; | |
| } | |
| #endif | |
| // sum up partial sums and write back result | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); | |
| } | |
| if (threadIdx.x == 0) { | |
| dst[row] = tmp; | |
| } | |
| } | |
| static __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { | |
| const int row = blockIdx.x*blockDim.y + threadIdx.y; | |
| if (row > nrows) return; | |
| const int num_blocks_per_row = ncols / QK_K; | |
| const int ib0 = row*num_blocks_per_row; | |
| const block_q3_K * x = (const block_q3_K *)vx + ib0; | |
| float tmp = 0; // partial sum for thread in warp | |
| #if QK_K == 256 | |
| const uint16_t kmask1 = 0x0303; | |
| const uint16_t kmask2 = 0x0f0f; | |
| const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 | |
| const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 | |
| const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop | |
| const int step = 16/K_QUANTS_PER_ITERATION; | |
| const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... | |
| const int in = tid - step*im; // 0....15 or 0...7 | |
| const uint8_t m = 1 << (4*im); | |
| const int l0 = n*in; // 0...15 or 0...14 in steps of 2 | |
| const int q_offset = 32*im + l0; | |
| const int y_offset = 128*im + l0; | |
| uint16_t utmp[4]; | |
| const int8_t * s = (const int8_t *)utmp; | |
| const uint16_t s_shift = 4*im; | |
| for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { | |
| const float * y = yy + i * QK_K + y_offset; | |
| const uint8_t * q = x[i].qs + q_offset; | |
| const uint8_t * h = x[i].hmask + l0; | |
| const uint16_t * a = (const uint16_t *)x[i].scales; | |
| utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4); | |
| utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4); | |
| utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4); | |
| utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4); | |
| const float d = x[i].d; | |
| float sum = 0; | |
| for (int l = 0; l < n; ++l) { | |
| sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4)) | |
| + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4)) | |
| + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4)) | |
| + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4)); | |
| sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4)) | |
| + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4)) | |
| + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4)) | |
| + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4)); | |
| } | |
| tmp += d * sum; | |
| } | |
| #else | |
| const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 | |
| const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 | |
| const int offset = tid * K_QUANTS_PER_ITERATION; // 0...15 or 0...14 | |
| const int in = offset/8; // 0 or 1 | |
| const int im = offset%8; // 0...7 | |
| for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { | |
| const float * y = yy + i * QK_K + offset; | |
| const uint8_t * q = x[i].qs + offset; | |
| const uint8_t * s = x[i].scales; | |
| const float dall = (float)x[i].d; | |
| float sum = 0; | |
| for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { | |
| const uint8_t hl = x[i].hmask[im+l] >> in; | |
| const uint8_t ql = q[l]; | |
| sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4)) | |
| + y[l+16] * dall * ((s[0] >> 4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4)) | |
| + y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 0 : 4)) | |
| + y[l+48] * dall * ((s[1] >> 4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 0 : 4)); | |
| } | |
| tmp += sum; | |
| } | |
| #endif | |
| // sum up partial sums and write back result | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); | |
| } | |
| if (threadIdx.x == 0) { | |
| dst[row] = tmp; | |
| } | |
| } | |
| static __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { | |
| const int row = blockIdx.x*blockDim.y + threadIdx.y; | |
| if (row > nrows) return; | |
| const int num_blocks_per_row = ncols / QK_K; | |
| const int ib0 = row*num_blocks_per_row; | |
| const block_q4_K * x = (const block_q4_K *)vx + ib0; | |
| #if QK_K == 256 | |
| const uint16_t kmask1 = 0x3f3f; | |
| const uint16_t kmask2 = 0x0f0f; | |
| const uint16_t kmask3 = 0xc0c0; | |
| const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 | |
| const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 | |
| const int step = 8/K_QUANTS_PER_ITERATION; // 8 or 4 | |
| const int il = tid/step; // 0...3 | |
| const int ir = tid - step*il; // 0...7 or 0...3 | |
| const int n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4 | |
| const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 | |
| const int in = il%2; | |
| const int l0 = n*(2*ir + in); | |
| const int q_offset = 32*im + l0; | |
| const int y_offset = 64*im + l0; | |
| uint16_t aux[4]; | |
| const uint8_t * sc = (const uint8_t *)aux; | |
| #if K_QUANTS_PER_ITERATION == 2 | |
| uint32_t q32[4]; | |
| const uint8_t * q4 = (const uint8_t *)q32; | |
| #else | |
| uint16_t q16[4]; | |
| const uint8_t * q4 = (const uint8_t *)q16; | |
| #endif | |
| float tmp = 0; // partial sum for thread in warp | |
| for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { | |
| const float * y1 = yy + i*QK_K + y_offset; | |
| const float * y2 = y1 + 128; | |
| const float dall = __low2half(x[i].dm); | |
| const float dmin = __high2half(x[i].dm); | |
| const uint16_t * a = (const uint16_t *)x[i].scales; | |
| aux[0] = a[im+0] & kmask1; | |
| aux[1] = a[im+2] & kmask1; | |
| aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); | |
| aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); | |
| #if K_QUANTS_PER_ITERATION == 2 | |
| const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset); | |
| const uint32_t * q2 = q1 + 16; | |
| q32[0] = q1[0] & 0x0f0f0f0f; | |
| q32[1] = q1[0] & 0xf0f0f0f0; | |
| q32[2] = q2[0] & 0x0f0f0f0f; | |
| q32[3] = q2[0] & 0xf0f0f0f0; | |
| float4 s = {0.f, 0.f, 0.f, 0.f}; | |
| float smin = 0; | |
| for (int l = 0; l < 4; ++l) { | |
| s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+ 4]; | |
| s.z += y2[l] * q4[l+8]; s.w += y2[l+32] * q4[l+12]; | |
| smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; | |
| } | |
| tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; | |
| #else | |
| const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset); | |
| const uint16_t * q2 = q1 + 32; | |
| q16[0] = q1[0] & 0x0f0f; | |
| q16[1] = q1[0] & 0xf0f0; | |
| q16[2] = q2[0] & 0x0f0f; | |
| q16[3] = q2[0] & 0xf0f0; | |
| float4 s = {0.f, 0.f, 0.f, 0.f}; | |
| float smin = 0; | |
| for (int l = 0; l < 2; ++l) { | |
| s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+2]; | |
| s.z += y2[l] * q4[l+4]; s.w += y2[l+32] * q4[l+6]; | |
| smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; | |
| } | |
| tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; | |
| #endif | |
| } | |
| #else | |
| const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 | |
| const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); | |
| const int step = tid * K_QUANTS_PER_ITERATION; | |
| uint16_t aux16[2]; | |
| const uint8_t * s = (const uint8_t *)aux16; | |
| float tmp = 0; | |
| for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { | |
| const uint8_t * q = x[i].qs + step; | |
| const float * y = yy + i*QK_K + step; | |
| const uint16_t * a = (const uint16_t *)x[i].scales; | |
| aux16[0] = a[0] & 0x0f0f; | |
| aux16[1] = (a[0] >> 4) & 0x0f0f; | |
| const float d = (float)x[i].dm[0]; | |
| const float m = (float)x[i].dm[1]; | |
| float sum = 0.f; | |
| for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { | |
| sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2]) | |
| + y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2]) | |
| + y[j+32] * (d * s[1] * (q[j+ 0] >> 4) - m * s[3]) | |
| + y[j+48] * (d * s[1] * (q[j+16] >> 4) - m * s[3]); | |
| } | |
| tmp += sum; | |
| } | |
| #endif | |
| // sum up partial sums and write back result | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); | |
| } | |
| if (tid == 0) { | |
| dst[row] = tmp; | |
| } | |
| } | |
| static __global__ void dequantize_mul_mat_vec_q5_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols) { | |
| const int row = blockIdx.x; | |
| const int num_blocks_per_row = ncols / QK_K; | |
| const int ib0 = row*num_blocks_per_row; | |
| const block_q5_K * x = (const block_q5_K *)vx + ib0; | |
| float tmp = 0; // partial sum for thread in warp | |
| #if QK_K == 256 | |
| const uint16_t kmask1 = 0x3f3f; | |
| const uint16_t kmask2 = 0x0f0f; | |
| const uint16_t kmask3 = 0xc0c0; | |
| const int tid = threadIdx.x/2; // 0...15 | |
| const int ix = threadIdx.x%2; | |
| const int il = tid/4; // 0...3 | |
| const int ir = tid - 4*il;// 0...3 | |
| const int n = 2; | |
| const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 | |
| const int in = il%2; | |
| const int l0 = n*(2*ir + in); | |
| const int q_offset = 32*im + l0; | |
| const int y_offset = 64*im + l0; | |
| const uint8_t hm1 = 1 << (2*im); | |
| const uint8_t hm2 = hm1 << 4; | |
| uint16_t aux[4]; | |
| const uint8_t * sc = (const uint8_t *)aux; | |
| uint16_t q16[8]; | |
| const uint8_t * q4 = (const uint8_t *)q16; | |
| for (int i = ix; i < num_blocks_per_row; i += 2) { | |
| const uint8_t * ql1 = x[i].qs + q_offset; | |
| const uint8_t * qh = x[i].qh + l0; | |
| const float * y1 = yy + i*QK_K + y_offset; | |
| const float * y2 = y1 + 128; | |
| const float dall = __low2half(x[i].dm); | |
| const float dmin = __high2half(x[i].dm); | |
| const uint16_t * a = (const uint16_t *)x[i].scales; | |
| aux[0] = a[im+0] & kmask1; | |
| aux[1] = a[im+2] & kmask1; | |
| aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); | |
| aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); | |
| float4 sum = {0.f, 0.f, 0.f, 0.f}; | |
| float smin = 0; | |
| const uint16_t * q1 = (const uint16_t *)ql1; | |
| const uint16_t * q2 = q1 + 32; | |
| q16[0] = q1[0] & 0x0f0f; | |
| q16[1] = q1[8] & 0x0f0f; | |
| q16[2] = (q1[0] >> 4) & 0x0f0f; | |
| q16[3] = (q1[8] >> 4) & 0x0f0f; | |
| q16[4] = q2[0] & 0x0f0f; | |
| q16[5] = q2[8] & 0x0f0f; | |
| q16[6] = (q2[0] >> 4) & 0x0f0f; | |
| q16[7] = (q2[8] >> 4) & 0x0f0f; | |
| for (int l = 0; l < n; ++l) { | |
| sum.x += y1[l+ 0] * (q4[l +0] + (qh[l+ 0] & (hm1 << 0) ? 16 : 0)) | |
| + y1[l+16] * (q4[l +2] + (qh[l+16] & (hm1 << 0) ? 16 : 0)); | |
| sum.y += y1[l+32] * (q4[l +4] + (qh[l+ 0] & (hm1 << 1) ? 16 : 0)) | |
| + y1[l+48] * (q4[l +6] + (qh[l+16] & (hm1 << 1) ? 16 : 0)); | |
| sum.z += y2[l+ 0] * (q4[l +8] + (qh[l+ 0] & (hm2 << 0) ? 16 : 0)) | |
| + y2[l+16] * (q4[l+10] + (qh[l+16] & (hm2 << 0) ? 16 : 0)); | |
| sum.w += y2[l+32] * (q4[l+12] + (qh[l+ 0] & (hm2 << 1) ? 16 : 0)) | |
| + y2[l+48] * (q4[l+14] + (qh[l+16] & (hm2 << 1) ? 16 : 0)); | |
| smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3] | |
| + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7]; | |
| } | |
| tmp += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin; | |
| } | |
| #else | |
| const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 | |
| const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); | |
| const int step = tid * K_QUANTS_PER_ITERATION; | |
| const int im = step/8; | |
| const int in = step%8; | |
| for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { | |
| const uint8_t * q = x[i].qs + step; | |
| const int8_t * s = x[i].scales; | |
| const float * y = yy + i*QK_K + step; | |
| const float d = x[i].d; | |
| float sum = 0.f; | |
| for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { | |
| const uint8_t h = x[i].qh[in+j] >> im; | |
| sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 0 : 16)) | |
| + y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16)) | |
| + y[j+32] * d * s[2] * ((q[j+ 0] >> 4) - ((h >> 4) & 1 ? 0 : 16)) | |
| + y[j+48] * d * s[3] * ((q[j+16] >> 4) - ((h >> 6) & 1 ? 0 : 16)); | |
| } | |
| tmp += sum; | |
| } | |
| #endif | |
| // sum up partial sums and write back result | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); | |
| } | |
| if (threadIdx.x == 0) { | |
| dst[row] = tmp; | |
| } | |
| } | |
| static __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { | |
| static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); | |
| const int row = blockIdx.x*blockDim.y + threadIdx.y; | |
| if (row > nrows) return; | |
| const int num_blocks_per_row = ncols / QK_K; | |
| const int ib0 = row*num_blocks_per_row; | |
| const block_q6_K * x = (const block_q6_K *)vx + ib0; | |
| #if QK_K == 256 | |
| const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 | |
| const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1 | |
| const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8 | |
| const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... | |
| const int in = tid - step*im; // 0...15 or 0...7 | |
| #if K_QUANTS_PER_ITERATION == 1 | |
| const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 | |
| const int is = 0; | |
| #else | |
| const int l0 = 4 * in; // 0, 4, 8, ..., 28 | |
| const int is = in / 4; | |
| #endif | |
| const int ql_offset = 64*im + l0; | |
| const int qh_offset = 32*im + l0; | |
| const int s_offset = 8*im + is; | |
| const int y_offset = 128*im + l0; | |
| float tmp = 0; // partial sum for thread in warp | |
| for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { | |
| const float * y = yy + i * QK_K + y_offset; | |
| const uint8_t * ql = x[i].ql + ql_offset; | |
| const uint8_t * qh = x[i].qh + qh_offset; | |
| const int8_t * s = x[i].scales + s_offset; | |
| const float d = x[i].d; | |
| #if K_QUANTS_PER_ITERATION == 1 | |
| float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32) | |
| + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32) | |
| + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32) | |
| + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32) | |
| + y[64] * s[4] * d * ((int8_t)((ql[ 0] >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32) | |
| + y[80] * s[5] * d * ((int8_t)((ql[16] >> 4) | ((qh[16] & 0x30) >> 0)) - 32) | |
| + y[96] * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32) | |
| +y[112] * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32); | |
| tmp += sum; | |
| #else | |
| float sum = 0; | |
| for (int l = 0; l < 4; ++l) { | |
| sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32) | |
| + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32) | |
| + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32) | |
| + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32); | |
| } | |
| tmp += sum; | |
| #endif | |
| } | |
| #else | |
| const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...7 | |
| const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0...3 | |
| const int step = tid * K_QUANTS_PER_ITERATION; | |
| float tmp = 0; // partial sum for thread in warp | |
| for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { | |
| const float * y = yy + i * QK_K + step; | |
| const uint8_t * ql = x[i].ql + step; | |
| const uint8_t * qh = x[i].qh + step; | |
| const int8_t * s = x[i].scales; | |
| const float d = x[i+0].d; | |
| float sum = 0; | |
| for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { | |
| sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32) | |
| + y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32) | |
| + y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0] >> 4) | ((qh[j] & 0x30) >> 0)) - 32) | |
| + y[j+48] * s[3] * d * ((int8_t)((ql[j+16] >> 4) | ((qh[j] & 0xc0) >> 2)) - 32); | |
| } | |
| tmp += sum; | |
| } | |
| #endif | |
| // sum up partial sums and write back result | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); | |
| } | |
| if (tid == 0) { | |
| dst[row] = tmp; | |
| } | |
| } | |
| static __device__ void convert_f16(const void * vx, const int ib, const int iqs, dfloat2 & v){ | |
| const half * x = (const half *) vx; | |
| // automatic half -> float type cast if dfloat == float | |
| v.x = x[ib + iqs + 0]; | |
| v.y = x[ib + iqs + 1]; | |
| } | |
| static __global__ void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded) { | |
| const int ix = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (ix >= kx_padded) { | |
| return; | |
| } | |
| const int iy = blockDim.y*blockIdx.y + threadIdx.y; | |
| const int i_padded = iy*kx_padded + ix; | |
| block_q8_1 * y = (block_q8_1 *) vy; | |
| const int ib = i_padded / QK8_1; // block index | |
| const int iqs = i_padded % QK8_1; // quant index | |
| const float xi = ix < kx ? x[iy*kx + ix] : 0.0f; | |
| float amax = fabsf(xi); | |
| float sum = xi; | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| amax = fmaxf(amax, __shfl_xor_sync(0xffffffff, amax, mask, 32)); | |
| sum += __shfl_xor_sync(0xffffffff, sum, mask, 32); | |
| } | |
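| // after the XOR-shuffle butterfly every lane of the warp holds the maximum |x| and the sum over the warp's 32 values (one q8_1 block) | |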
| const float d = amax / 127; | |
| const int8_t q = amax == 0.0f ? 0 : roundf(xi / d); | |
| y[ib].qs[iqs] = q; | |
| if (iqs > 0) { | |
| return; | |
| } | |
| reinterpret_cast<half&>(y[ib].ds.x) = d; | |
| reinterpret_cast<half&>(y[ib].ds.y) = sum; | |
| } | |
| template<int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t> | |
| static __global__ void k_get_rows( | |
| const void * src0, const int32_t * src1, dst_t * dst, | |
| int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/ | |
| /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/ | |
| /*size_t s0,*/ size_t s1, size_t s2, size_t s3, | |
| /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03, | |
| size_t s10, size_t s11, size_t s12/*, size_t s13*/) { | |
| const int i00 = (blockIdx.x*blockDim.x + threadIdx.x)*2; | |
| const int i10 = blockDim.y*blockIdx.y + threadIdx.y; | |
| const int i11 = (blockIdx.z*blockDim.z + threadIdx.z)/ne12; | |
| const int i12 = (blockIdx.z*blockDim.z + threadIdx.z)%ne12; | |
| if (i00 >= ne00) { | |
| return; | |
| } | |
| const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; | |
| dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3; | |
| const void * src0_row = (const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03; | |
| const int ib = i00/qk; // block index | |
| const int iqs = (i00%qk)/qr; // quant index | |
| const int iybs = i00 - i00%qk; // dst block start index | |
| const int y_offset = qr == 1 ? 1 : qk/2; | |
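| // for qr > 1 the two values produced per call land qk/2 apart within the block, for qr == 1 they are adjacent | |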
| // dequantize | |
| dfloat2 v; | |
| dequantize_kernel(src0_row, ib, iqs, v); | |
| dst_row[iybs + iqs + 0] = v.x; | |
| dst_row[iybs + iqs + y_offset] = v.y; | |
| } | |
| template<typename src0_t, typename dst_t> | |
| static __global__ void k_get_rows_float( | |
| const src0_t * src0, const int32_t * src1, dst_t * dst, | |
| int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/ | |
| /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/ | |
| /*size_t s0,*/ size_t s1, size_t s2, size_t s3, | |
| /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03, | |
| size_t s10, size_t s11, size_t s12/*, size_t s13*/) { | |
| const int i00 = blockIdx.x*blockDim.x + threadIdx.x; | |
| const int i10 = blockDim.y*blockIdx.y + threadIdx.y; | |
| const int i11 = (blockIdx.z*blockDim.z + threadIdx.z)/ne12; | |
| const int i12 = (blockIdx.z*blockDim.z + threadIdx.z)%ne12; | |
| if (i00 >= ne00) { | |
| return; | |
| } | |
| const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; | |
| dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3; | |
| const src0_t * src0_row = (const src0_t *)((const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03); | |
| dst_row[i00] = src0_row[i00]; | |
| } | |
| template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t> | |
| static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) { | |
| const int i = 2*(blockDim.x*blockIdx.x + threadIdx.x); | |
| if (i >= k) { | |
| return; | |
| } | |
| const int ib = i/qk; // block index | |
| const int iqs = (i%qk)/qr; // quant index | |
| const int iybs = i - i%qk; // y block start index | |
| const int y_offset = qr == 1 ? 1 : qk/2; | |
| // dequantize | |
| dfloat2 v; | |
| dequantize_kernel(vx, ib, iqs, v); | |
| y[iybs + iqs + 0] = v.x; | |
| y[iybs + iqs + y_offset] = v.y; | |
| } | |
| template <typename src_t, typename dst_t> | |
| static __global__ void convert_unary(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) { | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= k) { | |
| return; | |
| } | |
| const src_t * x = (src_t *) vx; | |
| y[i] = x[i]; | |
| } | |
| // VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called | |
| // MMVQ = mul_mat_vec_q, MMQ = mul_mat_q | |
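| // e.g. vec_dot_q4_0_q8_1 below passes VDR_Q4_0_Q8_1_MMVQ ints of 4-bit data (v) and twice as many ints of q8_1 data (u) per call | |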
| template <int vdr> static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl( | |
| const int * v, const int * u, const float & d4, const half2 & ds8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| int sumi = 0; | |
| for (int i = 0; i < vdr; ++i) { | |
| const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; | |
| const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; | |
| // SIMD dot product of quantized values | |
| sumi = __dp4a(vi0, u[2*i+0], sumi); | |
| sumi = __dp4a(vi1, u[2*i+1], sumi); | |
| } | |
| const float2 ds8f = __half22float2(ds8); | |
| // second part effectively subtracts 8 from each quant value | |
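| // ds8 packs (d8, sum of the q8_1 block's values); each call covers vdr of the block's QI4_0 ints, hence the vdr/QI4_0 share of the offset correction | |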
| return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y); | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| template <int vdr> static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl( | |
| const int * v, const int * u, const half2 & dm4, const half2 & ds8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| int sumi = 0; | |
| for (int i = 0; i < vdr; ++i) { | |
| const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; | |
| const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; | |
| // SIMD dot product of quantized values | |
| sumi = __dp4a(vi0, u[2*i+0], sumi); | |
| sumi = __dp4a(vi1, u[2*i+1], sumi); | |
| } | |
| #ifdef GGML_CUDA_F16 | |
| const float2 tmp = __half22float2(__hmul2(dm4, ds8)); | |
| const float d4d8 = tmp.x; | |
| const float m4s8 = tmp.y; | |
| #else | |
| const float2 dm4f = __half22float2(dm4); | |
| const float2 ds8f = __half22float2(ds8); | |
| const float d4d8 = dm4f.x * ds8f.x; | |
| const float m4s8 = dm4f.y * ds8f.y; | |
| #endif // GGML_CUDA_F16 | |
| // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it | |
| return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1)); | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| template <int vdr> static __device__ __forceinline__ float vec_dot_q5_0_q8_1_impl( | |
| const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| int sumi = 0; | |
| for (int i = 0; i < vdr; ++i) { | |
| int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits | |
| vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4 | |
| vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12 | |
| vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20 | |
| vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28 | |
| sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values | |
| int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits | |
| vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4 | |
| vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12 | |
| vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20 | |
| vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28 | |
| sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values | |
| } | |
| const float2 ds8f = __half22float2(ds8); | |
| // second part effectively subtracts 16 from each quant value | |
| return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y); | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| template <int vdr> static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl( | |
| const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| int sumi = 0; | |
| for (int i = 0; i < vdr; ++i) { | |
| int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits | |
| vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4 | |
| vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12 | |
| vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20 | |
| vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28 | |
| sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values | |
| int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits | |
| vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4 | |
| vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12 | |
| vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20 | |
| vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28 | |
| sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values | |
| } | |
| #ifdef GGML_CUDA_F16 | |
| const float2 tmp = __half22float2(__hmul2(dm5, ds8)); | |
| const float d5d8 = tmp.x; | |
| const float m5s8 = tmp.y; | |
| #else | |
| const float2 dm5f = __half22float2(dm5); | |
| const float2 ds8f = __half22float2(ds8); | |
| const float d5d8 = dm5f.x * ds8f.x; | |
| const float m5s8 = dm5f.y * ds8f.y; | |
| #endif // GGML_CUDA_F16 | |
| // scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it | |
| return sumi*d5d8 + m5s8 / (QI5_1 / vdr); | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| template <int vdr> static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl( | |
| const int * v, const int * u, const float & d8_0, const float & d8_1) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| int sumi = 0; | |
| for (int i = 0; i < vdr; ++i) { | |
| // SIMD dot product of quantized values | |
| sumi = __dp4a(v[i], u[i], sumi); | |
| } | |
| return d8_0*d8_1 * sumi; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| template <int vdr> static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl( | |
| const int * v, const int * u, const half2 & dm8, const half2 & ds8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| int sumi = 0; | |
| for (int i = 0; i < vdr; ++i) { | |
| // SIMD dot product of quantized values | |
| sumi = __dp4a(v[i], u[i], sumi); | |
| } | |
| #ifdef GGML_CUDA_F16 | |
| const float2 tmp = __half22float2(__hmul2(dm8, ds8)); | |
| const float d8d8 = tmp.x; | |
| const float m8s8 = tmp.y; | |
| #else | |
| const float2 dm8f = __half22float2(dm8); | |
| const float2 ds8f = __half22float2(ds8); | |
| const float d8d8 = dm8f.x * ds8f.x; | |
| const float m8s8 = dm8f.y * ds8f.y; | |
| #endif // GGML_CUDA_F16 | |
| // scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it | |
| return sumi*d8d8 + m8s8 / (QI8_1 / vdr); | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| // contiguous v/x values | |
| static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq( | |
| const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales, | |
| const half2 & dm2, const float * __restrict__ d8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| float sumf_d = 0.0f; | |
| float sumf_m = 0.0f; | |
| for (int i = 0; i < QR2_K; ++i) { | |
| const int sc = scales[2*i]; | |
| const int vi = (v >> (2*i)) & 0x03030303; | |
| sumf_d += d8[i] * (__dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product | |
| // fill int with 4x m | |
| int m = sc >> 4; | |
| m |= m << 8; | |
| m |= m << 16; | |
| sumf_m += d8[i] * __dp4a(m, u[i], 0); // multiply constant q2_K part with sum of q8_1 values | |
| } | |
| const float2 dm2f = __half22float2(dm2); | |
| return dm2f.x*sumf_d - dm2f.y*sumf_m; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| // contiguous u/y values | |
| static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq( | |
| const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ scales, | |
| const half2 & dm2, const float & d8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| int sumi_d = 0; | |
| int sumi_m = 0; | |
| for (int i0 = 0; i0 < QI8_1; i0 += QI8_1/2) { | |
| int sumi_d_sc = 0; | |
| const int sc = scales[i0 / (QI8_1/2)]; | |
| // fill int with 4x m | |
| int m = sc >> 4; | |
| m |= m << 8; | |
| m |= m << 16; | |
| for (int i = i0; i < i0 + QI8_1/2; ++i) { | |
| sumi_d_sc = __dp4a(v[i], u[i], sumi_d_sc); // SIMD dot product | |
| sumi_m = __dp4a(m, u[i], sumi_m); // multiply sum of q8_1 values with m | |
| } | |
| sumi_d += sumi_d_sc * (sc & 0xF); | |
| } | |
| const float2 dm2f = __half22float2(dm2); | |
| return d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m); | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| // contiguous v/x values | |
| static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq( | |
| const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales, | |
| const int & scale_offset, const float & d3, const float * __restrict__ d8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| float sumf = 0.0f; | |
| for (int i = 0; i < QR3_K; ++i) { | |
| const int isc = scale_offset + 2*i; | |
| const int isc_low = isc % (QK_K/32); | |
| const int sc_shift_low = 4 * (isc / (QK_K/32)); | |
| const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF; | |
| const int isc_high = isc % (QK_K/64); | |
| const int sc_shift_high = 2 * (isc / (QK_K/64)); | |
| const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4; | |
| const int sc = (sc_low | sc_high) - 32; | |
| const int vil = (vl >> (2*i)) & 0x03030303; | |
| const int vih = ((vh >> i) << 2) & 0x04040404; | |
| const int vi = __vsubss4(vil, vih); | |
| sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product | |
| } | |
| return d3 * sumf; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| // contiguous u/y values | |
| static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq( | |
| const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales, | |
| const float & d3, const float & d8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| int sumi = 0; | |
| for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) { | |
| int sumi_sc = 0; | |
| for (int i = i0; i < i0 + QI8_1/2; ++i) { | |
| sumi_sc = __dp4a(v[i], u[i], sumi_sc); // SIMD dot product | |
| } | |
| sumi += sumi_sc * scales[i0 / (QI8_1/2)]; | |
| } | |
| return d3*d8 * sumi; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| // contiguous v/x values | |
| static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq( | |
| const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, | |
| const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| float sumf_d = 0.0f; | |
| float sumf_m = 0.0f; | |
| for (int i = 0; i < QR4_K; ++i) { | |
| const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F; | |
| const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F; | |
| const int dot1 = __dp4a(v1i, u[2*i+1], __dp4a(v0i, u[2*i+0], 0)); // SIMD dot product | |
| const int dot2 = __dp4a(0x01010101, u[2*i+1], __dp4a(0x01010101, u[2*i+0], 0)); // sum of u | |
| sumf_d += d8[i] * (dot1 * sc[i]); | |
| sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values | |
| } | |
| const float2 dm4f = __half22float2(dm4); | |
| return dm4f.x*sumf_d - dm4f.y*sumf_m; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| // contiguous u/y values | |
| static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq( | |
| const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, | |
| const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| float sumf_d = 0.0f; | |
| float sumf_m = 0.0f; | |
| for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) { | |
| int sumi_d = 0; | |
| for (int j = 0; j < QI8_1; ++j) { | |
| sumi_d = __dp4a((v[j] >> (4*i)) & 0x0F0F0F0F, u[i*QI8_1 + j], sumi_d); // SIMD dot product | |
| } | |
| const float2 ds8f = __half22float2(ds8[i]); | |
| sumf_d += ds8f.x * (sc[i] * sumi_d); | |
| sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val | |
| } | |
| const float2 dm4f = __half22float2(dm4); | |
| return dm4f.x*sumf_d - dm4f.y*sumf_m; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| // contiguous v/x values | |
| static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq( | |
| const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc, | |
| const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| float sumf_d = 0.0f; | |
| float sumf_m = 0.0f; | |
| for (int i = 0; i < QR5_K; ++i) { | |
| const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F; | |
| const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F; | |
| const int vh0i = ((vh[0] >> i) << 4) & 0x10101010; | |
| const int vh1i = ((vh[1] >> i) << 4) & 0x10101010; | |
| const int v0i = vl0i | vh0i; | |
| const int v1i = vl1i | vh1i; | |
| const int dot1 = __dp4a(v0i, u[2*i+0], __dp4a(v1i, u[2*i+1], 0)); // SIMD dot product | |
| const int dot2 = __dp4a(0x01010101, u[2*i+0], __dp4a(0x01010101, u[2*i+1], 0)); // sum of u | |
| sumf_d += d8[i] * (dot1 * sc[i]); | |
| sumf_m += d8[i] * (dot2 * m[i]); | |
| } | |
| const float2 dm5f = __half22float2(dm5); | |
| return dm5f.x*sumf_d - dm5f.y*sumf_m; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| // contiguous u/y values | |
| static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq( | |
| const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, | |
| const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| float sumf_d = 0.0f; | |
| float sumf_m = 0.0f; | |
| for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) { | |
| int sumi_d = 0; | |
| for (int j = 0; j < QI8_1; ++j) { | |
| sumi_d = __dp4a(v[i*QI8_1 + j], u[i*QI8_1 + j], sumi_d); // SIMD dot product | |
| } | |
| const float2 ds8f = __half22float2(ds8[i]); | |
| sumf_d += ds8f.x * (sc[i] * sumi_d); | |
| sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val | |
| } | |
| const float2 dm4f = __half22float2(dm4); | |
| return dm4f.x*sumf_d - dm4f.y*sumf_m; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| // contiguous v/x values | |
| static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq( | |
| const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales, | |
| const float & d, const float * __restrict__ d8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| float sumf = 0.0f; | |
| for (int i = 0; i < QR6_K; ++i) { | |
| const int sc = scales[4*i]; | |
| const int vil = (vl >> (4*i)) & 0x0F0F0F0F; | |
| const int vih = ((vh >> (4*i)) << 4) & 0x30303030; | |
| const int vi = __vsubss4((vil | vih), 0x20202020); // vi = (vil | vih) - 32 | |
| sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product | |
| } | |
| return d*sumf; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| // contiguous u/y values | |
| static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq( | |
| const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc, | |
| const float & d6, const float * __restrict__ d8) { | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| float sumf_d = 0.0f; | |
| for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) { | |
| int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 scale | |
| for (int i = i0; i < i0 + 2; ++i) { | |
| sumi_d.x = __dp4a(v[2*i+0], u[2*i+0], sumi_d.x); // SIMD dot product | |
| sumi_d.x = __dp4a(v[2*i+1], u[2*i+1], sumi_d.x); // SIMD dot product | |
| sumi_d.y = __dp4a(v[2*i+4], u[2*i+4], sumi_d.y); // SIMD dot product | |
| sumi_d.y = __dp4a(v[2*i+5], u[2*i+5], sumi_d.y); // SIMD dot product | |
| } | |
| sumf_d += d8[i0/4] * (sc[i0/2+0]*sumi_d.x + sc[i0/2+1]*sumi_d.y); | |
| } | |
| return d6 * sumf_d; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| } | |
| static __device__ __forceinline__ float vec_dot_q4_0_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq; | |
| int v[VDR_Q4_0_Q8_1_MMVQ]; | |
| int u[2*VDR_Q4_0_Q8_1_MMVQ]; | |
| for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) { | |
| v[i] = get_int_from_uint8(bq4_0->qs, iqs + i); | |
| u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); | |
| u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_0); | |
| } | |
| return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMVQ>(v, u, bq4_0->d, bq8_1->ds); | |
| } | |
| template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { | |
| (void)x_qh; (void)x_sc; | |
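| // each tile row is padded by one extra element (stride WARP_SIZE + 1), which helps avoid shared memory bank conflicts | |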
| __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; | |
| __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0]; | |
| *x_ql = tile_x_qs; | |
| *x_dm = (half2 *) tile_x_d; | |
| } | |
| template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { | |
| (void)x_qh; (void)x_sc; | |
| GGML_CUDA_ASSUME(i_offset >= 0); | |
| GGML_CUDA_ASSUME(i_offset < nwarps); | |
| GGML_CUDA_ASSUME(k >= 0); | |
| GGML_CUDA_ASSUME(k < WARP_SIZE); | |
| const int kbx = k / QI4_0; | |
| const int kqsx = k % QI4_0; | |
| const block_q4_0 * bx0 = (const block_q4_0 *) vx; | |
| float * x_dmf = (float *) x_dm; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { | |
| int i = i0 + i_offset; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx; | |
| x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); | |
| // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d; | |
| } | |
| const int blocks_per_tile_x_row = WARP_SIZE / QI4_0; | |
| const int kbxd = k % blocks_per_tile_x_row; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) { | |
| int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd; | |
| x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = bxi->d; | |
| } | |
| } | |
| static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { | |
| (void)x_qh; (void)x_sc; | |
| const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); | |
| const float * x_dmf = (const float *) x_dm; | |
| int u[2*VDR_Q4_0_Q8_1_MMQ]; | |
| for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) { | |
| u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; | |
| u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE]; | |
| } | |
| return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ> | |
| (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0], | |
| y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); | |
| } | |
| static __device__ __forceinline__ float vec_dot_q4_1_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq; | |
| int v[VDR_Q4_1_Q8_1_MMVQ]; | |
| int u[2*VDR_Q4_1_Q8_1_MMVQ]; | |
| for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) { | |
| v[i] = get_int_from_uint8_aligned(bq4_1->qs, iqs + i); | |
| u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); | |
| u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_1); | |
| } | |
| return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMVQ>(v, u, bq4_1->dm, bq8_1->ds); | |
| } | |
| template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { | |
| (void)x_qh; (void)x_sc; | |
| __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; | |
| __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1]; | |
| *x_ql = tile_x_qs; | |
| *x_dm = tile_x_dm; | |
| } | |
| template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_1( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { | |
| (void)x_qh; (void)x_sc; | |
| GGML_CUDA_ASSUME(i_offset >= 0); | |
| GGML_CUDA_ASSUME(i_offset < nwarps); | |
| GGML_CUDA_ASSUME(k >= 0); | |
| GGML_CUDA_ASSUME(k < WARP_SIZE); | |
| const int kbx = k / QI4_1; | |
| const int kqsx = k % QI4_1; | |
| const block_q4_1 * bx0 = (const block_q4_1 *) vx; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { | |
| int i = i0 + i_offset; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx; | |
| x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); | |
| } | |
| const int blocks_per_tile_x_row = WARP_SIZE / QI4_1; | |
| const int kbxd = k % blocks_per_tile_x_row; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) { | |
| int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd; | |
| x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm; | |
| } | |
| } | |
| static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { | |
| (void)x_qh; (void)x_sc; | |
| const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); | |
| int u[2*VDR_Q4_1_Q8_1_MMQ]; | |
| for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) { | |
| u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; | |
| u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE]; | |
| } | |
| return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMQ> | |
| (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1], | |
| y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); | |
| } | |
| static __device__ __forceinline__ float vec_dot_q5_0_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq; | |
| int vl[VDR_Q5_0_Q8_1_MMVQ]; | |
| int vh[VDR_Q5_0_Q8_1_MMVQ]; | |
| int u[2*VDR_Q5_0_Q8_1_MMVQ]; | |
| for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) { | |
| vl[i] = get_int_from_uint8(bq5_0->qs, iqs + i); | |
| vh[i] = get_int_from_uint8(bq5_0->qh, 0) >> (4 * (iqs + i)); | |
| u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); | |
| u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_0); | |
| } | |
| return vec_dot_q5_0_q8_1_impl<VDR_Q5_0_Q8_1_MMVQ>(vl, vh, u, bq5_0->d, bq8_1->ds); | |
| } | |
| template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { | |
| (void)x_qh; (void)x_sc; | |
| __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; | |
| __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0]; | |
| *x_ql = tile_x_ql; | |
| *x_dm = (half2 *) tile_x_d; | |
| } | |
| template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_0( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { | |
| (void)x_qh; (void)x_sc; | |
| GGML_CUDA_ASSUME(i_offset >= 0); | |
| GGML_CUDA_ASSUME(i_offset < nwarps); | |
| GGML_CUDA_ASSUME(k >= 0); | |
| GGML_CUDA_ASSUME(k < WARP_SIZE); | |
| const int kbx = k / QI5_0; | |
| const int kqsx = k % QI5_0; | |
| const block_q5_0 * bx0 = (const block_q5_0 *) vx; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { | |
| int i = i0 + i_offset; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx; | |
| const int ql = get_int_from_uint8(bxi->qs, kqsx); | |
| const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0)); | |
| int qs0 = (ql >> 0) & 0x0F0F0F0F; | |
| qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 | |
| qs0 |= (qh << 11) & 0x00001000; // 1 -> 12 | |
| qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 | |
| qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 | |
| qs0 = __vsubss4(qs0, 0x10101010); // subtract 16 | |
| x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0; | |
| int qs1 = (ql >> 4) & 0x0F0F0F0F; | |
| qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 | |
| qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12 | |
| qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 | |
| qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 | |
| qs1 = __vsubss4(qs1, 0x10101010); // subtract 16 | |
| x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; | |
| } | |
| const int blocks_per_tile_x_row = WARP_SIZE / QI5_0; | |
| const int kbxd = k % blocks_per_tile_x_row; | |
| float * x_dmf = (float *) x_dm; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) { | |
| int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd; | |
| x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = bxi->d; | |
| } | |
| } | |
| static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { | |
| (void)x_qh; (void)x_sc; | |
| const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); | |
| const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0; | |
| const float * x_dmf = (const float *) x_dm; | |
| const float * y_df = (const float *) y_ds; | |
| int u[2*VDR_Q5_0_Q8_1_MMQ]; | |
| for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) { | |
| u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; | |
| u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE]; | |
| } | |
| return vec_dot_q8_0_q8_1_impl<QR5_0*VDR_Q5_0_Q8_1_MMQ> | |
| (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); | |
| } | |
| static __device__ __forceinline__ float vec_dot_q5_1_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq; | |
| int vl[VDR_Q5_1_Q8_1_MMVQ]; | |
| int vh[VDR_Q5_1_Q8_1_MMVQ]; | |
| int u[2*VDR_Q5_1_Q8_1_MMVQ]; | |
| for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) { | |
| vl[i] = get_int_from_uint8_aligned(bq5_1->qs, iqs + i); | |
| vh[i] = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * (iqs + i)); | |
| u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); | |
| u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_1); | |
| } | |
| return vec_dot_q5_1_q8_1_impl<VDR_Q5_1_Q8_1_MMVQ>(vl, vh, u, bq5_1->dm, bq8_1->ds); | |
| } | |
| template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { | |
| (void)x_qh; (void)x_sc; | |
| __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; | |
| __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1]; | |
| *x_ql = tile_x_ql; | |
| *x_dm = tile_x_dm; | |
| } | |
| template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_1( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { | |
| (void)x_qh; (void)x_sc; | |
| GGML_CUDA_ASSUME(i_offset >= 0); | |
| GGML_CUDA_ASSUME(i_offset < nwarps); | |
| GGML_CUDA_ASSUME(k >= 0); | |
| GGML_CUDA_ASSUME(k < WARP_SIZE); | |
| const int kbx = k / QI5_1; | |
| const int kqsx = k % QI5_1; | |
| const block_q5_1 * bx0 = (const block_q5_1 *) vx; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { | |
| int i = i0 + i_offset; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx; | |
| const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx); | |
| const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1)); | |
| int qs0 = (ql >> 0) & 0x0F0F0F0F; | |
| qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 | |
| qs0 |= (qh << 11) & 0x00001000; // 1 -> 12 | |
| qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 | |
| qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 | |
| x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0; | |
| int qs1 = (ql >> 4) & 0x0F0F0F0F; | |
| qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 | |
| qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12 | |
| qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 | |
| qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 | |
| x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; | |
| } | |
| const int blocks_per_tile_x_row = WARP_SIZE / QI5_1; | |
| const int kbxd = k % blocks_per_tile_x_row; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) { | |
| int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd; | |
| x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm; | |
| } | |
| } | |
| static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { | |
| (void)x_qh; (void)x_sc; | |
| const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); | |
| const int index_bx = i * (WARP_SIZE/QI5_1) + i/QI5_1 + k/QI5_1; | |
| int u[2*VDR_Q5_1_Q8_1_MMQ]; | |
| for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) { | |
| u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; | |
| u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE]; | |
| } | |
| return vec_dot_q8_1_q8_1_impl<QR5_1*VDR_Q5_1_Q8_1_MMQ> | |
| (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); | |
| } | |
| static __device__ __forceinline__ float vec_dot_q8_0_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq; | |
| int v[VDR_Q8_0_Q8_1_MMVQ]; | |
| int u[VDR_Q8_0_Q8_1_MMVQ]; | |
| for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) { | |
| v[i] = get_int_from_int8(bq8_0->qs, iqs + i); | |
| u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); | |
| } | |
| return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d, __low2half(bq8_1->ds)); | |
| } | |
| template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { | |
| (void)x_qh; (void)x_sc; | |
| __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; | |
| __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0]; | |
| *x_ql = tile_x_qs; | |
| *x_dm = (half2 *) tile_x_d; | |
| } | |
| template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q8_0( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { | |
| (void)x_qh; (void)x_sc; | |
| GGML_CUDA_ASSUME(i_offset >= 0); | |
| GGML_CUDA_ASSUME(i_offset < nwarps); | |
| GGML_CUDA_ASSUME(k >= 0); | |
| GGML_CUDA_ASSUME(k < WARP_SIZE); | |
| const int kbx = k / QI8_0; | |
| const int kqsx = k % QI8_0; | |
| float * x_dmf = (float *) x_dm; | |
| const block_q8_0 * bx0 = (const block_q8_0 *) vx; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { | |
| int i = i0 + i_offset; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx; | |
| x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx); | |
| } | |
| const int blocks_per_tile_x_row = WARP_SIZE / QI8_0; | |
| const int kbxd = k % blocks_per_tile_x_row; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) { | |
| int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd; | |
| x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = bxi->d; | |
| } | |
| } | |
| static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { | |
| (void)x_qh; (void)x_sc; | |
| const float * x_dmf = (const float *) x_dm; | |
| const float * y_df = (const float *) y_ds; | |
| return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMQ> | |
| (&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0], | |
| y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]); | |
| } | |
| static __device__ __forceinline__ float vec_dot_q2_K_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| const block_q2_K * bq2_K = (const block_q2_K *) vbq; | |
| const int bq8_offset = QR2_K * (iqs / QI8_1); | |
| const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2); | |
| const uint8_t * scales = bq2_K->scales + scale_offset; | |
| const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs); | |
| int u[QR2_K]; | |
| float d8[QR2_K]; | |
| for (int i = 0; i < QR2_K; ++ i) { | |
| u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1); | |
| d8[i] = __low2half(bq8_1[bq8_offset + i].ds); | |
| } | |
| return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8); | |
| } | |
| template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { | |
| (void)x_qh; | |
| __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; | |
| __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI2_K) + mmq_y/QI2_K]; | |
| __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; | |
| *x_ql = tile_x_ql; | |
| *x_dm = tile_x_dm; | |
| *x_sc = tile_x_sc; | |
| } | |
| template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q2_K( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { | |
| (void)x_qh; | |
| GGML_CUDA_ASSUME(i_offset >= 0); | |
| GGML_CUDA_ASSUME(i_offset < nwarps); | |
| GGML_CUDA_ASSUME(k >= 0); | |
| GGML_CUDA_ASSUME(k < WARP_SIZE); | |
| const int kbx = k / QI2_K; | |
| const int kqsx = k % QI2_K; | |
| const block_q2_K * bx0 = (const block_q2_K *) vx; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { | |
| int i = i0 + i_offset; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx; | |
| x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); | |
| } | |
| const int blocks_per_tile_x_row = WARP_SIZE / QI2_K; | |
| const int kbxd = k % blocks_per_tile_x_row; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) { | |
| int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd; | |
| x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm; | |
| } | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { | |
| int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4); | |
| x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4)); | |
| } | |
| } | |
| static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { | |
| (void)x_qh; | |
| const int kbx = k / QI2_K; | |
| const int ky = (k % QI2_K) * QR2_K; | |
| const float * y_df = (const float *) y_ds; | |
| int v[QR2_K*VDR_Q2_K_Q8_1_MMQ]; | |
| const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2); | |
| const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2)); | |
| for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) { | |
| v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303; | |
| } | |
| const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4; | |
| const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE; | |
| return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]); | |
| } | |
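| // q3_K * q8_1 dot product for the mul_mat_vec_q path; the high bits come from hmask and are | |
| // pre-inverted so that a set mask bit results in 4 being subtracted from the corresponding quant | |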
| static __device__ __forceinline__ float vec_dot_q3_K_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| const block_q3_K * bq3_K = (const block_q3_K *) vbq; | |
| const int bq8_offset = QR3_K * (iqs / (QI3_K/2)); | |
| const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2); | |
| const float d = bq3_K->d; | |
| const int vl = get_int_from_uint8(bq3_K->qs, iqs); | |
| // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted | |
| const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset; | |
| int u[QR3_K]; | |
| float d8[QR3_K]; | |
| for (int i = 0; i < QR3_K; ++i) { | |
| u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1); | |
| d8[i] = __low2half(bq8_1[bq8_offset + i].ds); | |
| } | |
| return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8); | |
| } | |
| template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { | |
| __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; | |
| __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K]; | |
| __shared__ int tile_x_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2]; | |
| __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; | |
| *x_ql = tile_x_ql; | |
| *x_dm = tile_x_dm; | |
| *x_qh = tile_x_qh; | |
| *x_sc = tile_x_sc; | |
| } | |
| template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q3_K( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { | |
| GGML_CUDA_ASSUME(i_offset >= 0); | |
| GGML_CUDA_ASSUME(i_offset < nwarps); | |
| GGML_CUDA_ASSUME(k >= 0); | |
| GGML_CUDA_ASSUME(k < WARP_SIZE); | |
| const int kbx = k / QI3_K; | |
| const int kqsx = k % QI3_K; | |
| const block_q3_K * bx0 = (const block_q3_K *) vx; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { | |
| int i = i0 + i_offset; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx; | |
| x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); | |
| } | |
| const int blocks_per_tile_x_row = WARP_SIZE / QI3_K; | |
| const int kbxd = k % blocks_per_tile_x_row; | |
| float * x_dmf = (float *) x_dm; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) { | |
| int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd; | |
| x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d; | |
| } | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) { | |
| int i = i0 + i_offset * 2 + k / (WARP_SIZE/2); | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2); | |
| // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted | |
| x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2)); | |
| } | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { | |
| int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4); | |
| const int ksc = k % (QI3_K/4); | |
| const int ksc_low = ksc % (QI3_K/8); | |
| const int shift_low = 4 * (ksc / (QI3_K/8)); | |
| const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F; | |
| const int ksc_high = QI3_K/8; | |
| const int shift_high = 2 * ksc; | |
| const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030; | |
| const int sc = __vsubss4(sc_low | sc_high, 0x20202020); | |
| x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc; | |
| } | |
| } | |
| static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { | |
| const int kbx = k / QI3_K; | |
| const int ky = (k % QI3_K) * QR3_K; | |
| const float * x_dmf = (const float *) x_dm; | |
| const float * y_df = (const float *) y_ds; | |
| const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4; | |
| int v[QR3_K*VDR_Q3_K_Q8_1_MMQ]; | |
| for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) { | |
| const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2); | |
| const int shift = 2 * ((ky % 32) / 8); | |
| const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303; | |
| const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8); | |
| const int vlh = (vh << 2) & 0x04040404; | |
| v[l] = __vsubss4(vll, vlh); | |
| } | |
| const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE; | |
| return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]); | |
| } | |
| static __device__ __forceinline__ float vec_dot_q4_K_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| #ifndef GGML_QKK_64 | |
| const block_q4_K * bq4_K = (const block_q4_K *) vbq; | |
| int v[2]; | |
| int u[2*QR4_K]; | |
| float d8[QR4_K]; | |
| // iqs is in 0,2..30. bq8_offset = iqs/4 -> bq8_offset = 0, 2, 4, 6 | |
| const int bq8_offset = QR4_K * ((iqs/2) / (QI8_1/2)); | |
| // iqs = 0....3 -> bq8_offset = 0, want q4_offset = 0, 4, 8, 12 | |
| // iqs = 4....7 -> bq8_offset = 2, want q4_offset = 32, 36, 40, 44 | |
| // iqs = 8...11 -> bq8_offset = 4, want q4_offset = 64, 68, 72, 76 | |
| // iqs = 12..15 -> bq8_offset = 6, want q4_offset = 96, 100, 104, 108 | |
| const int * q4 = (const int *)(bq4_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4)); | |
| v[0] = q4[0]; | |
| v[1] = q4[4]; | |
| const uint16_t * scales = (const uint16_t *)bq4_K->scales; | |
| uint16_t aux[2]; | |
| const int j = bq8_offset/2; | |
| if (j < 2) { | |
| aux[0] = scales[j+0] & 0x3f3f; | |
| aux[1] = scales[j+2] & 0x3f3f; | |
| } else { | |
| aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2); | |
| aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2); | |
| } | |
| const uint8_t * sc = (const uint8_t *)aux; | |
| const uint8_t * m = sc + 2; | |
| for (int i = 0; i < QR4_K; ++i) { | |
| const block_q8_1 * bq8i = bq8_1 + bq8_offset + i; | |
| d8[i] = __low2half(bq8i->ds); | |
| const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4); | |
| u[2*i+0] = q8[0]; | |
| u[2*i+1] = q8[4]; | |
| } | |
| return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, bq4_K->dm, d8); | |
| #else | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| const block_q4_K * bq4_K = (const block_q4_K *) vbq; | |
| float sumf_d = 0.0f; | |
| float sumf_m = 0.0f; | |
| uint16_t aux16[2]; | |
| const uint8_t * s = (const uint8_t *)aux16; | |
| const uint16_t * a = (const uint16_t *)bq4_K->scales; | |
| aux16[0] = a[0] & 0x0f0f; | |
| aux16[1] = (a[0] >> 4) & 0x0f0f; | |
| const float dall = bq4_K->dm[0]; | |
| const float dmin = bq4_K->dm[1]; | |
| const float d8_1 = __low2float(bq8_1[0].ds); | |
| const float d8_2 = __low2float(bq8_1[1].ds); | |
| const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2)); | |
| const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4); | |
| const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2)); | |
| const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4); | |
| const int * q4 = (const int *)bq4_K->qs + (iqs/2); | |
| const int v1 = q4[0]; | |
| const int v2 = q4[4]; | |
| const int dot1 = __dp4a(ui2, v2 & 0x0f0f0f0f, __dp4a(ui1, v1 & 0x0f0f0f0f, 0)); | |
| const int dot2 = __dp4a(ui4, (v2 >> 4) & 0x0f0f0f0f, __dp4a(ui3, (v1 >> 4) & 0x0f0f0f0f, 0)); | |
| const int dot3 = __dp4a(0x01010101, ui2, __dp4a(0x01010101, ui1, 0)); | |
| const int dot4 = __dp4a(0x01010101, ui4, __dp4a(0x01010101, ui3, 0)); | |
| sumf_d += d8_1 * (dot1 * s[0]) + d8_2 * (dot2 * s[1]); | |
| sumf_m += d8_1 * (dot3 * s[2]) + d8_2 * (dot4 * s[3]); | |
| return dall * sumf_d - dmin * sumf_m; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| #endif | |
| } | |
| template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { | |
| (void)x_qh; | |
| __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; | |
| __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K]; | |
| __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; | |
| *x_ql = tile_x_ql; | |
| *x_dm = tile_x_dm; | |
| *x_sc = tile_x_sc; | |
| } | |
| template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_K( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { | |
| (void)x_qh; | |
| GGML_CUDA_ASSUME(i_offset >= 0); | |
| GGML_CUDA_ASSUME(i_offset < nwarps); | |
| GGML_CUDA_ASSUME(k >= 0); | |
| GGML_CUDA_ASSUME(k < WARP_SIZE); | |
| const int kbx = k / QI4_K; // == 0 if QK_K == 256 | |
| const int kqsx = k % QI4_K; // == k if QK_K == 256 | |
| const block_q4_K * bx0 = (const block_q4_K *) vx; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { | |
| int i = i0 + i_offset; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx; | |
| x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); | |
| } | |
| const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256 | |
| const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) { | |
| int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd; | |
| #if QK_K == 256 | |
| x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm; | |
| #else | |
| x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = {bxi->dm[0], bxi->dm[1]}; | |
| #endif | |
| } | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { | |
| int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8); | |
| const int * scales = (const int *) bxi->scales; | |
| const int ksc = k % (WARP_SIZE/8); | |
| // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 | |
| int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits | |
| scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits | |
| x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; | |
| } | |
| } | |
| static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { | |
| (void)x_qh; | |
| const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8); | |
| const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE; | |
| return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8, | |
| x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]); | |
| } | |
| static __device__ __forceinline__ float vec_dot_q5_K_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| #ifndef GGML_QKK_64 | |
| const block_q5_K * bq5_K = (const block_q5_K *) vbq; | |
| int vl[2]; | |
| int vh[2]; | |
| int u[2*QR5_K]; | |
| float d8[QR5_K]; | |
| const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2)); | |
| const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4)); | |
| const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4)); | |
| vl[0] = ql[0]; | |
| vl[1] = ql[4]; | |
| vh[0] = qh[0] >> bq8_offset; | |
| vh[1] = qh[4] >> bq8_offset; | |
| const uint16_t * scales = (const uint16_t *)bq5_K->scales; | |
| uint16_t aux[2]; | |
| const int j = bq8_offset/2; | |
| if (j < 2) { | |
| aux[0] = scales[j+0] & 0x3f3f; | |
| aux[1] = scales[j+2] & 0x3f3f; | |
| } else { | |
| aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2); | |
| aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2); | |
| } | |
| const uint8_t * sc = (const uint8_t *)aux; | |
| const uint8_t * m = sc + 2; | |
| for (int i = 0; i < QR5_K; ++i) { | |
| const block_q8_1 * bq8i = bq8_1 + bq8_offset + i; | |
| d8[i] = __low2float(bq8i->ds); | |
| const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4); | |
| u[2*i+0] = q8[0]; | |
| u[2*i+1] = q8[4]; | |
| } | |
| return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8); | |
| #else | |
| #if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics | |
| const block_q5_K * bq5_K = (const block_q5_K *) vbq; | |
| const int8_t * s = bq5_K->scales; | |
| const float d = bq5_K->d; | |
| const float d8_1 = __low2half(bq8_1[0].ds); | |
| const float d8_2 = __low2half(bq8_1[1].ds); | |
| const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2)); | |
| const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4); | |
| const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2)); | |
| const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4); | |
| const int * ql = (const int *)bq5_K->qs + (iqs/2); | |
| const int vl1 = ql[0]; | |
| const int vl2 = ql[4]; | |
| const int step = 4 * (iqs/2); // 0, 4, 8, 12 | |
| const int im = step/8; // = 0 for iqs = 0, 2, = 1 for iqs = 4, 6 | |
| const int in = step%8; // 0, 4, 0, 4 | |
| const int vh = (*((const int *)(bq5_K->qh + in))) >> im; | |
| const int v1 = (((vh << 4) & 0x10101010) ^ 0x10101010) | ((vl1 >> 0) & 0x0f0f0f0f); | |
| const int v2 = (((vh << 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 0) & 0x0f0f0f0f); | |
| const int v3 = (((vh >> 0) & 0x10101010) ^ 0x10101010) | ((vl1 >> 4) & 0x0f0f0f0f); | |
| const int v4 = (((vh >> 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 4) & 0x0f0f0f0f); | |
| const float sumf_d = d8_1 * (__dp4a(ui1, v1, 0) * s[0] + __dp4a(ui2, v2, 0) * s[1]) | |
| + d8_2 * (__dp4a(ui3, v3, 0) * s[2] + __dp4a(ui4, v4, 0) * s[3]); | |
| return d * sumf_d; | |
| #else | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= MIN_CC_DP4A | |
| #endif | |
| } | |
| template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { | |
| (void)x_qh; | |
| __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; | |
| __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K]; | |
| __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; | |
| *x_ql = tile_x_ql; | |
| *x_dm = tile_x_dm; | |
| *x_sc = tile_x_sc; | |
| } | |
| template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_K( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { | |
| (void)x_qh; | |
| GGML_CUDA_ASSUME(i_offset >= 0); | |
| GGML_CUDA_ASSUME(i_offset < nwarps); | |
| GGML_CUDA_ASSUME(k >= 0); | |
| GGML_CUDA_ASSUME(k < WARP_SIZE); | |
| const int kbx = k / QI5_K; // == 0 if QK_K == 256 | |
| const int kqsx = k % QI5_K; // == k if QK_K == 256 | |
| const block_q5_K * bx0 = (const block_q5_K *) vx; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { | |
| int i = i0 + i_offset; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx; | |
| const int ky = QR5_K*kqsx; | |
| const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx); | |
| const int ql0 = (ql >> 0) & 0x0F0F0F0F; | |
| const int ql1 = (ql >> 4) & 0x0F0F0F0F; | |
| const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4)); | |
| const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010; | |
| const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010; | |
| const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0; | |
| const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4); | |
| x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0; | |
| x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1; | |
| } | |
| const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256 | |
| const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) { | |
| int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd; | |
| x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm; | |
| } | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { | |
| int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8); | |
| const int * scales = (const int *) bxi->scales; | |
| const int ksc = k % (WARP_SIZE/8); | |
| // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 | |
| int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits | |
| scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits | |
| x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; | |
| } | |
| } | |
| static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { | |
| (void)x_qh; | |
| const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8); | |
| const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k; | |
| const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE; | |
| return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8, | |
| x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]); | |
| } | |
| static __device__ __forceinline__ float vec_dot_q6_K_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| const block_q6_K * bq6_K = (const block_q6_K *) vbq; | |
| const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4); | |
| const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8); | |
| const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4)); | |
| const int vl = get_int_from_uint8(bq6_K->ql, iqs); | |
| const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift; | |
| const int8_t * scales = bq6_K->scales + scale_offset; | |
| int u[QR6_K]; | |
| float d8[QR6_K]; | |
| for (int i = 0; i < QR6_K; ++i) { | |
| u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1); | |
| d8[i] = __low2half(bq8_1[bq8_offset + 2*i].ds); | |
| } | |
| return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8); | |
| } | |
| template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { | |
| (void)x_qh; | |
| __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; | |
| __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K]; | |
| __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; | |
| *x_ql = tile_x_ql; | |
| *x_dm = tile_x_dm; | |
| *x_sc = tile_x_sc; | |
| } | |
| template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q6_K( | |
| const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, | |
| int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { | |
| (void)x_qh; | |
| GGML_CUDA_ASSUME(i_offset >= 0); | |
| GGML_CUDA_ASSUME(i_offset < nwarps); | |
| GGML_CUDA_ASSUME(k >= 0); | |
| GGML_CUDA_ASSUME(k < WARP_SIZE); | |
| const int kbx = k / QI6_K; // == 0 if QK_K == 256 | |
| const int kqsx = k % QI6_K; // == k if QK_K == 256 | |
| const block_q6_K * bx0 = (const block_q6_K *) vx; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { | |
| int i = i0 + i_offset; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx; | |
| const int ky = QR6_K*kqsx; | |
| const int ql = get_int_from_uint8(bxi->ql, kqsx); | |
| const int ql0 = (ql >> 0) & 0x0F0F0F0F; | |
| const int ql1 = (ql >> 4) & 0x0F0F0F0F; | |
| const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4)); | |
| const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030; | |
| const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030; | |
| const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0; | |
| const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2); | |
| x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); | |
| x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); | |
| } | |
| const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256 | |
| const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 | |
| float * x_dmf = (float *) x_dm; | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) { | |
| int i = (i0 + i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd; | |
| x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = bxi->d; | |
| } | |
| for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { | |
| int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; | |
| if (need_check) { | |
| i = min(i, i_max); | |
| } | |
| const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4; | |
| x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8)); | |
| } | |
| } | |
| static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat( | |
| const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, | |
| const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { | |
| (void)x_qh; | |
| const float * x_dmf = (const float *) x_dm; | |
| const float * y_df = (const float *) y_ds; | |
| const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]); | |
| const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k; | |
| const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE; | |
| return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]); | |
| } | |
| static __device__ __forceinline__ float vec_dot_iq2_xxs_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| #if QK_K == 256 | |
| const block_iq2_xxs * bq2 = (const block_iq2_xxs *) vbq; | |
| #if QR2_XXS == 8 | |
| const int ib32 = iqs; | |
| const uint16_t * q2 = bq2->qs + 4*ib32; | |
| const uint8_t * aux8 = (const uint8_t *)q2; | |
| const int8_t * q8 = bq8_1[ib32].qs; | |
| uint32_t aux32 = q2[2] | (q2[3] << 16); | |
| int sumi = 0; | |
| for (int l = 0; l < 4; ++l) { | |
| const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); | |
| const uint8_t signs = ksigns_iq2xs[aux32 & 127]; | |
| for (int j = 0; j < 8; ++j) { | |
| sumi += q8[j] * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1); | |
| } | |
| q8 += 8; | |
| aux32 >>= 7; | |
| } | |
| const float d = (float)bq2->d * (0.5f + aux32) * (float)bq8_1[ib32].ds.x * 0.25f; | |
| return d * sumi; | |
| #else | |
| // iqs is 0...15 | |
| const int ib32 = iqs/2; | |
| const int il = iqs%2; | |
| const uint16_t * q2 = bq2->qs + 4*ib32; | |
| const uint8_t * aux8 = (const uint8_t *)q2; | |
| const uint8_t * grid1 = (const uint8_t *)(iq2xxs_grid + aux8[2*il+0]); | |
| const uint8_t * grid2 = (const uint8_t *)(iq2xxs_grid + aux8[2*il+1]); | |
| const uint32_t aux32 = q2[2] | (q2[3] << 16); | |
| const float d = (float)bq2->d * (0.5f + (aux32 >> 28)) * (float)bq8_1[ib32].ds.x * 0.25f; | |
| const uint8_t signs1 = ksigns_iq2xs[(aux32 >> 14*il) & 127]; | |
| const uint8_t signs2 = ksigns_iq2xs[(aux32 >> (14*il + 7)) & 127]; | |
| const int8_t * q8 = bq8_1[ib32].qs + 16*il; | |
| int sumi1 = 0, sumi2 = 0; | |
| for (int j = 0; j < 8; ++j) { | |
| sumi1 += q8[j+0] * grid1[j] * (signs1 & kmask_iq2xs[j] ? -1 : 1); | |
| sumi2 += q8[j+8] * grid2[j] * (signs2 & kmask_iq2xs[j] ? -1 : 1); | |
| } | |
| return d * (sumi1 + sumi2); | |
| #endif | |
| #else | |
| assert(false); | |
| return 0.f; | |
| #endif | |
| } | |
| static __device__ __forceinline__ float vec_dot_iq2_xs_q8_1( | |
| const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { | |
| #if QK_K == 256 | |
| const block_iq2_xs * bq2 = (const block_iq2_xs *) vbq; | |
| const int ib32 = iqs; | |
| const uint16_t * q2 = bq2->qs + 4*ib32; | |
| const int8_t * q8 = bq8_1[ib32].qs; | |
| const uint8_t ls1 = bq2->scales[ib32] & 0xf; | |
| const uint8_t ls2 = bq2->scales[ib32] >> 4; | |
| int sumi1 = 0; | |
| for (int l = 0; l < 2; ++l) { | |
| const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); | |
| const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; | |
| for (int j = 0; j < 8; ++j) { | |
| sumi1 += q8[j] * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1); | |
| } | |
| q8 += 8; | |
| } | |
| int sumi2 = 0; | |
| for (int l = 2; l < 4; ++l) { | |
| const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); | |
| const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; | |
| for (int j = 0; j < 8; ++j) { | |
| sumi2 += q8[j] * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1); | |
| } | |
| q8 += 8; | |
| } | |
| const float d = (float)bq2->d * (float)bq8_1[ib32].ds.x * 0.25f; | |
| return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2); | |
| #else | |
| assert(false); | |
| return 0.f; | |
| #endif | |
| } | |
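| // generic tiled matrix multiplication of a quantized x matrix with a q8_1-quantized y matrix: | |
| // each thread block loads x tiles into shared memory via load_tiles, streams the matching q8_1 tiles of y, | |
| // accumulates vec_dot results in registers and writes an mmq_y x mmq_x block of dst; | |
| // need_sum selects whether the q8_1 block sums are kept or only the scale is converted to f32 ahead of time | |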
| template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x, int mmq_y, int nwarps, | |
| allocate_tiles_cuda_t allocate_tiles, load_tiles_cuda_t load_tiles, int vdr, vec_dot_q_mul_mat_cuda_t vec_dot> | |
| static __device__ __forceinline__ void mul_mat_q( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| const block_q_t * x = (const block_q_t *) vx; | |
| const block_q8_1 * y = (const block_q8_1 *) vy; | |
| const int blocks_per_row_x = ncols_x / qk; | |
| const int blocks_per_col_y = nrows_y / QK8_1; | |
| const int blocks_per_warp = WARP_SIZE / qi; | |
| const int & ncols_dst = ncols_y; | |
| const int row_dst_0 = blockIdx.x*mmq_y; | |
| const int & row_x_0 = row_dst_0; | |
| const int col_dst_0 = blockIdx.y*mmq_x; | |
| const int & col_y_0 = col_dst_0; | |
| int * tile_x_ql = nullptr; | |
| half2 * tile_x_dm = nullptr; | |
| int * tile_x_qh = nullptr; | |
| int * tile_x_sc = nullptr; | |
| allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc); | |
| __shared__ int tile_y_qs[mmq_x * WARP_SIZE]; | |
| __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1]; | |
| float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}}; | |
| for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) { | |
| load_tiles(x + row_x_0*blocks_per_row_x + ib0, tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, | |
| threadIdx.y, nrows_x-row_x_0-1, threadIdx.x, blocks_per_row_x); | |
| for (int ir = 0; ir < qr; ++ir) { | |
| const int kqs = ir*WARP_SIZE + threadIdx.x; | |
| const int kbxd = kqs / QI8_1; | |
| for (int i = 0; i < mmq_x; i += nwarps) { | |
| const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses | |
| const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd]; | |
| const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE; | |
| tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1); | |
| } | |
| for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) { | |
| const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x; | |
| const int kby = threadIdx.x % (WARP_SIZE/QI8_1); | |
| const int col_y_eff = min(col_y_0 + ids, ncols_y-1); | |
| // if the sum is not needed it's faster to transform the scale to f32 ahead of time | |
| const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE/QI8_1) + kby].ds; | |
| half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby]; | |
| if (need_sum) { | |
| *dsi_dst = *dsi_src; | |
| } else { | |
| float * dfi_dst = (float *) dsi_dst; | |
| *dfi_dst = __low2half(*dsi_src); | |
| } | |
| } | |
| __syncthreads(); | |
| // #pragma unroll // unrolling this loop causes too much register pressure | |
| for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) { | |
| for (int j = 0; j < mmq_x; j += nwarps) { | |
| for (int i = 0; i < mmq_y; i += WARP_SIZE) { | |
| sum[i/WARP_SIZE][j/nwarps] += vec_dot( | |
| tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds, | |
| threadIdx.x + i, threadIdx.y + j, k); | |
| } | |
| } | |
| } | |
| __syncthreads(); | |
| } | |
| } | |
| for (int j = 0; j < mmq_x; j += nwarps) { | |
| const int col_dst = col_dst_0 + j + threadIdx.y; | |
| if (col_dst >= ncols_dst) { | |
| return; | |
| } | |
| for (int i = 0; i < mmq_y; i += WARP_SIZE) { | |
| const int row_dst = row_dst_0 + threadIdx.x + i; | |
| if (row_dst >= nrows_dst) { | |
| continue; | |
| } | |
| dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps]; | |
| } | |
| } | |
| } | |
| template <bool need_check> static __global__ void | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q4_0_RDNA2, 2) | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| mul_mat_q4_0( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| const int mmq_x = MMQ_X_Q4_0_RDNA2; | |
| const int mmq_y = MMQ_Y_Q4_0_RDNA2; | |
| const int nwarps = NWARPS_Q4_0_RDNA2; | |
| #else | |
| const int mmq_x = MMQ_X_Q4_0_RDNA1; | |
| const int mmq_y = MMQ_Y_Q4_0_RDNA1; | |
| const int nwarps = NWARPS_Q4_0_RDNA1; | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>, | |
| load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= CC_VOLTA | |
| const int mmq_x = MMQ_X_Q4_0_AMPERE; | |
| const int mmq_y = MMQ_Y_Q4_0_AMPERE; | |
| const int nwarps = NWARPS_Q4_0_AMPERE; | |
| mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>, | |
| load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= MIN_CC_DP4A | |
| const int mmq_x = MMQ_X_Q4_0_PASCAL; | |
| const int mmq_y = MMQ_Y_Q4_0_PASCAL; | |
| const int nwarps = NWARPS_Q4_0_PASCAL; | |
| mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>, | |
| load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #else | |
| (void) vec_dot_q4_0_q8_1_mul_mat; | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= CC_VOLTA | |
| } | |
| template <bool need_check> static __global__ void | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q4_1_RDNA2, 2) | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| #elif __CUDA_ARCH__ < CC_VOLTA | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q4_1_PASCAL, 2) | |
| #endif // __CUDA_ARCH__ < CC_VOLTA | |
| mul_mat_q4_1( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| const int mmq_x = MMQ_X_Q4_1_RDNA2; | |
| const int mmq_y = MMQ_Y_Q4_1_RDNA2; | |
| const int nwarps = NWARPS_Q4_1_RDNA2; | |
| #else | |
| const int mmq_x = MMQ_X_Q4_1_RDNA1; | |
| const int mmq_y = MMQ_Y_Q4_1_RDNA1; | |
| const int nwarps = NWARPS_Q4_1_RDNA1; | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>, | |
| load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= CC_VOLTA | |
| const int mmq_x = MMQ_X_Q4_1_AMPERE; | |
| const int mmq_y = MMQ_Y_Q4_1_AMPERE; | |
| const int nwarps = NWARPS_Q4_1_AMPERE; | |
| mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>, | |
| load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= MIN_CC_DP4A | |
| const int mmq_x = MMQ_X_Q4_1_PASCAL; | |
| const int mmq_y = MMQ_Y_Q4_1_PASCAL; | |
| const int nwarps = NWARPS_Q4_1_PASCAL; | |
| mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>, | |
| load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #else | |
| (void) vec_dot_q4_1_q8_1_mul_mat; | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= CC_VOLTA | |
| } | |
| template <bool need_check> static __global__ void | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q5_0_RDNA2, 2) | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| mul_mat_q5_0( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| const int mmq_x = MMQ_X_Q5_0_RDNA2; | |
| const int mmq_y = MMQ_Y_Q5_0_RDNA2; | |
| const int nwarps = NWARPS_Q5_0_RDNA2; | |
| #else | |
| const int mmq_x = MMQ_X_Q5_0_RDNA1; | |
| const int mmq_y = MMQ_Y_Q5_0_RDNA1; | |
| const int nwarps = NWARPS_Q5_0_RDNA1; | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>, | |
| load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= CC_VOLTA | |
| const int mmq_x = MMQ_X_Q5_0_AMPERE; | |
| const int mmq_y = MMQ_Y_Q5_0_AMPERE; | |
| const int nwarps = NWARPS_Q5_0_AMPERE; | |
| mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>, | |
| load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= MIN_CC_DP4A | |
| const int mmq_x = MMQ_X_Q5_0_PASCAL; | |
| const int mmq_y = MMQ_Y_Q5_0_PASCAL; | |
| const int nwarps = NWARPS_Q5_0_PASCAL; | |
| mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>, | |
| load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #else | |
| (void) vec_dot_q5_0_q8_1_mul_mat; | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= CC_VOLTA | |
| } | |
| template <bool need_check> static __global__ void | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q5_1_RDNA2, 2) | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| mul_mat_q5_1( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| const int mmq_x = MMQ_X_Q5_1_RDNA2; | |
| const int mmq_y = MMQ_Y_Q5_1_RDNA2; | |
| const int nwarps = NWARPS_Q5_1_RDNA2; | |
| #else | |
| const int mmq_x = MMQ_X_Q5_1_RDNA1; | |
| const int mmq_y = MMQ_Y_Q5_1_RDNA1; | |
| const int nwarps = NWARPS_Q5_1_RDNA1; | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>, | |
| load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= CC_VOLTA | |
| const int mmq_x = MMQ_X_Q5_1_AMPERE; | |
| const int mmq_y = MMQ_Y_Q5_1_AMPERE; | |
| const int nwarps = NWARPS_Q5_1_AMPERE; | |
| mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>, | |
| load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= MIN_CC_DP4A | |
| const int mmq_x = MMQ_X_Q5_1_PASCAL; | |
| const int mmq_y = MMQ_Y_Q5_1_PASCAL; | |
| const int nwarps = NWARPS_Q5_1_PASCAL; | |
| mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>, | |
| load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #else | |
| (void) vec_dot_q5_1_q8_1_mul_mat; | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= CC_VOLTA | |
| } | |
| template <bool need_check> static __global__ void | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q8_0_RDNA2, 2) | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| mul_mat_q8_0( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| const int mmq_x = MMQ_X_Q8_0_RDNA2; | |
| const int mmq_y = MMQ_Y_Q8_0_RDNA2; | |
| const int nwarps = NWARPS_Q8_0_RDNA2; | |
| #else | |
| const int mmq_x = MMQ_X_Q8_0_RDNA1; | |
| const int mmq_y = MMQ_Y_Q8_0_RDNA1; | |
| const int nwarps = NWARPS_Q8_0_RDNA1; | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>, | |
| load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= CC_VOLTA | |
| const int mmq_x = MMQ_X_Q8_0_AMPERE; | |
| const int mmq_y = MMQ_Y_Q8_0_AMPERE; | |
| const int nwarps = NWARPS_Q8_0_AMPERE; | |
| mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>, | |
| load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= MIN_CC_DP4A | |
| const int mmq_x = MMQ_X_Q8_0_PASCAL; | |
| const int mmq_y = MMQ_Y_Q8_0_PASCAL; | |
| const int nwarps = NWARPS_Q8_0_PASCAL; | |
| mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>, | |
| load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #else | |
| (void) vec_dot_q8_0_q8_1_mul_mat; | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= CC_VOLTA | |
| } | |
| template <bool need_check> static __global__ void | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q2_K_RDNA2, 2) | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| mul_mat_q2_K( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| const int mmq_x = MMQ_X_Q2_K_RDNA2; | |
| const int mmq_y = MMQ_Y_Q2_K_RDNA2; | |
| const int nwarps = NWARPS_Q2_K_RDNA2; | |
| #else | |
| const int mmq_x = MMQ_X_Q2_K_RDNA1; | |
| const int mmq_y = MMQ_Y_Q2_K_RDNA1; | |
| const int nwarps = NWARPS_Q2_K_RDNA1; | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>, | |
| load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= CC_VOLTA | |
| const int mmq_x = MMQ_X_Q2_K_AMPERE; | |
| const int mmq_y = MMQ_Y_Q2_K_AMPERE; | |
| const int nwarps = NWARPS_Q2_K_AMPERE; | |
| mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>, | |
| load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= MIN_CC_DP4A | |
| const int mmq_x = MMQ_X_Q2_K_PASCAL; | |
| const int mmq_y = MMQ_Y_Q2_K_PASCAL; | |
| const int nwarps = NWARPS_Q2_K_PASCAL; | |
| mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>, | |
| load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #else | |
| (void) vec_dot_q2_K_q8_1_mul_mat; | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= CC_VOLTA | |
| } | |
| template <bool need_check> static __global__ void | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q3_K_RDNA2, 2) | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| #elif __CUDA_ARCH__ < CC_VOLTA | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q3_K_PASCAL, 2) | |
| #endif // __CUDA_ARCH__ < CC_VOLTA | |
| mul_mat_q3_K( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| const int mmq_x = MMQ_X_Q3_K_RDNA2; | |
| const int mmq_y = MMQ_Y_Q3_K_RDNA2; | |
| const int nwarps = NWARPS_Q3_K_RDNA2; | |
| #else | |
| const int mmq_x = MMQ_X_Q3_K_RDNA1; | |
| const int mmq_y = MMQ_Y_Q3_K_RDNA1; | |
| const int nwarps = NWARPS_Q3_K_RDNA1; | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>, | |
| load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= CC_VOLTA | |
| const int mmq_x = MMQ_X_Q3_K_AMPERE; | |
| const int mmq_y = MMQ_Y_Q3_K_AMPERE; | |
| const int nwarps = NWARPS_Q3_K_AMPERE; | |
| mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>, | |
| load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= MIN_CC_DP4A | |
| const int mmq_x = MMQ_X_Q3_K_PASCAL; | |
| const int mmq_y = MMQ_Y_Q3_K_PASCAL; | |
| const int nwarps = NWARPS_Q3_K_PASCAL; | |
| mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>, | |
| load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #else | |
| (void) vec_dot_q3_K_q8_1_mul_mat; | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= CC_VOLTA | |
| } | |
| template <bool need_check> static __global__ void | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q4_K_RDNA2, 2) | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| #elif __CUDA_ARCH__ < CC_VOLTA | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q4_K_PASCAL, 2) | |
| #endif // __CUDA_ARCH__ < CC_VOLTA | |
| mul_mat_q4_K( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| const int mmq_x = MMQ_X_Q4_K_RDNA2; | |
| const int mmq_y = MMQ_Y_Q4_K_RDNA2; | |
| const int nwarps = NWARPS_Q4_K_RDNA2; | |
| #else | |
| const int mmq_x = MMQ_X_Q4_K_RDNA1; | |
| const int mmq_y = MMQ_Y_Q4_K_RDNA1; | |
| const int nwarps = NWARPS_Q4_K_RDNA1; | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>, | |
| load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= CC_VOLTA | |
| const int mmq_x = MMQ_X_Q4_K_AMPERE; | |
| const int mmq_y = MMQ_Y_Q4_K_AMPERE; | |
| const int nwarps = NWARPS_Q4_K_AMPERE; | |
| mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>, | |
| load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= MIN_CC_DP4A | |
| const int mmq_x = MMQ_X_Q4_K_PASCAL; | |
| const int mmq_y = MMQ_Y_Q4_K_PASCAL; | |
| const int nwarps = NWARPS_Q4_K_PASCAL; | |
| mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>, | |
| load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #else | |
| (void) vec_dot_q4_K_q8_1_mul_mat; | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= CC_VOLTA | |
| } | |
| template <bool need_check> static __global__ void | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q5_K_RDNA2, 2) | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| mul_mat_q5_K( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| const int mmq_x = MMQ_X_Q5_K_RDNA2; | |
| const int mmq_y = MMQ_Y_Q5_K_RDNA2; | |
| const int nwarps = NWARPS_Q5_K_RDNA2; | |
| #else | |
| const int mmq_x = MMQ_X_Q5_K_RDNA1; | |
| const int mmq_y = MMQ_Y_Q5_K_RDNA1; | |
| const int nwarps = NWARPS_Q5_K_RDNA1; | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>, | |
| load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= CC_VOLTA | |
| const int mmq_x = MMQ_X_Q5_K_AMPERE; | |
| const int mmq_y = MMQ_Y_Q5_K_AMPERE; | |
| const int nwarps = NWARPS_Q5_K_AMPERE; | |
| mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>, | |
| load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= MIN_CC_DP4A | |
| const int mmq_x = MMQ_X_Q5_K_PASCAL; | |
| const int mmq_y = MMQ_Y_Q5_K_PASCAL; | |
| const int nwarps = NWARPS_Q5_K_PASCAL; | |
| mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>, | |
| load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #else | |
| (void) vec_dot_q5_K_q8_1_mul_mat; | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= CC_VOLTA | |
| } | |
| template <bool need_check> static __global__ void | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q6_K_RDNA2, 2) | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| #elif __CUDA_ARCH__ < CC_VOLTA | |
| __launch_bounds__(WARP_SIZE*NWARPS_Q6_K_PASCAL, 2) | |
| #endif // __CUDA_ARCH__ < CC_VOLTA | |
| mul_mat_q6_K( | |
| const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| #if defined(RDNA3) || defined(RDNA2) | |
| const int mmq_x = MMQ_X_Q6_K_RDNA2; | |
| const int mmq_y = MMQ_Y_Q6_K_RDNA2; | |
| const int nwarps = NWARPS_Q6_K_RDNA2; | |
| #else | |
| const int mmq_x = MMQ_X_Q6_K_RDNA1; | |
| const int mmq_y = MMQ_Y_Q6_K_RDNA1; | |
| const int nwarps = NWARPS_Q6_K_RDNA1; | |
| #endif // defined(RDNA3) || defined(RDNA2) | |
| mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>, | |
| load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= CC_VOLTA | |
| const int mmq_x = MMQ_X_Q6_K_AMPERE; | |
| const int mmq_y = MMQ_Y_Q6_K_AMPERE; | |
| const int nwarps = NWARPS_Q6_K_AMPERE; | |
| mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>, | |
| load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #elif __CUDA_ARCH__ >= MIN_CC_DP4A | |
| const int mmq_x = MMQ_X_Q6_K_PASCAL; | |
| const int mmq_y = MMQ_Y_Q6_K_PASCAL; | |
| const int nwarps = NWARPS_Q6_K_PASCAL; | |
| mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>, | |
| load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| #else | |
| (void) vec_dot_q6_K_q8_1_mul_mat; | |
| bad_arch(); | |
| #endif // __CUDA_ARCH__ >= CC_VOLTA | |
| } | |
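| // quantized matrix * quantized vector multiplication: each warp handles one row of x, computes partial | |
| // dot products with vec_dot_q_cuda and reduces them across the warp with __shfl_xor_sync | |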
| template <int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda> | |
| static __global__ void mul_mat_vec_q(const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols, const int nrows) { | |
| const int row = blockIdx.x*blockDim.y + threadIdx.y; | |
| if (row >= nrows) { | |
| return; | |
| } | |
| const int blocks_per_row = ncols / qk; | |
| const int blocks_per_warp = vdr * WARP_SIZE / qi; | |
| // partial sum for each thread | |
| float tmp = 0.0f; | |
| const block_q_t * x = (const block_q_t *) vx; | |
| const block_q8_1 * y = (const block_q8_1 *) vy; | |
| for (int i = 0; i < blocks_per_row; i += blocks_per_warp) { | |
| const int ibx = row*blocks_per_row + i + threadIdx.x / (qi/vdr); // x block index | |
| const int iby = (i + threadIdx.x / (qi/vdr)) * (qk/QK8_1); // y block index that aligns with ibx | |
| const int iqs = vdr * (threadIdx.x % (qi/vdr)); // x block quant index when casting the quants to int | |
| tmp += vec_dot_q_cuda(&x[ibx], &y[iby], iqs); | |
| } | |
| // sum up partial sums and write back result | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); | |
| } | |
| if (threadIdx.x == 0) { | |
| dst[row] = tmp; | |
| } | |
| } | |
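| // matrix * vector multiplication that dequantizes x on the fly with dequantize_kernel; each thread | |
| // processes vals_per_iter values per iteration and the partial sums are reduced across the warp | |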
| template <int qk, int qr, dequantize_kernel_t dequantize_kernel> | |
| static __global__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows) { | |
| // qk = quantized weights per x block | |
| // qr = number of quantized weights per data value in x block | |
| const int row = blockIdx.x*blockDim.y + threadIdx.y; | |
| if (row >= nrows) { | |
| return; | |
| } | |
| const int tid = threadIdx.x; | |
| const int iter_stride = 2*GGML_CUDA_DMMV_X; | |
| const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter | |
| const int y_offset = qr == 1 ? 1 : qk/2; | |
| // partial sum for each thread | |
| #ifdef GGML_CUDA_F16 | |
| half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics | |
| #else | |
| float tmp = 0.0f; | |
| #endif // GGML_CUDA_F16 | |
| for (int i = 0; i < ncols; i += iter_stride) { | |
| const int col = i + vals_per_iter*tid; | |
| const int ib = (row*ncols + col)/qk; // x block index | |
| const int iqs = (col%qk)/qr; // x quant index | |
| const int iybs = col - col%qk; // y block start index | |
| // processing >2 values per i iter is faster for fast GPUs | |
| for (int j = 0; j < vals_per_iter; j += 2) { | |
| // process 2 vals per j iter | |
| // dequantize | |
| // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val | |
| dfloat2 v; | |
| dequantize_kernel(vx, ib, iqs + j/qr, v); | |
| // matrix multiplication | |
| // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2 | |
| #ifdef GGML_CUDA_F16 | |
| tmp += __hmul2(v, { | |
| y[iybs + iqs + j/qr + 0], | |
| y[iybs + iqs + j/qr + y_offset] | |
| }); | |
| #else | |
| tmp += v.x * y[iybs + iqs + j/qr + 0]; | |
| tmp += v.y * y[iybs + iqs + j/qr + y_offset]; | |
| #endif // GGML_CUDA_F16 | |
| } | |
| } | |
| // sum up partial sums and write back result | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); | |
| } | |
| if (tid == 0) { | |
| #ifdef GGML_CUDA_F16 | |
| dst[row] = tmp.x + tmp.y; | |
| #else | |
| dst[row] = tmp; | |
| #endif // GGML_CUDA_F16 | |
| } | |
| } | |
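| // multiplication of an f16 x tensor that is transposed and permuted with a permuted f32 y tensor; | |
| // the threads of one warp iterate over a row of x and reduce the dst element with __shfl_xor_sync | |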
| static __global__ void mul_mat_p021_f16_f32( | |
| const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst, | |
| const int ncols_x, const int nrows_x, const int nchannels_x, const int nchannels_y) { | |
| const half * x = (const half *) vx; | |
| const int row_x = blockDim.y*blockIdx.y + threadIdx.y; | |
| const int channel = blockDim.z*blockIdx.z + threadIdx.z; | |
| const int channel_x = channel / (nchannels_y / nchannels_x); | |
| const int nrows_y = ncols_x; | |
| const int nrows_dst = nrows_x; | |
| const int row_dst = row_x; | |
| float tmp = 0.0f; | |
| for (int col_x0 = 0; col_x0 < ncols_x; col_x0 += blockDim.x) { | |
| const int col_x = col_x0 + threadIdx.x; | |
| if (col_x >= ncols_x) { | |
| break; | |
| } | |
| // x is transposed and permuted | |
| const int ix = row_x*nchannels_x*ncols_x + channel_x*ncols_x + col_x; | |
| const float xi = __half2float(x[ix]); | |
| const int row_y = col_x; | |
| // y is not transposed but permuted | |
| const int iy = channel*nrows_y + row_y; | |
| tmp += xi * y[iy]; | |
| } | |
| // dst is not transposed and not permuted | |
| const int idst = channel*nrows_dst + row_dst; | |
| // sum up partial sums and write back result | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); | |
| } | |
| if (threadIdx.x == 0) { | |
| dst[idst] = tmp; | |
| } | |
| } | |
| static __global__ void mul_mat_vec_nc_f16_f32( // nc == non-contiguous | |
| const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst, const int ncols_x, const int nrows_x, | |
| const int row_stride_x, const int channel_stride_x, const int channel_x_divisor) { | |
| const half * x = (const half *) vx; | |
| const int row_x = blockDim.y*blockIdx.y + threadIdx.y; | |
| const int channel = blockDim.z*blockIdx.z + threadIdx.z; | |
| const int channel_x = channel / channel_x_divisor; | |
| const int nrows_y = ncols_x; | |
| const int nrows_dst = nrows_x; | |
| const int row_dst = row_x; | |
| const int idst = channel*nrows_dst + row_dst; | |
| float tmp = 0.0f; | |
| for (int col_x0 = 0; col_x0 < ncols_x; col_x0 += blockDim.x) { | |
| const int col_x = col_x0 + threadIdx.x; | |
| if (col_x >= ncols_x) { | |
| break; | |
| } | |
| const int row_y = col_x; | |
| const int ix = channel_x*channel_stride_x + row_x*row_stride_x + col_x; | |
| const int iy = channel*nrows_y + row_y; | |
| const float xi = __half2float(x[ix]); | |
| tmp += xi * y[iy]; | |
| } | |
| // sum up partial sums and write back result | |
| for (int mask = 16; mask > 0; mask >>= 1) { | |
| tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); | |
| } | |
| if (threadIdx.x == 0) { | |
| dst[idst] = tmp; | |
| } | |
| } | |
| static __device__ void cpy_1_f32_f32(const char * cxi, char * cdsti) { | |
| const float * xi = (const float *) cxi; | |
| float * dsti = (float *) cdsti; | |
| *dsti = *xi; | |
| } | |
| static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) { | |
| const float * xi = (const float *) cxi; | |
| half * dsti = (half *) cdsti; | |
| *dsti = __float2half(*xi); | |
| } | |
| static __device__ void cpy_1_f16_f16(const char * cxi, char * cdsti) { | |
| const half * xi = (const half *) cxi; | |
| half * dsti = (half *) cdsti; | |
| *dsti = *xi; | |
| } | |
| template <cpy_kernel_t cpy_1> | |
| static __global__ void cpy_f32_f16(const char * cx, char * cdst, const int ne, | |
| const int ne00, const int ne01, const int nb00, const int nb01, const int nb02, | |
| const int ne10, const int ne11, const int nb10, const int nb11, const int nb12) { | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= ne) { | |
| return; | |
| } | |
| // determine indices i02/i12, i01/i11, i00/i10 as a function of index i of flattened tensor | |
| // then combine those indices with the corresponding byte offsets to get the total offsets | |
| const int i02 = i / (ne00*ne01); | |
| const int i01 = (i - i02*ne01*ne00) / ne00; | |
| const int i00 = i - i02*ne01*ne00 - i01*ne00; | |
| const int x_offset = i00*nb00 + i01*nb01 + i02*nb02; | |
| const int i12 = i / (ne10*ne11); | |
| const int i11 = (i - i12*ne10*ne11) / ne10; | |
| const int i10 = i - i12*ne10*ne11 - i11*ne10; | |
| const int dst_offset = i10*nb10 + i11*nb11 + i12*nb12; | |
| cpy_1(cx + x_offset, cdst + dst_offset); | |
| } | |
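| // Index math used by cpy_f32_f16 above (and cpy_f32_q below): the flattened index i | |
| // over an (ne00, ne01, ne02) tensor is unravelled as | |
| //   i02 = i / (ne00*ne01) | |
| //   i01 = (i - i02*ne00*ne01) / ne00 | |
| //   i00 =  i - i02*ne00*ne01 - i01*ne00 | |
| // and recombined with the byte strides nb00/nb01/nb02, which also handles | |
| // non-contiguous (permuted) layouts. | |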
| static __device__ void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) { | |
| const float * xi = (const float *) cxi; | |
| block_q8_0 * dsti = (block_q8_0 *) cdsti; | |
| float amax = 0.0f; // absolute max | |
| for (int j = 0; j < QK8_0; j++) { | |
| const float v = xi[j]; | |
| amax = fmaxf(amax, fabsf(v)); | |
| } | |
| const float d = amax / ((1 << 7) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| dsti->d = d; | |
| for (int j = 0; j < QK8_0; ++j) { | |
| const float x0 = xi[j]*id; | |
| dsti->qs[j] = roundf(x0); | |
| } | |
| } | |
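| // q8_0 reference (as written by cpy_blck_f32_q8_0 above): a block of QK8_0 values is | |
| // stored as one scale d = max|x| / 127 plus QK8_0 signed 8-bit quants q = round(x/d), | |
| // so dequantization is approximately d*q. | |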
| static __device__ void cpy_blck_f32_q4_0(const char * cxi, char * cdsti) { | |
| const float * xi = (const float *) cxi; | |
| block_q4_0 * dsti = (block_q4_0 *) cdsti; | |
| float amax = 0.0f; | |
| float vmax = 0.0f; | |
| for (int j = 0; j < QK4_0; ++j) { | |
| const float v = xi[j]; | |
| if (amax < fabsf(v)) { | |
| amax = fabsf(v); | |
| vmax = v; | |
| } | |
| } | |
| const float d = vmax / -8; | |
| const float id = d ? 1.0f/d : 0.0f; | |
| dsti->d = d; | |
| for (int j = 0; j < QK4_0/2; ++j) { | |
| const float x0 = xi[0 + j]*id; | |
| const float x1 = xi[QK4_0/2 + j]*id; | |
| const uint8_t xi0 = min(15, (int8_t)(x0 + 8.5f)); | |
| const uint8_t xi1 = min(15, (int8_t)(x1 + 8.5f)); | |
| dsti->qs[j] = xi0; | |
| dsti->qs[j] |= xi1 << 4; | |
| } | |
| } | |
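| // q4_0 reference: d = vmax / -8 maps the block onto [0, 15] after the +8 offset, in | |
| // effect q = clamp(round(x/d) + 8, 0, 15); two quants are packed per byte with the | |
| // second half of the block in the high nibbles. Dequantization is d*(q - 8). | |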
| static __device__ void cpy_blck_f32_q4_1(const char * cxi, char * cdsti) { | |
| const float * xi = (const float *) cxi; | |
| block_q4_1 * dsti = (block_q4_1 *) cdsti; | |
| float vmin = FLT_MAX; | |
| float vmax = -FLT_MAX; | |
| for (int j = 0; j < QK4_1; ++j) { | |
| const float v = xi[j]; | |
| if (v < vmin) vmin = v; | |
| if (v > vmax) vmax = v; | |
| } | |
| const float d = (vmax - vmin) / ((1 << 4) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| dsti->dm.x = d; | |
| dsti->dm.y = vmin; | |
| for (int j = 0; j < QK4_1/2; ++j) { | |
| const float x0 = (xi[0 + j] - vmin)*id; | |
| const float x1 = (xi[QK4_1/2 + j] - vmin)*id; | |
| const uint8_t xi0 = min(15, (int8_t)(x0 + 0.5f)); | |
| const uint8_t xi1 = min(15, (int8_t)(x1 + 0.5f)); | |
| dsti->qs[j] = xi0; | |
| dsti->qs[j] |= xi1 << 4; | |
| } | |
| } | |
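| // q4_1 reference: the affine variant of q4_0. It stores d = (vmax - vmin)/15 and the | |
| // block minimum m = vmin in dm (a half2), with q = clamp(round((x - m)/d), 0, 15), so | |
| // dequantization is d*q + m. | |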
| template <cpy_kernel_t cpy_blck, int qk> | |
| static __global__ void cpy_f32_q(const char * cx, char * cdst, const int ne, | |
| const int ne00, const int ne01, const int nb00, const int nb01, const int nb02, | |
| const int ne10, const int ne11, const int nb10, const int nb11, const int nb12) { | |
| const int i = (blockDim.x*blockIdx.x + threadIdx.x)*qk; | |
| if (i >= ne) { | |
| return; | |
| } | |
| const int i02 = i / (ne00*ne01); | |
| const int i01 = (i - i02*ne01*ne00) / ne00; | |
| const int i00 = (i - i02*ne01*ne00 - i01*ne00); | |
| const int x_offset = i00*nb00 + i01*nb01 + i02*nb02; | |
| const int i12 = i / (ne10*ne11); | |
| const int i11 = (i - i12*ne10*ne11) / ne10; | |
| const int i10 = (i - i12*ne10*ne11 - i11*ne10)/qk; | |
| const int dst_offset = i10*nb10 + i11*nb11 + i12*nb12; | |
| cpy_blck(cx + x_offset, cdst + dst_offset); | |
| } | |
| static __device__ float rope_yarn_ramp(const float low, const float high, const int i0) { | |
| const float y = (i0 / 2 - low) / max(0.001f, high - low); | |
| return 1.0f - min(1.0f, max(0.0f, y)); | |
| } | |
| struct rope_corr_dims { | |
| float v[4]; | |
| }; | |
| // YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn | |
| // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. | |
| static __device__ void rope_yarn( | |
| float theta_extrap, float freq_scale, rope_corr_dims corr_dims, int64_t i0, float ext_factor, float mscale, | |
| float * cos_theta, float * sin_theta | |
| ) { | |
| // Get n-d rotational scaling corrected for extrapolation | |
| float theta_interp = freq_scale * theta_extrap; | |
| float theta = theta_interp; | |
| if (ext_factor != 0.0f) { | |
| float ramp_mix = rope_yarn_ramp(corr_dims.v[0], corr_dims.v[1], i0) * ext_factor; | |
| theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; | |
| // Get n-d magnitude scaling corrected for interpolation | |
| mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); | |
| } | |
| *cos_theta = cosf(theta) * mscale; | |
| *sin_theta = sinf(theta) * mscale; | |
| } | |
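| // The RoPE kernels below rotate pairs of elements by a position-dependent angle. For | |
| // a pair (x0, x1) at even column col and position p: | |
| //   theta = p * freq_base^(-col/ncols)   (then scaled/corrected by rope_yarn above) | |
| //   dst0  = x0*cos(theta) - x1*sin(theta) | |
| //   dst1  = x0*sin(theta) + x1*cos(theta) | |
| // The "neox" variant pairs element i with element i + n_dims/2 instead of its | |
| // immediate neighbor. | |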
| // rope == RoPE == rotary positional embedding | |
| template<typename T, bool has_pos> | |
| static __global__ void rope( | |
| const T * x, T * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base, | |
| float ext_factor, float attn_factor, rope_corr_dims corr_dims | |
| ) { | |
| const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y); | |
| if (col >= ncols) { | |
| return; | |
| } | |
| const int row = blockDim.x*blockIdx.x + threadIdx.x; | |
| const int i = row*ncols + col; | |
| const int i2 = row/p_delta_rows; | |
| const int p = has_pos ? pos[i2] : 0; | |
| const float theta_base = p*powf(freq_base, -float(col)/ncols); | |
| float cos_theta, sin_theta; | |
| rope_yarn(theta_base, freq_scale, corr_dims, col, ext_factor, attn_factor, &cos_theta, &sin_theta); | |
| const float x0 = x[i + 0]; | |
| const float x1 = x[i + 1]; | |
| dst[i + 0] = x0*cos_theta - x1*sin_theta; | |
| dst[i + 1] = x0*sin_theta + x1*cos_theta; | |
| } | |
| template<typename T, bool has_pos> | |
| static __global__ void rope_neox( | |
| const T * x, T * dst, int ncols, int n_dims, const int32_t * pos, float freq_scale, int p_delta_rows, | |
| float ext_factor, float attn_factor, rope_corr_dims corr_dims, float theta_scale, float inv_ndims | |
| ) { | |
| const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y); | |
| if (col >= ncols) { | |
| return; | |
| } | |
| const int row = blockDim.x*blockIdx.x + threadIdx.x; | |
| const int ib = col / n_dims; | |
| const int ic = col % n_dims; | |
| if (ib > 0) { | |
| const int i = row*ncols + ib*n_dims + ic; | |
| dst[i + 0] = x[i + 0]; | |
| dst[i + 1] = x[i + 1]; | |
| return; | |
| } | |
| const int i = row*ncols + ib*n_dims + ic/2; | |
| const int i2 = row/p_delta_rows; | |
| float cur_rot = inv_ndims * ic - ib; | |
| const int p = has_pos ? pos[i2] : 0; | |
| const float theta_base = p*freq_scale*powf(theta_scale, col/2.0f); | |
| float cos_theta, sin_theta; | |
| rope_yarn(theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta); | |
| const float x0 = x[i + 0]; | |
| const float x1 = x[i + n_dims/2]; | |
| dst[i + 0] = x0*cos_theta - x1*sin_theta; | |
| dst[i + n_dims/2] = x0*sin_theta + x1*cos_theta; | |
| } | |
| static __global__ void rope_glm_f32( | |
| const float * x, float * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base, | |
| int n_ctx | |
| ) { | |
| const int col = blockDim.x*blockIdx.x + threadIdx.x; | |
| const int half_n_dims = ncols/4; | |
| if (col >= half_n_dims) { | |
| return; | |
| } | |
| const int row = blockDim.y*blockIdx.y + threadIdx.y; | |
| const int i = row*ncols + col; | |
| const int i2 = row/p_delta_rows; | |
| const float col_theta_scale = powf(freq_base, -2.0f*col/ncols); | |
| // FIXME: this is likely wrong | |
| const int p = pos != nullptr ? pos[i2] : 0; | |
| const float theta = min(p, n_ctx - 2)*freq_scale*col_theta_scale; | |
| const float sin_theta = sinf(theta); | |
| const float cos_theta = cosf(theta); | |
| const float x0 = x[i + 0]; | |
| const float x1 = x[i + half_n_dims]; | |
| dst[i + 0] = x0*cos_theta - x1*sin_theta; | |
| dst[i + half_n_dims] = x0*sin_theta + x1*cos_theta; | |
| const float block_theta = ((float)max(p - n_ctx - 2, 0))*col_theta_scale; | |
| const float sin_block_theta = sinf(block_theta); | |
| const float cos_block_theta = cosf(block_theta); | |
| const float x2 = x[i + half_n_dims * 2]; | |
| const float x3 = x[i + half_n_dims * 3]; | |
| dst[i + half_n_dims * 2] = x2*cos_block_theta - x3*sin_block_theta; | |
| dst[i + half_n_dims * 3] = x2*sin_block_theta + x3*cos_block_theta; | |
| } | |
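| // alibi_f32 below adds the ALiBi bias m_k*col to each score. The slope is | |
| // m_k = m0^(k+1) for the first n_heads_log2_floor heads and | |
| // m_k = m1^(2*(k - n_heads_log2_floor) + 1) for the remaining heads. | |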
| static __global__ void alibi_f32(const float * x, float * dst, const int ncols, const int k_rows, | |
| const int n_heads_log2_floor, const float m0, const float m1) { | |
| const int col = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (col >= ncols) { | |
| return; | |
| } | |
| const int row = blockDim.y*blockIdx.y + threadIdx.y; | |
| const int i = row*ncols + col; | |
| const int k = row/k_rows; | |
| float m_k; | |
| if (k < n_heads_log2_floor) { | |
| m_k = powf(m0, k + 1); | |
| } else { | |
| m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1); | |
| } | |
| dst[i] = col * m_k + x[i]; | |
| } | |
| static __global__ void k_sum_rows_f32(const float * x, float * dst, const int ncols) { | |
| const int row = blockIdx.y; | |
| const int col = threadIdx.x; | |
| float sum = 0.0f; | |
| for (int i = col; i < ncols; i += blockDim.x) { | |
| sum += x[row * ncols + i]; | |
| } | |
| sum = warp_reduce_sum(sum); | |
| if (col == 0) { | |
| dst[row] = sum; | |
| } | |
| } | |
| template<typename T> | |
| static inline __device__ void swap(T & a, T & b) { | |
| T tmp = a; | |
| a = b; | |
| b = tmp; | |
| } | |
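| // k_argsort_f32_i32 below is a bitonic sort over the column indices of one row per | |
| // block, with one thread per column; it therefore requires ncols to be a power of two | |
| // that fits in a single block (the host-side launcher is expected to guarantee this). | |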
| template<ggml_sort_order order> | |
| static __global__ void k_argsort_f32_i32(const float * x, int * dst, const int ncols) { | |
| // bitonic sort | |
| int col = threadIdx.x; | |
| int row = blockIdx.y; | |
| if (col >= ncols) return; | |
| const float * x_row = x + row * ncols; | |
| int * dst_row = dst + row * ncols; | |
| // initialize indices | |
| if (col < ncols) { | |
| dst_row[col] = col; | |
| } | |
| __syncthreads(); | |
| for (int k = 2; k <= ncols; k *= 2) { | |
| for (int j = k / 2; j > 0; j /= 2) { | |
| int ixj = col ^ j; | |
| if (ixj > col) { | |
| if ((col & k) == 0) { | |
| if (order == GGML_SORT_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]]) { | |
| swap(dst_row[col], dst_row[ixj]); | |
| } | |
| } else { | |
| if (order == GGML_SORT_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]]) { | |
| swap(dst_row[col], dst_row[ixj]); | |
| } | |
| } | |
| } | |
| __syncthreads(); | |
| } | |
| } | |
| } | |
| static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past) { | |
| const int col = blockDim.y*blockIdx.y + threadIdx.y; | |
| const int row = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (col >= ncols) { | |
| return; | |
| } | |
| const int i = row*ncols + col; | |
| //dst[i] = col > (n_past + row % rows_per_channel) ? -INFINITY : x[i]; | |
| //dst[i] = x[i] - (col > n_past + row % rows_per_channel) * INT_MAX; // equivalent within rounding error but slightly faster on GPU | |
| dst[i] = x[i] - (col > n_past + row % rows_per_channel) * FLT_MAX; | |
| } | |
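| // The soft_max_* kernels below use the numerically stable three-phase scheme per row: | |
| // (1) find the row maximum, (2) accumulate the sum of exp(x - max) while caching the | |
| // exponentials in shared memory (or in dst when they do not fit), (3) scale by 1/sum. | |
| // Reductions are done within each warp first and then across warps via buf_iw. | |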
| template <bool vals_smem, int ncols_template, int block_size_template, bool need_check> | |
| static __global__ void soft_max_f16(const float * x, const float * y, float * dst, const int ncols_par, const int nrows_y, const float scale) { | |
| #if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION >= CUDART_HMAX | |
| const int ncols_data = ncols_template == 0 ? ncols_par : ncols_template; | |
| const int ncols_smem = GGML_PAD(ncols_data, 2*WARP_SIZE)/2; | |
| const int tid = threadIdx.x; | |
| const int rowx = blockIdx.x; | |
| const int rowy = rowx % nrows_y; // broadcast the mask (y) in the row dimension | |
| const int block_size = block_size_template == 0 ? blockDim.x : block_size_template; | |
| const int warp_id = threadIdx.x / WARP_SIZE; | |
| const int lane_id = threadIdx.x % WARP_SIZE; | |
| extern __shared__ half data_soft_max_f16[]; | |
| half * buf_iw = data_soft_max_f16 + 0; // shared memory buffer for inter-warp communication | |
| // (shared memory) buffer to cache values between iterations: | |
| half2 * vals = vals_smem ? (half2 *) (buf_iw + WARP_SIZE) : (half2 *) (dst + rowx*ncols_data); | |
| // if the buffer is larger than max. shared memory per block, use dst as temp. buffer instead | |
| // in that case col_smem == col_data must be enforced to avoid race conditions | |
| half2 max_val = make_half2(-INFINITY, -INFINITY); | |
| for (int col0 = 0; col0 < ncols_smem; col0 += block_size) { | |
| const int col_data = 2*col0 + 2*WARP_SIZE*warp_id + lane_id; | |
| const int col_smem = vals_smem ? col0 + tid : col_data; | |
| const int ix = rowx*ncols_data + col_data; | |
| const int iy = rowy*ncols_data + col_data; | |
| half2 val; | |
| if (need_check && col_data + 0 >= ncols_data) { | |
| val.x = -INFINITY; | |
| } else { | |
| val.x = x[ix + 0]*scale + (y ? y[iy + 0] : 0.0f); | |
| } | |
| if (need_check && col_data + WARP_SIZE >= ncols_data) { | |
| val.y = -INFINITY; | |
| } else { | |
| val.y = x[ix + WARP_SIZE]*scale + (y ? y[iy + WARP_SIZE] : 0.0f); | |
| } | |
| if (!need_check || col_smem < (vals_smem ? ncols_smem : ncols_data)) { | |
| vals[col_smem] = val; | |
| } | |
| max_val = __hmax2(max_val, val); | |
| } | |
| // find the max value in the block | |
| max_val = warp_reduce_max(max_val); | |
| if (block_size > WARP_SIZE) { | |
| if (warp_id == 0) { | |
| buf_iw[lane_id] = -INFINITY; | |
| } | |
| __syncthreads(); | |
| if (lane_id == 0) { | |
| buf_iw[warp_id] = __hmax(max_val.x, max_val.y); | |
| } | |
| __syncthreads(); | |
| max_val = __half2half2(buf_iw[lane_id]); | |
| max_val = warp_reduce_max(max_val); | |
| } else { | |
| max_val = __half2half2(__hmax(max_val.x, max_val.y)); | |
| } | |
| half2 tmp = make_half2(0.0f, 0.0f); // partial sums | |
| for (int col0 = 0; col0 < ncols_smem; col0 += block_size) { | |
| const int col_smem = vals_smem ? col0 + tid : 2*col0 + 2*warp_id*WARP_SIZE + lane_id; | |
| if (ncols_template == 0 && col_smem >= (vals_smem ? ncols_smem : ncols_data)) { | |
| break; | |
| } | |
| const half2 val = h2exp(vals[col_smem] - max_val); | |
| tmp += val; | |
| vals[col_smem] = val; | |
| } | |
| // find the sum of exps in the block | |
| tmp = warp_reduce_sum(tmp); | |
| if (block_size > WARP_SIZE) { | |
| if (warp_id == 0) { | |
| buf_iw[lane_id] = 0.0f; | |
| } | |
| __syncthreads(); | |
| if (lane_id == 0) { | |
| buf_iw[warp_id] = tmp.x + tmp.y; | |
| } | |
| __syncthreads(); | |
| tmp = __half2half2(buf_iw[lane_id]); | |
| tmp = warp_reduce_sum(tmp); | |
| } else { | |
| tmp = __half2half2(tmp.x + tmp.y); | |
| } | |
| const half2 inv_sum = make_half2(1.0f, 1.0f) / tmp; | |
| for (int col0 = 0; col0 < ncols_smem; col0 += block_size) { | |
| const int col_data = 2*col0 + 2*WARP_SIZE*warp_id + lane_id; | |
| const int col_smem = vals_smem ? col0 + tid : col_data; | |
| const int idst = rowx*ncols_data + col_data; | |
| const half2 result = vals[col_smem] * inv_sum; | |
| if (need_check && col_data + 0 >= ncols_data) { | |
| return; | |
| } | |
| dst[idst] = result.x; | |
| if (need_check && col_data + WARP_SIZE >= ncols_data) { | |
| return; | |
| } | |
| dst[idst + WARP_SIZE] = result.y; | |
| } | |
| #else | |
| (void) x; (void) y; (void) dst; (void) ncols_par; (void) nrows_y; (void) scale; | |
| bad_arch(); | |
| #endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION >= CUDART_HMAX | |
| } | |
| template <bool vals_smem, int ncols_template, int block_size_template> | |
| static __global__ void soft_max_f32(const float * x, const float * y, float * dst, const int ncols_par, const int nrows_y, const float scale) { | |
| const int ncols = ncols_template == 0 ? ncols_par : ncols_template; | |
| const int tid = threadIdx.x; | |
| const int rowx = blockIdx.x; | |
| const int rowy = rowx % nrows_y; // broadcast the mask (y) in the row dimension | |
| const int block_size = block_size_template == 0 ? blockDim.x : block_size_template; | |
| const int warp_id = threadIdx.x / WARP_SIZE; | |
| const int lane_id = threadIdx.x % WARP_SIZE; | |
| extern __shared__ float data_soft_max_f32[]; | |
| float * buf_iw = data_soft_max_f32; // shared memory buffer for inter-warp communication | |
| // shared memory buffer to cache values between iterations: | |
| float * vals = vals_smem ? buf_iw + WARP_SIZE : dst + rowx*ncols; | |
| float max_val = -INFINITY; | |
| for (int col0 = 0; col0 < ncols; col0 += block_size) { | |
| const int col = col0 + tid; | |
| if (ncols_template == 0 && col >= ncols) { | |
| break; | |
| } | |
| const int ix = rowx*ncols + col; | |
| const int iy = rowy*ncols + col; | |
| const float val = x[ix]*scale + (y ? y[iy] : 0.0f); | |
| vals[col] = val; | |
| max_val = max(max_val, val); | |
| } | |
| // find the max value in the block | |
| max_val = warp_reduce_max(max_val); | |
| if (block_size > WARP_SIZE) { | |
| if (warp_id == 0) { | |
| buf_iw[lane_id] = -INFINITY; | |
| } | |
| __syncthreads(); | |
| if (lane_id == 0) { | |
| buf_iw[warp_id] = max_val; | |
| } | |
| __syncthreads(); | |
| max_val = buf_iw[lane_id]; | |
| max_val = warp_reduce_max(max_val); | |
| } | |
| float tmp = 0.0f; // partial sum | |
| for (int col0 = 0; col0 < ncols; col0 += block_size) { | |
| const int col = col0 + tid; | |
| if (ncols_template == 0 && col >= ncols) { | |
| break; | |
| } | |
| const float val = expf(vals[col] - max_val); | |
| tmp += val; | |
| vals[col] = val; | |
| } | |
| // find the sum of exps in the block | |
| tmp = warp_reduce_sum(tmp); | |
| if (block_size > WARP_SIZE) { | |
| if (warp_id == 0) { | |
| buf_iw[lane_id] = 0.0f; | |
| } | |
| __syncthreads(); | |
| if (lane_id == 0) { | |
| buf_iw[warp_id] = tmp; | |
| } | |
| __syncthreads(); | |
| tmp = buf_iw[lane_id]; | |
| tmp = warp_reduce_sum(tmp); | |
| } | |
| const float inv_sum = 1.0f / tmp; | |
| for (int col0 = 0; col0 < ncols; col0 += block_size) { | |
| const int col = col0 + tid; | |
| if (ncols_template == 0 && col >= ncols) { | |
| return; | |
| } | |
| const int idst = rowx*ncols + col; | |
| dst[idst] = vals[col] * inv_sum; | |
| } | |
| } | |
| static __global__ void scale_f32(const float * x, float * dst, const float scale, const int k) { | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= k) { | |
| return; | |
| } | |
| dst[i] = scale * x[i]; | |
| } | |
| static __global__ void clamp_f32(const float * x, float * dst, const float min, const float max, const int k) { | |
| const int i = blockDim.x*blockIdx.x + threadIdx.x; | |
| if (i >= k) { | |
| return; | |
| } | |
| dst[i] = x[i] < min ? min : (x[i] > max ? max : x[i]); | |
| } | |
| static __global__ void im2col_f32_f16( | |
| const float * x, half * dst, | |
| int offset_delta, int IW, int IH, int OW, int KW, int KH, int pelements, int CHW, | |
| int s0, int s1, int p0, int p1, int d0, int d1) { | |
| const int i = threadIdx.x + blockIdx.x * blockDim.x; | |
| if (i >= pelements) { | |
| return; | |
| } | |
| const int ksize = OW * (KH > 1 ? KW : 1); | |
| const int kx = i / ksize; | |
| const int kd = kx * ksize; | |
| const int ky = (i - kd) / OW; | |
| const int ix = i % OW; | |
| const int64_t iiw = ix * s0 + kx * d0 - p0; | |
| const int64_t iih = blockIdx.y * s1 + ky * d1 - p1; | |
| const int64_t offset_dst = | |
| (blockIdx.y * OW + ix) * CHW + | |
| (blockIdx.z * (KW * KH) + ky * KW + kx); | |
| if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { | |
| dst[offset_dst] = __float2half(0.0f); | |
| } else { | |
| const int64_t offset_src = blockIdx.z * offset_delta; | |
| dst[offset_dst] = __float2half(x[offset_src + iih * IW + iiw]); | |
| } | |
| } | |
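| // im2col_f32_f16 above maps each (output position, kernel tap) pair to one dst | |
| // element, reading x at iiw = ix*s0 + kx*d0 - p0, iih = oh*s1 + ky*d1 - p1 (with oh = | |
| // blockIdx.y); taps falling outside the input are written as 0 so the following | |
| // matrix multiplication needs no bounds handling. | |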
| template<int qk, int qr, dequantize_kernel_t dq> | |
| static void get_rows_cuda(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const void * src0_dd, const int32_t * src1_dd, float * dst_dd, cudaStream_t stream) { | |
| GGML_TENSOR_BINARY_OP_LOCALS | |
| const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1); | |
| const int block_num_x = (ne00 + 2*CUDA_GET_ROWS_BLOCK_SIZE - 1) / (2*CUDA_GET_ROWS_BLOCK_SIZE); | |
| const dim3 block_nums(block_num_x, ne10, ne11*ne12); | |
| // strides in elements | |
| //const size_t s0 = nb0 / ggml_element_size(dst); | |
| const size_t s1 = nb1 / ggml_element_size(dst); | |
| const size_t s2 = nb2 / ggml_element_size(dst); | |
| const size_t s3 = nb3 / ggml_element_size(dst); | |
| const size_t s10 = nb10 / ggml_element_size(src1); | |
| const size_t s11 = nb11 / ggml_element_size(src1); | |
| const size_t s12 = nb12 / ggml_element_size(src1); | |
| //const size_t s13 = nb13 / ggml_element_size(src1); | |
| GGML_ASSERT(ne00 % 2 == 0); | |
| k_get_rows<qk, qr, dq><<<block_nums, block_dims, 0, stream>>>( | |
| src0_dd, src1_dd, dst_dd, | |
| ne00, /*ne01, ne02, ne03,*/ | |
| /*ne10, ne11,*/ ne12, /*ne13,*/ | |
| /* s0,*/ s1, s2, s3, | |
| /* nb00,*/ nb01, nb02, nb03, | |
| s10, s11, s12/*, s13*/); | |
| (void) dst; | |
| } | |
| template<typename src0_t> | |
| static void get_rows_cuda_float(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const src0_t * src0_dd, const int32_t * src1_dd, float * dst_dd, cudaStream_t stream) { | |
| GGML_TENSOR_BINARY_OP_LOCALS | |
| const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1); | |
| const int block_num_x = (ne00 + CUDA_GET_ROWS_BLOCK_SIZE - 1) / CUDA_GET_ROWS_BLOCK_SIZE; | |
| const dim3 block_nums(block_num_x, ne10, ne11*ne12); | |
| // strides in elements | |
| //const size_t s0 = nb0 / ggml_element_size(dst); | |
| const size_t s1 = nb1 / ggml_element_size(dst); | |
| const size_t s2 = nb2 / ggml_element_size(dst); | |
| const size_t s3 = nb3 / ggml_element_size(dst); | |
| const size_t s10 = nb10 / ggml_element_size(src1); | |
| const size_t s11 = nb11 / ggml_element_size(src1); | |
| const size_t s12 = nb12 / ggml_element_size(src1); | |
| //const size_t s13 = nb13 / ggml_element_size(src1); | |
| k_get_rows_float<<<block_nums, block_dims, 0, stream>>>( | |
| src0_dd, src1_dd, dst_dd, | |
| ne00, /*ne01, ne02, ne03,*/ | |
| /*ne10, ne11,*/ ne12, /*ne13,*/ | |
| /* s0,*/ s1, s2, s3, | |
| /* nb00,*/ nb01, nb02, nb03, | |
| s10, s11, s12/*, s13*/); | |
| (void) dst; | |
| } | |
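| // bin_bcast_cuda below collapses leading dimensions that are not broadcast (repeat | |
| // factor 1) into a single larger dimension before launching, which keeps the kernel's | |
| // index arithmetic small; the 1D "unravel" kernel is only used as a fallback when the | |
| // grid would exceed the 65535-block limit in the z dimension. | |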
| template<float (*bin_op)(const float, const float)> | |
| struct bin_bcast_cuda { | |
| template<typename src0_t, typename src1_t, typename dst_t> | |
| void operator()(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, | |
| const src0_t * src0_dd, const src1_t * src1_dd, dst_t * dst_dd, | |
| cudaStream_t stream) { | |
| GGML_TENSOR_BINARY_OP_LOCALS | |
| int nr0 = ne10/ne0; | |
| int nr1 = ne11/ne1; | |
| int nr2 = ne12/ne2; | |
| int nr3 = ne13/ne3; | |
| int nr[4] = { nr0, nr1, nr2, nr3 }; | |
| // collapse dimensions until first broadcast dimension | |
| int64_t cne0[] = {ne0, ne1, ne2, ne3}; | |
| int64_t cne1[] = {ne10, ne11, ne12, ne13}; | |
| size_t cnb0[] = {nb0, nb1, nb2, nb3}; | |
| size_t cnb1[] = {nb10, nb11, nb12, nb13}; | |
| auto collapse = [](int64_t cne[]) { | |
| cne[0] *= cne[1]; | |
| cne[1] = cne[2]; | |
| cne[2] = cne[3]; | |
| cne[3] = 1; | |
| }; | |
| auto collapse_nb = [](size_t cnb[], const int64_t cne[]) { | |
| cnb[1] *= cne[1]; | |
| cnb[2] *= cne[2]; | |
| cnb[3] *= cne[3]; | |
| }; | |
| for (int i = 0; i < 4; i++) { | |
| if (nr[i] != 1) { | |
| break; | |
| } | |
| if (i > 0) { | |
| collapse_nb(cnb0, cne0); | |
| collapse_nb(cnb1, cne1); | |
| collapse(cne0); | |
| collapse(cne1); | |
| } | |
| } | |
| { | |
| int64_t ne0 = cne0[0]; | |
| int64_t ne1 = cne0[1]; | |
| int64_t ne2 = cne0[2]; | |
| int64_t ne3 = cne0[3]; | |
| int64_t ne10 = cne1[0]; | |
| int64_t ne11 = cne1[1]; | |
| int64_t ne12 = cne1[2]; | |
| int64_t ne13 = cne1[3]; | |
| size_t nb0 = cnb0[0]; | |
| size_t nb1 = cnb0[1]; | |
| size_t nb2 = cnb0[2]; | |
| size_t nb3 = cnb0[3]; | |
| size_t nb10 = cnb1[0]; | |
| size_t nb11 = cnb1[1]; | |
| size_t nb12 = cnb1[2]; | |
| size_t nb13 = cnb1[3]; | |
| size_t s0 = nb0 / sizeof(dst_t); | |
| size_t s1 = nb1 / sizeof(dst_t); | |
| size_t s2 = nb2 / sizeof(dst_t); | |
| size_t s3 = nb3 / sizeof(dst_t); | |
| size_t s10 = nb10 / sizeof(src1_t); | |
| size_t s11 = nb11 / sizeof(src1_t); | |
| size_t s12 = nb12 / sizeof(src1_t); | |
| size_t s13 = nb13 / sizeof(src1_t); | |
| GGML_ASSERT(s0 == 1); | |
| GGML_ASSERT(s10 == 1); | |
| const int block_size = 128; | |
| int64_t hne0 = std::max(ne0/2LL, 1LL); | |
| dim3 block_dims; | |
| block_dims.x = std::min<unsigned int>(hne0, block_size); | |
| block_dims.y = std::min<unsigned int>(ne1, block_size / block_dims.x); | |
| block_dims.z = std::min(std::min<unsigned int>(ne2*ne3, block_size / block_dims.x / block_dims.y), 64U); | |
| dim3 block_nums( | |
| (hne0 + block_dims.x - 1) / block_dims.x, | |
| (ne1 + block_dims.y - 1) / block_dims.y, | |
| (ne2*ne3 + block_dims.z - 1) / block_dims.z | |
| ); | |
| if (block_nums.z > 65535) { | |
| // this is the maximum number of blocks in z direction, fallback to 1D grid kernel | |
| int block_num = (ne0*ne1*ne2*ne3 + block_size - 1) / block_size; | |
| k_bin_bcast_unravel<bin_op><<<block_num, block_size, 0, stream>>>( | |
| src0_dd, src1_dd, dst_dd, | |
| ne0, ne1, ne2, ne3, | |
| ne10, ne11, ne12, ne13, | |
| /* s0, */ s1, s2, s3, | |
| /* s10, */ s11, s12, s13); | |
| } else { | |
| k_bin_bcast<bin_op><<<block_nums, block_dims, 0, stream>>>( | |
| src0_dd, src1_dd, dst_dd, | |
| ne0, ne1, ne2, ne3, | |
| ne10, ne11, ne12, ne13, | |
| /* s0, */ s1, s2, s3, | |
| /* s10, */ s11, s12, s13); | |
| } | |
| } | |
| } | |
| }; | |
| static void acc_f32_cuda(const float * x, const float * y, float * dst, const int n_elements, | |
| const int ne10, const int ne11, const int ne12, | |
| const int nb1, const int nb2, const int offset, cudaStream_t stream) { | |
| int num_blocks = (n_elements + CUDA_ACC_BLOCK_SIZE - 1) / CUDA_ACC_BLOCK_SIZE; | |
| acc_f32<<<num_blocks, CUDA_ACC_BLOCK_SIZE, 0, stream>>>(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset); | |
| } | |
| static void gelu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { | |
| const int num_blocks = (k + CUDA_GELU_BLOCK_SIZE - 1) / CUDA_GELU_BLOCK_SIZE; | |
| gelu_f32<<<num_blocks, CUDA_GELU_BLOCK_SIZE, 0, stream>>>(x, dst, k); | |
| } | |
| static void silu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { | |
| const int num_blocks = (k + CUDA_SILU_BLOCK_SIZE - 1) / CUDA_SILU_BLOCK_SIZE; | |
| silu_f32<<<num_blocks, CUDA_SILU_BLOCK_SIZE, 0, stream>>>(x, dst, k); | |
| } | |
| static void gelu_quick_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { | |
| const int num_blocks = (k + CUDA_GELU_BLOCK_SIZE - 1) / CUDA_GELU_BLOCK_SIZE; | |
| gelu_quick_f32<<<num_blocks, CUDA_GELU_BLOCK_SIZE, 0, stream>>>(x, dst, k); | |
| } | |
| static void tanh_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { | |
| const int num_blocks = (k + CUDA_TANH_BLOCK_SIZE - 1) / CUDA_TANH_BLOCK_SIZE; | |
| tanh_f32<<<num_blocks, CUDA_TANH_BLOCK_SIZE, 0, stream>>>(x, dst, k); | |
| } | |
| static void relu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { | |
| const int num_blocks = (k + CUDA_RELU_BLOCK_SIZE - 1) / CUDA_RELU_BLOCK_SIZE; | |
| relu_f32<<<num_blocks, CUDA_RELU_BLOCK_SIZE, 0, stream>>>(x, dst, k); | |
| } | |
| static void leaky_relu_f32_cuda(const float * x, float * dst, const int k, const float negative_slope, cudaStream_t stream) { | |
| const int num_blocks = (k + CUDA_RELU_BLOCK_SIZE - 1) / CUDA_RELU_BLOCK_SIZE; | |
| leaky_relu_f32<<<num_blocks, CUDA_RELU_BLOCK_SIZE, 0, stream>>>(x, dst, k, negative_slope); | |
| } | |
| static void sqr_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { | |
| const int num_blocks = (k + CUDA_SQR_BLOCK_SIZE - 1) / CUDA_SQR_BLOCK_SIZE; | |
| sqr_f32<<<num_blocks, CUDA_SQR_BLOCK_SIZE, 0, stream>>>(x, dst, k); | |
| } | |
| static void norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float eps, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % WARP_SIZE == 0); | |
| if (ncols < 1024) { | |
| const dim3 block_dims(WARP_SIZE, 1, 1); | |
| norm_f32<WARP_SIZE><<<nrows, block_dims, 0, stream>>>(x, dst, ncols, eps); | |
| } else { | |
| const dim3 block_dims(1024, 1, 1); | |
| norm_f32<1024><<<nrows, block_dims, 0, stream>>>(x, dst, ncols, eps); | |
| } | |
| } | |
| static void group_norm_f32_cuda(const float * x, float * dst, const int num_groups, const int group_size, const int ne_elements, cudaStream_t stream) { | |
| static const float eps = 1e-6f; | |
| if (group_size < 1024) { | |
| const dim3 block_dims(WARP_SIZE, 1, 1); | |
| group_norm_f32<WARP_SIZE><<<num_groups, block_dims, 0, stream>>>(x, dst, group_size, ne_elements, eps); | |
| } else { | |
| const dim3 block_dims(1024, 1, 1); | |
| group_norm_f32<1024><<<num_groups, block_dims, 0, stream>>>(x, dst, group_size, ne_elements, eps); | |
| } | |
| } | |
| static void concat_f32_cuda(const float * x, const float * y, float * dst, const int ne0, int ne1, int ne2, int ne02, cudaStream_t stream) { | |
| int num_blocks = (ne0 + CUDA_CONCAT_BLOCK_SIZE - 1) / CUDA_CONCAT_BLOCK_SIZE; | |
| dim3 gridDim(num_blocks, ne1, ne2); | |
| concat_f32<<<gridDim, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(x, y, dst, ne0, ne02); | |
| } | |
| static void upscale_f32_cuda(const float * x, float * dst, const int ne00, const int ne01, const int ne02, const int scale_factor, cudaStream_t stream) { | |
| int ne0 = (ne00 * scale_factor); | |
| int num_blocks = (ne0 + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE; | |
| dim3 gridDim(num_blocks, (ne01 * scale_factor), ne02); | |
| upscale_f32<<<gridDim, CUDA_UPSCALE_BLOCK_SIZE, 0, stream>>>(x, dst, ne00, ne00 * ne01, scale_factor); | |
| } | |
| static void pad_f32_cuda(const float * x, float * dst, | |
| const int ne00, const int ne01, const int ne02, | |
| const int ne0, const int ne1, const int ne2, cudaStream_t stream) { | |
| int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; | |
| dim3 gridDim(num_blocks, ne1, ne2); | |
| pad_f32<<<gridDim, CUDA_PAD_BLOCK_SIZE, 0, stream>>>(x, dst, ne0, ne00, ne01, ne02); | |
| } | |
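| // rms_norm_f32 (launched below) normalizes each row as x / sqrt(mean(x^2) + eps), | |
| // i.e. RMS normalization without mean subtraction, in contrast to norm_f32 above; a | |
| // single warp handles rows narrower than 1024 elements, a full 1024-thread block | |
| // handles wider ones. | |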
| static void rms_norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float eps, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % WARP_SIZE == 0); | |
| if (ncols < 1024) { | |
| const dim3 block_dims(WARP_SIZE, 1, 1); | |
| rms_norm_f32<WARP_SIZE><<<nrows, block_dims, 0, stream>>>(x, dst, ncols, eps); | |
| } else { | |
| const dim3 block_dims(1024, 1, 1); | |
| rms_norm_f32<1024><<<nrows, block_dims, 0, stream>>>(x, dst, ncols, eps); | |
| } | |
| } | |
| static void quantize_row_q8_1_cuda(const float * x, void * vy, const int kx, const int ky, const int kx_padded, cudaStream_t stream) { | |
| const int block_num_x = (kx_padded + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE; | |
| const dim3 num_blocks(block_num_x, ky, 1); | |
| const dim3 block_size(CUDA_DEQUANTIZE_BLOCK_SIZE, 1, 1); | |
| quantize_q8_1<<<num_blocks, block_size, 0, stream>>>(x, vy, kx, kx_padded); | |
| } | |
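| // quantize_row_q8_1_cuda converts the activations to q8_1 on the fly for the vec_dot | |
| // kernels: in addition to the scale d = max|x| / 127, each q8_1 block stores the sum | |
| // of its source values, which the *_1 (offset-carrying) dot products use to account | |
| // for their minimum term. | |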
| template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t> | |
| static void dequantize_block_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int k, cudaStream_t stream) { | |
| const int num_blocks = (k + 2*CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / (2*CUDA_DEQUANTIZE_BLOCK_SIZE); | |
| dequantize_block<qk, qr, dequantize_kernel><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k); | |
| } | |
| template<typename dst_t> | |
| static void dequantize_row_q2_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) { | |
| const int nb = k / QK_K; | |
| #if QK_K == 256 | |
| dequantize_block_q2_K<<<nb, 64, 0, stream>>>(vx, y); | |
| #else | |
| dequantize_block_q2_K<<<nb, 32, 0, stream>>>(vx, y); | |
| #endif | |
| } | |
| template<typename dst_t> | |
| static void dequantize_row_q3_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) { | |
| const int nb = k / QK_K; | |
| #if QK_K == 256 | |
| dequantize_block_q3_K<<<nb, 64, 0, stream>>>(vx, y); | |
| #else | |
| dequantize_block_q3_K<<<nb, 32, 0, stream>>>(vx, y); | |
| #endif | |
| } | |
| template<typename dst_t> | |
| static void dequantize_row_q4_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) { | |
| const int nb = k / QK_K; | |
| dequantize_block_q4_K<<<nb, 32, 0, stream>>>(vx, y); | |
| } | |
| template<typename dst_t> | |
| static void dequantize_row_q5_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) { | |
| const int nb = k / QK_K; | |
| #if QK_K == 256 | |
| dequantize_block_q5_K<<<nb, 64, 0, stream>>>(vx, y); | |
| #else | |
| dequantize_block_q5_K<<<nb, 32, 0, stream>>>(vx, y); | |
| #endif | |
| } | |
| template<typename dst_t> | |
| static void dequantize_row_q6_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) { | |
| const int nb = k / QK_K; | |
| #if QK_K == 256 | |
| dequantize_block_q6_K<<<nb, 64, 0, stream>>>(vx, y); | |
| #else | |
| dequantize_block_q6_K<<<nb, 32, 0, stream>>>(vx, y); | |
| #endif | |
| } | |
| template<typename dst_t> | |
| static void dequantize_row_iq2_xxs_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) { | |
| const int nb = k / QK_K; | |
| dequantize_block_iq2_xxs<<<nb, 32, 0, stream>>>(vx, y); | |
| } | |
| template<typename dst_t> | |
| static void dequantize_row_iq2_xs_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) { | |
| const int nb = k / QK_K; | |
| dequantize_block_iq2_xs<<<nb, 32, 0, stream>>>(vx, y); | |
| } | |
| template <typename src_t, typename dst_t> | |
| static void convert_unary_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int k, cudaStream_t stream) { | |
| const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE; | |
| convert_unary<src_t><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k); | |
| } | |
| static to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { | |
| switch (type) { | |
| case GGML_TYPE_Q4_0: | |
| return dequantize_block_cuda<QK4_0, QR4_0, dequantize_q4_0>; | |
| case GGML_TYPE_Q4_1: | |
| return dequantize_block_cuda<QK4_1, QR4_1, dequantize_q4_1>; | |
| case GGML_TYPE_Q5_0: | |
| return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>; | |
| case GGML_TYPE_Q5_1: | |
| return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>; | |
| case GGML_TYPE_Q8_0: | |
| return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>; | |
| case GGML_TYPE_Q2_K: | |
| return dequantize_row_q2_K_cuda; | |
| case GGML_TYPE_Q3_K: | |
| return dequantize_row_q3_K_cuda; | |
| case GGML_TYPE_Q4_K: | |
| return dequantize_row_q4_K_cuda; | |
| case GGML_TYPE_Q5_K: | |
| return dequantize_row_q5_K_cuda; | |
| case GGML_TYPE_Q6_K: | |
| return dequantize_row_q6_K_cuda; | |
| case GGML_TYPE_IQ2_XXS: | |
| return dequantize_row_iq2_xxs_cuda; | |
| case GGML_TYPE_IQ2_XS: | |
| return dequantize_row_iq2_xs_cuda; | |
| case GGML_TYPE_F32: | |
| return convert_unary_cuda<float>; | |
| default: | |
| return nullptr; | |
| } | |
| } | |
| static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) { | |
| switch (type) { | |
| case GGML_TYPE_Q4_0: | |
| return dequantize_block_cuda<QK4_0, QR4_0, dequantize_q4_0>; | |
| case GGML_TYPE_Q4_1: | |
| return dequantize_block_cuda<QK4_1, QR4_1, dequantize_q4_1>; | |
| case GGML_TYPE_Q5_0: | |
| return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>; | |
| case GGML_TYPE_Q5_1: | |
| return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>; | |
| case GGML_TYPE_Q8_0: | |
| return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>; | |
| case GGML_TYPE_Q2_K: | |
| return dequantize_row_q2_K_cuda; | |
| case GGML_TYPE_Q3_K: | |
| return dequantize_row_q3_K_cuda; | |
| case GGML_TYPE_Q4_K: | |
| return dequantize_row_q4_K_cuda; | |
| case GGML_TYPE_Q5_K: | |
| return dequantize_row_q5_K_cuda; | |
| case GGML_TYPE_Q6_K: | |
| return dequantize_row_q6_K_cuda; | |
| case GGML_TYPE_IQ2_XXS: | |
| return dequantize_row_iq2_xxs_cuda; | |
| case GGML_TYPE_IQ2_XS: | |
| return dequantize_row_iq2_xs_cuda; | |
| case GGML_TYPE_F16: | |
| return convert_unary_cuda<half>; | |
| default: | |
| return nullptr; | |
| } | |
| } | |
| static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| // the number of rows may exceed maximum grid size in the y or z dimensions, use the x dimension instead | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows); | |
| } | |
| static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows); | |
| } | |
| static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows); | |
| } | |
| static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows); | |
| } | |
| static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows); | |
| } | |
| static void dequantize_mul_mat_vec_q2_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2 | |
| const int block_num_y = (nrows + ny - 1) / ny; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(32, ny, 1); | |
| dequantize_mul_mat_vec_q2_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows); | |
| } | |
| static void dequantize_mul_mat_vec_q3_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int ny = 2 / K_QUANTS_PER_ITERATION; | |
| const int block_num_y = (nrows + ny - 1) / ny; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(32, ny, 1); | |
| dequantize_mul_mat_vec_q3_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows); | |
| } | |
| static void dequantize_mul_mat_vec_q4_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int ny = 2 / K_QUANTS_PER_ITERATION; | |
| const int block_num_y = (nrows + ny - 1) / ny; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(32, ny, 1); | |
| dequantize_mul_mat_vec_q4_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows); | |
| } | |
| static void dequantize_mul_mat_vec_q5_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const dim3 block_dims(32, 1, 1); | |
| dequantize_mul_mat_vec_q5_k<<<nrows, block_dims, 0, stream>>>(vx, y, dst, ncols); | |
| } | |
| static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int ny = 2 / K_QUANTS_PER_ITERATION; | |
| const int block_num_y = (nrows + ny - 1) / ny; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(32, ny, 1); | |
| dequantize_mul_mat_vec_q6_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows); | |
| } | |
| static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| dequantize_mul_mat_vec<1, 1, convert_f16> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows); | |
| } | |
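| // Two matrix-vector paths exist for quantized weights: the dequantize_mul_mat_vec_* | |
| // launchers above dequantize on the fly and accumulate in floating point, while the | |
| // mul_mat_vec_q*_q8_1 launchers below quantize y to q8_1 and use integer dot products | |
| // (dp4a), which is faster on hardware with fast 8-bit dot product support. | |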
| static void mul_mat_vec_q4_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK4_0 == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_q4_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK4_1 == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK4_0, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_q5_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK5_0 == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_q5_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK5_1 == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_q8_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK8_0 == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_q2_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_q3_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_q4_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_q5_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_q6_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_iq2_xxs_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK_K, QI2_XXS, block_iq2_xxs, 1, vec_dot_iq2_xxs_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
| static void mul_mat_vec_iq2_xs_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| GGML_ASSERT(ncols % QK_K == 0); | |
| const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; | |
| const dim3 block_nums(block_num_y, 1, 1); | |
| const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); | |
| mul_mat_vec_q<QK_K, QI2_XS, block_iq2_xs, 1, vec_dot_iq2_xs_q8_1> | |
| <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows); | |
| } | |
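| // The ggml_mul_mat_q*_q8_1_cuda launchers below pick tile sizes (mmq_x x mmq_y) and | |
| // warp counts per architecture (RDNA2, other AMD, Volta and newer, Pascal with dp4a) | |
| // and dispatch the bounds-checked kernel variant only when nrows_x is not a multiple | |
| // of the tile height mmq_y. | |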
| static void ggml_mul_mat_q4_0_q8_1_cuda( | |
| const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, | |
| const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| const int compute_capability = g_device_caps[id].cc; | |
| int mmq_x, mmq_y, nwarps; | |
| if (compute_capability >= CC_RDNA2) { | |
| mmq_x = MMQ_X_Q4_0_RDNA2; | |
| mmq_y = MMQ_Y_Q4_0_RDNA2; | |
| nwarps = NWARPS_Q4_0_RDNA2; | |
| } else if (compute_capability >= CC_OFFSET_AMD) { | |
| mmq_x = MMQ_X_Q4_0_RDNA1; | |
| mmq_y = MMQ_Y_Q4_0_RDNA1; | |
| nwarps = NWARPS_Q4_0_RDNA1; | |
| } else if (compute_capability >= CC_VOLTA) { | |
| mmq_x = MMQ_X_Q4_0_AMPERE; | |
| mmq_y = MMQ_Y_Q4_0_AMPERE; | |
| nwarps = NWARPS_Q4_0_AMPERE; | |
| } else if (compute_capability >= MIN_CC_DP4A) { | |
| mmq_x = MMQ_X_Q4_0_PASCAL; | |
| mmq_y = MMQ_Y_Q4_0_PASCAL; | |
| nwarps = NWARPS_Q4_0_PASCAL; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; | |
| const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; | |
| const dim3 block_nums(block_num_x, block_num_y, 1); | |
| const dim3 block_dims(WARP_SIZE, nwarps, 1); | |
| if (nrows_x % mmq_y == 0) { | |
| const bool need_check = false; | |
| mul_mat_q4_0<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } else { | |
| const bool need_check = true; | |
| mul_mat_q4_0<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } | |
| } | |
| static void ggml_mul_mat_q4_1_q8_1_cuda( | |
| const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, | |
| const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| const int compute_capability = g_device_caps[id].cc; | |
| int mmq_x, mmq_y, nwarps; | |
| if (compute_capability >= CC_RDNA2) { | |
| mmq_x = MMQ_X_Q4_1_RDNA2; | |
| mmq_y = MMQ_Y_Q4_1_RDNA2; | |
| nwarps = NWARPS_Q4_1_RDNA2; | |
| } else if (compute_capability >= CC_OFFSET_AMD) { | |
| mmq_x = MMQ_X_Q4_1_RDNA1; | |
| mmq_y = MMQ_Y_Q4_1_RDNA1; | |
| nwarps = NWARPS_Q4_1_RDNA1; | |
| } else if (compute_capability >= CC_VOLTA) { | |
| mmq_x = MMQ_X_Q4_1_AMPERE; | |
| mmq_y = MMQ_Y_Q4_1_AMPERE; | |
| nwarps = NWARPS_Q4_1_AMPERE; | |
| } else if (compute_capability >= MIN_CC_DP4A) { | |
| mmq_x = MMQ_X_Q4_1_PASCAL; | |
| mmq_y = MMQ_Y_Q4_1_PASCAL; | |
| nwarps = NWARPS_Q4_1_PASCAL; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; | |
| const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; | |
| const dim3 block_nums(block_num_x, block_num_y, 1); | |
| const dim3 block_dims(WARP_SIZE, nwarps, 1); | |
| if (nrows_x % mmq_y == 0) { | |
| const bool need_check = false; | |
| mul_mat_q4_1<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } else { | |
| const bool need_check = true; | |
| mul_mat_q4_1<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } | |
| } | |
| static void ggml_mul_mat_q5_0_q8_1_cuda( | |
| const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, | |
| const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| const int compute_capability = g_device_caps[id].cc; | |
| int mmq_x, mmq_y, nwarps; | |
| if (compute_capability >= CC_RDNA2) { | |
| mmq_x = MMQ_X_Q5_0_RDNA2; | |
| mmq_y = MMQ_Y_Q5_0_RDNA2; | |
| nwarps = NWARPS_Q5_0_RDNA2; | |
| } else if (compute_capability >= CC_OFFSET_AMD) { | |
| mmq_x = MMQ_X_Q5_0_RDNA1; | |
| mmq_y = MMQ_Y_Q5_0_RDNA1; | |
| nwarps = NWARPS_Q5_0_RDNA1; | |
| } else if (compute_capability >= CC_VOLTA) { | |
| mmq_x = MMQ_X_Q5_0_AMPERE; | |
| mmq_y = MMQ_Y_Q5_0_AMPERE; | |
| nwarps = NWARPS_Q5_0_AMPERE; | |
| } else if (compute_capability >= MIN_CC_DP4A) { | |
| mmq_x = MMQ_X_Q5_0_PASCAL; | |
| mmq_y = MMQ_Y_Q5_0_PASCAL; | |
| nwarps = NWARPS_Q5_0_PASCAL; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; | |
| const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; | |
| const dim3 block_nums(block_num_x, block_num_y, 1); | |
| const dim3 block_dims(WARP_SIZE, nwarps, 1); | |
| if (nrows_x % mmq_y == 0) { | |
| const bool need_check = false; | |
| mul_mat_q5_0<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } else { | |
| const bool need_check = true; | |
| mul_mat_q5_0<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } | |
| } | |
| static void ggml_mul_mat_q5_1_q8_1_cuda( | |
| const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, | |
| const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| const int compute_capability = g_device_caps[id].cc; | |
| int mmq_x, mmq_y, nwarps; | |
| if (compute_capability >= CC_RDNA2) { | |
| mmq_x = MMQ_X_Q5_1_RDNA2; | |
| mmq_y = MMQ_Y_Q5_1_RDNA2; | |
| nwarps = NWARPS_Q5_1_RDNA2; | |
| } else if (compute_capability >= CC_OFFSET_AMD) { | |
| mmq_x = MMQ_X_Q5_1_RDNA1; | |
| mmq_y = MMQ_Y_Q5_1_RDNA1; | |
| nwarps = NWARPS_Q5_1_RDNA1; | |
| } else if (compute_capability >= CC_VOLTA) { | |
| mmq_x = MMQ_X_Q5_1_AMPERE; | |
| mmq_y = MMQ_Y_Q5_1_AMPERE; | |
| nwarps = NWARPS_Q5_1_AMPERE; | |
| } else if (compute_capability >= MIN_CC_DP4A) { | |
| mmq_x = MMQ_X_Q5_1_PASCAL; | |
| mmq_y = MMQ_Y_Q5_1_PASCAL; | |
| nwarps = NWARPS_Q5_1_PASCAL; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; | |
| const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; | |
| const dim3 block_nums(block_num_x, block_num_y, 1); | |
| const dim3 block_dims(WARP_SIZE, nwarps, 1); | |
| if (nrows_x % mmq_y == 0) { | |
| const bool need_check = false; | |
| mul_mat_q5_1<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } else { | |
| const bool need_check = true; | |
| mul_mat_q5_1<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } | |
| } | |
| static void ggml_mul_mat_q8_0_q8_1_cuda( | |
| const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, | |
| const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| const int compute_capability = g_device_caps[id].cc; | |
| int mmq_x, mmq_y, nwarps; | |
| if (compute_capability >= CC_RDNA2) { | |
| mmq_x = MMQ_X_Q8_0_RDNA2; | |
| mmq_y = MMQ_Y_Q8_0_RDNA2; | |
| nwarps = NWARPS_Q8_0_RDNA2; | |
| } else if (compute_capability >= CC_OFFSET_AMD) { | |
| mmq_x = MMQ_X_Q8_0_RDNA1; | |
| mmq_y = MMQ_Y_Q8_0_RDNA1; | |
| nwarps = NWARPS_Q8_0_RDNA1; | |
| } else if (compute_capability >= CC_VOLTA) { | |
| mmq_x = MMQ_X_Q8_0_AMPERE; | |
| mmq_y = MMQ_Y_Q8_0_AMPERE; | |
| nwarps = NWARPS_Q8_0_AMPERE; | |
| } else if (compute_capability >= MIN_CC_DP4A) { | |
| mmq_x = MMQ_X_Q8_0_PASCAL; | |
| mmq_y = MMQ_Y_Q8_0_PASCAL; | |
| nwarps = NWARPS_Q8_0_PASCAL; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; | |
| const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; | |
| const dim3 block_nums(block_num_x, block_num_y, 1); | |
| const dim3 block_dims(WARP_SIZE, nwarps, 1); | |
| if (nrows_x % mmq_y == 0) { | |
| const bool need_check = false; | |
| mul_mat_q8_0<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } else { | |
| const bool need_check = true; | |
| mul_mat_q8_0<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } | |
| } | |
| static void ggml_mul_mat_q2_K_q8_1_cuda( | |
| const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, | |
| const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| const int compute_capability = g_device_caps[id].cc; | |
| int mmq_x, mmq_y, nwarps; | |
| if (compute_capability >= CC_RDNA2) { | |
| mmq_x = MMQ_X_Q2_K_RDNA2; | |
| mmq_y = MMQ_Y_Q2_K_RDNA2; | |
| nwarps = NWARPS_Q2_K_RDNA2; | |
| } else if (compute_capability >= CC_OFFSET_AMD) { | |
| mmq_x = MMQ_X_Q2_K_RDNA1; | |
| mmq_y = MMQ_Y_Q2_K_RDNA1; | |
| nwarps = NWARPS_Q2_K_RDNA1; | |
| } else if (compute_capability >= CC_VOLTA) { | |
| mmq_x = MMQ_X_Q2_K_AMPERE; | |
| mmq_y = MMQ_Y_Q2_K_AMPERE; | |
| nwarps = NWARPS_Q2_K_AMPERE; | |
| } else if (compute_capability >= MIN_CC_DP4A) { | |
| mmq_x = MMQ_X_Q2_K_PASCAL; | |
| mmq_y = MMQ_Y_Q2_K_PASCAL; | |
| nwarps = NWARPS_Q2_K_PASCAL; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; | |
| const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; | |
| const dim3 block_nums(block_num_x, block_num_y, 1); | |
| const dim3 block_dims(WARP_SIZE, nwarps, 1); | |
| if (nrows_x % mmq_y == 0) { | |
| const bool need_check = false; | |
| mul_mat_q2_K<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } else { | |
| const bool need_check = true; | |
| mul_mat_q2_K<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } | |
| } | |
| static void ggml_mul_mat_q3_K_q8_1_cuda( | |
| const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, | |
| const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| const int compute_capability = g_device_caps[id].cc; | |
| int mmq_x, mmq_y, nwarps; | |
| if (compute_capability >= CC_RDNA2) { | |
| mmq_x = MMQ_X_Q3_K_RDNA2; | |
| mmq_y = MMQ_Y_Q3_K_RDNA2; | |
| nwarps = NWARPS_Q3_K_RDNA2; | |
| } else if (compute_capability >= CC_OFFSET_AMD) { | |
| mmq_x = MMQ_X_Q3_K_RDNA1; | |
| mmq_y = MMQ_Y_Q3_K_RDNA1; | |
| nwarps = NWARPS_Q3_K_RDNA1; | |
| } else if (compute_capability >= CC_VOLTA) { | |
| mmq_x = MMQ_X_Q3_K_AMPERE; | |
| mmq_y = MMQ_Y_Q3_K_AMPERE; | |
| nwarps = NWARPS_Q3_K_AMPERE; | |
| } else if (compute_capability >= MIN_CC_DP4A) { | |
| mmq_x = MMQ_X_Q3_K_PASCAL; | |
| mmq_y = MMQ_Y_Q3_K_PASCAL; | |
| nwarps = NWARPS_Q3_K_PASCAL; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; | |
| const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; | |
| const dim3 block_nums(block_num_x, block_num_y, 1); | |
| const dim3 block_dims(WARP_SIZE, nwarps, 1); | |
| if (nrows_x % mmq_y == 0) { | |
| const bool need_check = false; | |
| mul_mat_q3_K<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } else { | |
| const bool need_check = true; | |
| mul_mat_q3_K<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } | |
| } | |
| static void ggml_mul_mat_q4_K_q8_1_cuda( | |
| const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, | |
| const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| const int compute_capability = g_device_caps[id].cc; | |
| int mmq_x, mmq_y, nwarps; | |
| if (compute_capability >= CC_RDNA2) { | |
| mmq_x = MMQ_X_Q4_K_RDNA2; | |
| mmq_y = MMQ_Y_Q4_K_RDNA2; | |
| nwarps = NWARPS_Q4_K_RDNA2; | |
| } else if (compute_capability >= CC_OFFSET_AMD) { | |
| mmq_x = MMQ_X_Q4_K_RDNA1; | |
| mmq_y = MMQ_Y_Q4_K_RDNA1; | |
| nwarps = NWARPS_Q4_K_RDNA1; | |
| } else if (compute_capability >= CC_VOLTA) { | |
| mmq_x = MMQ_X_Q4_K_AMPERE; | |
| mmq_y = MMQ_Y_Q4_K_AMPERE; | |
| nwarps = NWARPS_Q4_K_AMPERE; | |
| } else if (compute_capability >= MIN_CC_DP4A) { | |
| mmq_x = MMQ_X_Q4_K_PASCAL; | |
| mmq_y = MMQ_Y_Q4_K_PASCAL; | |
| nwarps = NWARPS_Q4_K_PASCAL; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; | |
| const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; | |
| const dim3 block_nums(block_num_x, block_num_y, 1); | |
| const dim3 block_dims(WARP_SIZE, nwarps, 1); | |
| if (nrows_x % mmq_y == 0) { | |
| const bool need_check = false; | |
| mul_mat_q4_K<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } else { | |
| const bool need_check = true; | |
| mul_mat_q4_K<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } | |
| } | |
| static void ggml_mul_mat_q5_K_q8_1_cuda( | |
| const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, | |
| const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| const int compute_capability = g_device_caps[id].cc; | |
| int mmq_x, mmq_y, nwarps; | |
| if (compute_capability >= CC_RDNA2) { | |
| mmq_x = MMQ_X_Q5_K_RDNA2; | |
| mmq_y = MMQ_Y_Q5_K_RDNA2; | |
| nwarps = NWARPS_Q5_K_RDNA2; | |
| } else if (compute_capability >= CC_OFFSET_AMD) { | |
| mmq_x = MMQ_X_Q5_K_RDNA1; | |
| mmq_y = MMQ_Y_Q5_K_RDNA1; | |
| nwarps = NWARPS_Q5_K_RDNA1; | |
| } else if (compute_capability >= CC_VOLTA) { | |
| mmq_x = MMQ_X_Q5_K_AMPERE; | |
| mmq_y = MMQ_Y_Q5_K_AMPERE; | |
| nwarps = NWARPS_Q5_K_AMPERE; | |
| } else if (compute_capability >= MIN_CC_DP4A) { | |
| mmq_x = MMQ_X_Q5_K_PASCAL; | |
| mmq_y = MMQ_Y_Q5_K_PASCAL; | |
| nwarps = NWARPS_Q5_K_PASCAL; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; | |
| const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; | |
| const dim3 block_nums(block_num_x, block_num_y, 1); | |
| const dim3 block_dims(WARP_SIZE, nwarps, 1); | |
| if (nrows_x % mmq_y == 0) { | |
| const bool need_check = false; | |
| mul_mat_q5_K<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } else { | |
| const bool need_check = true; | |
| mul_mat_q5_K<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } | |
| } | |
| static void ggml_mul_mat_q6_K_q8_1_cuda( | |
| const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, | |
| const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| const int compute_capability = g_device_caps[id].cc; | |
| int mmq_x, mmq_y, nwarps; | |
| if (compute_capability >= CC_RDNA2) { | |
| mmq_x = MMQ_X_Q6_K_RDNA2; | |
| mmq_y = MMQ_Y_Q6_K_RDNA2; | |
| nwarps = NWARPS_Q6_K_RDNA2; | |
| } else if (compute_capability >= CC_OFFSET_AMD) { | |
| mmq_x = MMQ_X_Q6_K_RDNA1; | |
| mmq_y = MMQ_Y_Q6_K_RDNA1; | |
| nwarps = NWARPS_Q6_K_RDNA1; | |
| } else if (compute_capability >= CC_VOLTA) { | |
| mmq_x = MMQ_X_Q6_K_AMPERE; | |
| mmq_y = MMQ_Y_Q6_K_AMPERE; | |
| nwarps = NWARPS_Q6_K_AMPERE; | |
| } else if (compute_capability >= MIN_CC_DP4A) { | |
| mmq_x = MMQ_X_Q6_K_PASCAL; | |
| mmq_y = MMQ_Y_Q6_K_PASCAL; | |
| nwarps = NWARPS_Q6_K_PASCAL; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; | |
| const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; | |
| const dim3 block_nums(block_num_x, block_num_y, 1); | |
| const dim3 block_dims(WARP_SIZE, nwarps, 1); | |
| if (nrows_x % mmq_y == 0) { | |
| const bool need_check = false; | |
| mul_mat_q6_K<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } else { | |
| const bool need_check = true; | |
| mul_mat_q6_K<need_check><<<block_nums, block_dims, 0, stream>>> | |
| (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); | |
| } | |
| } | |
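| // note: the MMQ launchers above all follow the same pattern: pick the tile size (mmq_x x mmq_y) | |
| // and warp count for the detected architecture, then launch a grid of | |
| // ceil(nrows_x/mmq_y) x ceil(ncols_y/mmq_x) blocks; out-of-bounds checks are only compiled in | |
| // when nrows_x is not a multiple of the tile height | |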
| static void ggml_mul_mat_p021_f16_f32_cuda( | |
| const void * vx, const float * y, float * dst, const int ncols_x, const int nrows_x, | |
| const int nchannels_x, const int nchannels_y, cudaStream_t stream) { | |
| const dim3 block_nums(1, nrows_x, nchannels_y); | |
| const dim3 block_dims(WARP_SIZE, 1, 1); | |
| mul_mat_p021_f16_f32<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols_x, nrows_x, nchannels_x, nchannels_y); | |
| } | |
| static void ggml_mul_mat_vec_nc_f16_f32_cuda( | |
| const void * vx, const float * y, float * dst, const int ncols_x, const int nrows_x, const int row_stride_x, | |
| const int nchannels_x, const int nchannels_y, const int channel_stride_x, cudaStream_t stream) { | |
| const dim3 block_nums(1, nrows_x, nchannels_y); | |
| const dim3 block_dims(WARP_SIZE, 1, 1); | |
| mul_mat_vec_nc_f16_f32<<<block_nums, block_dims, 0, stream>>> | |
| (vx, y, dst, ncols_x, nrows_x, row_stride_x, channel_stride_x, nchannels_y/nchannels_x); | |
| } | |
| static void ggml_cpy_f32_f32_cuda( | |
| const char * cx, char * cdst, const int ne, | |
| const int ne00, const int ne01, const int nb00, const int nb01, const int nb02, | |
| const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) { | |
| const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; | |
| cpy_f32_f16<cpy_1_f32_f32><<<num_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>> | |
| (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12); | |
| } | |
| static void ggml_cpy_f32_f16_cuda( | |
| const char * cx, char * cdst, const int ne, | |
| const int ne00, const int ne01, const int nb00, const int nb01, const int nb02, | |
| const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) { | |
| const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; | |
| cpy_f32_f16<cpy_1_f32_f16><<<num_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>> | |
| (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12); | |
| } | |
| static void ggml_cpy_f32_q8_0_cuda( | |
| const char * cx, char * cdst, const int ne, | |
| const int ne00, const int ne01, const int nb00, const int nb01, const int nb02, | |
| const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) { | |
| GGML_ASSERT(ne % QK8_0 == 0); | |
| const int num_blocks = ne / QK8_0; | |
| cpy_f32_q<cpy_blck_f32_q8_0, QK8_0><<<num_blocks, 1, 0, stream>>> | |
| (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12); | |
| } | |
| static void ggml_cpy_f32_q4_0_cuda( | |
| const char * cx, char * cdst, const int ne, | |
| const int ne00, const int ne01, const int nb00, const int nb01, const int nb02, | |
| const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) { | |
| GGML_ASSERT(ne % QK4_0 == 0); | |
| const int num_blocks = ne / QK4_0; | |
| cpy_f32_q<cpy_blck_f32_q4_0, QK4_0><<<num_blocks, 1, 0, stream>>> | |
| (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12); | |
| } | |
| static void ggml_cpy_f32_q4_1_cuda( | |
| const char * cx, char * cdst, const int ne, | |
| const int ne00, const int ne01, const int nb00, const int nb01, const int nb02, | |
| const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) { | |
| GGML_ASSERT(ne % QK4_1 == 0); | |
| const int num_blocks = ne / QK4_1; | |
| cpy_f32_q<cpy_blck_f32_q4_1, QK4_1><<<num_blocks, 1, 0, stream>>> | |
| (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12); | |
| } | |
| static void ggml_cpy_f16_f16_cuda( | |
| const char * cx, char * cdst, const int ne, | |
| const int ne00, const int ne01, const int nb00, const int nb01, const int nb02, | |
| const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) { | |
| const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; | |
| cpy_f32_f16<cpy_1_f16_f16><<<num_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>> | |
| (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12); | |
| } | |
| static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) { | |
| const int num_blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE; | |
| scale_f32<<<num_blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, k); | |
| } | |
| static void clamp_f32_cuda(const float * x, float * dst, const float min, const float max, const int k, cudaStream_t stream) { | |
| const int num_blocks = (k + CUDA_CLAMP_BLOCK_SIZE - 1) / CUDA_CLAMP_BLOCK_SIZE; | |
| clamp_f32<<<num_blocks, CUDA_CLAMP_BLOCK_SIZE, 0, stream>>>(x, dst, min, max, k); | |
| } | |
| template<typename T> | |
| static void rope_cuda( | |
| const T * x, T * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, | |
| float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream | |
| ) { | |
| GGML_ASSERT(ncols % 2 == 0); | |
| const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); | |
| const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); | |
| const dim3 block_nums(nrows, num_blocks_x, 1); | |
| if (pos == nullptr) { | |
| rope<T, false><<<block_nums, block_dims, 0, stream>>>( | |
| x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims | |
| ); | |
| } else { | |
| rope<T, true><<<block_nums, block_dims, 0, stream>>>( | |
| x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims | |
| ); | |
| } | |
| } | |
| template<typename T> | |
| static void rope_neox_cuda( | |
| const T * x, T * dst, int ncols, int n_dims, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, | |
| float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream | |
| ) { | |
| GGML_ASSERT(ncols % 2 == 0); | |
| const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); | |
| const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); | |
| const dim3 block_nums(nrows, num_blocks_x, 1); | |
| const float theta_scale = powf(freq_base, -2.0f/n_dims); | |
| const float inv_ndims = -1.0f / n_dims; | |
| if (pos == nullptr) { | |
| rope_neox<T, false><<<block_nums, block_dims, 0, stream>>>( | |
| x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims, | |
| theta_scale, inv_ndims | |
| ); | |
| } else { | |
| rope_neox<T, true><<<block_nums, block_dims, 0, stream>>>( | |
| x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims, | |
| theta_scale, inv_ndims | |
| ); | |
| } | |
| } | |
| static void rope_glm_f32_cuda( | |
| const float * x, float * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, | |
| float freq_base, int n_ctx, cudaStream_t stream | |
| ) { | |
| GGML_ASSERT(ncols % 4 == 0); | |
| const dim3 block_dims(CUDA_ROPE_BLOCK_SIZE/4, 1, 1); | |
| const int num_blocks_x = (ncols + CUDA_ROPE_BLOCK_SIZE - 1) / CUDA_ROPE_BLOCK_SIZE; | |
| const dim3 block_nums(num_blocks_x, nrows, 1); | |
| rope_glm_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, n_ctx); | |
| } | |
| static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, | |
| const int k_rows, const int n_heads_log2_floor, const float m0, | |
| const float m1, cudaStream_t stream) { | |
| const dim3 block_dims(CUDA_ALIBI_BLOCK_SIZE, 1, 1); | |
| const int num_blocks_x = (ncols + CUDA_ALIBI_BLOCK_SIZE - 1) / (CUDA_ALIBI_BLOCK_SIZE); | |
| const dim3 block_nums(num_blocks_x, nrows, 1); | |
| alibi_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, k_rows, n_heads_log2_floor, m0, m1); | |
| } | |
| static void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) { | |
| const dim3 block_dims(WARP_SIZE, 1, 1); | |
| const dim3 block_nums(1, nrows, 1); | |
| k_sum_rows_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols); | |
| } | |
| static void argsort_f32_i32_cuda(const float * x, int * dst, const int ncols, const int nrows, ggml_sort_order order, cudaStream_t stream) { | |
| // bitonic sort requires ncols to be power of 2 | |
| GGML_ASSERT((ncols & (ncols - 1)) == 0); | |
| const dim3 block_dims(ncols, 1, 1); | |
| const dim3 block_nums(1, nrows, 1); | |
| if (order == GGML_SORT_ASC) { | |
| k_argsort_f32_i32<GGML_SORT_ASC><<<block_nums, block_dims, 0, stream>>>(x, dst, ncols); | |
| } else if (order == GGML_SORT_DESC) { | |
| k_argsort_f32_i32<GGML_SORT_DESC><<<block_nums, block_dims, 0, stream>>>(x, dst, ncols); | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| } | |
| static void diag_mask_inf_f32_cuda(const float * x, float * dst, const int ncols_x, const int nrows_x, const int rows_per_channel, const int n_past, cudaStream_t stream) { | |
| const dim3 block_dims(1, CUDA_DIAG_MASK_INF_BLOCK_SIZE, 1); | |
| const int block_num_x = (ncols_x + CUDA_DIAG_MASK_INF_BLOCK_SIZE - 1) / CUDA_DIAG_MASK_INF_BLOCK_SIZE; | |
| const dim3 block_nums(nrows_x, block_num_x, 1); | |
| diag_mask_inf_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols_x, rows_per_channel, n_past); | |
| } | |
| static void soft_max_f16_cuda(const float * x, const float * y, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, cudaStream_t stream) { | |
| int nth = WARP_SIZE; | |
| while (nth < ncols_x/2 && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2; | |
| const dim3 block_dims(nth, 1, 1); | |
| const dim3 block_nums(nrows_x, 1, 1); | |
| const size_t shmem = (GGML_PAD(ncols_x, 2*WARP_SIZE) + WARP_SIZE)*sizeof(half); | |
| static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted."); | |
| if (shmem <= g_device_caps[g_main_device].smpb) { | |
| switch (ncols_x) { | |
| case 32: | |
| soft_max_f16<true, 32, 32, true><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 64: | |
| soft_max_f16<true, 64, 32, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 128: | |
| soft_max_f16<true, 128, 64, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 256: | |
| soft_max_f16<true, 256, 128, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 512: | |
| soft_max_f16<true, 512, 256, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 1024: | |
| soft_max_f16<true, 1024, 512, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 2048: | |
| soft_max_f16<true, 2048, 1024, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 4096: | |
| soft_max_f16<true, 4096, 1024, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| default: | |
| soft_max_f16<true, 0, 0, true><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| } | |
| } else { | |
| const size_t shmem_low = WARP_SIZE*sizeof(half); | |
| soft_max_f16<false, 0, 0, true><<<block_nums, block_dims, shmem_low, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| } | |
| } | |
| static void soft_max_f32_cuda(const float * x, const float * y, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, cudaStream_t stream) { | |
| int nth = WARP_SIZE; | |
| while (nth < ncols_x && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2; | |
| const dim3 block_dims(nth, 1, 1); | |
| const dim3 block_nums(nrows_x, 1, 1); | |
| const size_t shmem = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE)*sizeof(float); | |
| static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted."); | |
| if (shmem < g_device_caps[g_main_device].smpb) { | |
| switch (ncols_x) { | |
| case 32: | |
| soft_max_f32<true, 32, 32><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 64: | |
| soft_max_f32<true, 64, 64><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 128: | |
| soft_max_f32<true, 128, 128><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 256: | |
| soft_max_f32<true, 256, 256><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 512: | |
| soft_max_f32<true, 512, 512><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 1024: | |
| soft_max_f32<true, 1024, 1024><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 2048: | |
| soft_max_f32<true, 2048, 1024><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| case 4096: | |
| soft_max_f32<true, 4096, 1024><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| default: | |
| soft_max_f32<true, 0, 0><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| break; | |
| } | |
| } else { | |
| const size_t shmem_low = WARP_SIZE*sizeof(float); | |
| soft_max_f32<false, 0, 0><<<block_nums, block_dims, shmem_low, stream>>>(x, y, dst, ncols_x, nrows_y, scale); | |
| } | |
| } | |
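| // note: both soft_max launchers specialize the kernel on the row length when the padded row | |
| // fits in shared memory; otherwise they fall back to a generic variant that only keeps the | |
| // per-warp reduction scratch in shared memory | |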
| static void im2col_f32_f16_cuda(const float* x, half* dst, | |
| int IW, int IH, int OW, int OH, int KW, int KH, int IC, | |
| int offset_delta, | |
| int s0,int s1,int p0,int p1,int d0,int d1, cudaStream_t stream) { | |
| const int parallel_elements = OW * KW * KH; | |
| const int num_blocks = (parallel_elements + CUDA_IM2COL_BLOCK_SIZE - 1) / CUDA_IM2COL_BLOCK_SIZE; | |
| dim3 block_nums(num_blocks, OH, IC); | |
| im2col_f32_f16<<<block_nums, CUDA_IM2COL_BLOCK_SIZE, 0, stream>>>(x, dst, offset_delta, IW, IH, OW, KW, KH, parallel_elements, (IC * KH * KW), s0, s1, p0, p1, d0, d1); | |
| } | |
| // buffer pool for cuda | |
| struct scoped_spin_lock { | |
| std::atomic_flag& lock; | |
| scoped_spin_lock(std::atomic_flag& lock) : lock(lock) { | |
| while (lock.test_and_set(std::memory_order_acquire)) { | |
| ; // spin | |
| } | |
| } | |
| ~scoped_spin_lock() { | |
| lock.clear(std::memory_order_release); | |
| } | |
| scoped_spin_lock(const scoped_spin_lock&) = delete; | |
| scoped_spin_lock& operator=(const scoped_spin_lock&) = delete; | |
| }; | |
| static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT; | |
| // #define DEBUG_CUDA_MALLOC | |
| struct ggml_cuda_buffer { | |
| void * ptr = nullptr; | |
| size_t size = 0; | |
| }; | |
| static ggml_cuda_buffer g_cuda_buffer_pool[GGML_CUDA_MAX_DEVICES][MAX_CUDA_BUFFERS]; | |
| static size_t g_cuda_pool_size[GGML_CUDA_MAX_DEVICES] = {0}; | |
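| // legacy pool: a per-device array of cached cudaMalloc buffers; allocation does a best-fit | |
| // search over the cached buffers and, if nothing fits, calls cudaMalloc with ~5% look-ahead | |
| // padding so the buffer can be reused for slightly larger requests later | |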
| static void * ggml_cuda_pool_malloc_leg(int device, size_t size, size_t * actual_size) { | |
| scoped_spin_lock lock(g_cuda_pool_lock); | |
| int nnz = 0; | |
| size_t max_size = 0; | |
| size_t best_diff = 1ull << 36; | |
| int ibest = -1; | |
| for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) { | |
| ggml_cuda_buffer& b = g_cuda_buffer_pool[device][i]; | |
| if (b.ptr != nullptr) { | |
| ++nnz; | |
| if (b.size > max_size) max_size = b.size; | |
| if (b.size >= size) { | |
| size_t diff = b.size - size; | |
| if (diff < best_diff) { | |
| best_diff = diff; | |
| ibest = i; | |
| if (!best_diff) { | |
| void * ptr = b.ptr; | |
| *actual_size = b.size; | |
| b.ptr = nullptr; | |
| b.size = 0; | |
| return ptr; | |
| } | |
| } | |
| } | |
| } | |
| } | |
| if (ibest >= 0) { | |
| ggml_cuda_buffer& b = g_cuda_buffer_pool[device][ibest]; | |
| void * ptr = b.ptr; | |
| *actual_size = b.size; | |
| b.ptr = nullptr; | |
| b.size = 0; | |
| return ptr; | |
| } | |
| void * ptr; | |
| size_t look_ahead_size = (size_t) (1.05 * size); | |
| look_ahead_size = 256 * ((look_ahead_size + 255)/256); | |
| ggml_cuda_set_device(device); | |
| CUDA_CHECK(cudaMalloc((void **) &ptr, look_ahead_size)); | |
| *actual_size = look_ahead_size; | |
| g_cuda_pool_size[device] += look_ahead_size; | |
| fprintf(stderr, "%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, requested %u MB\n", __func__, id, nnz, | |
| (uint32_t)(max_size/1024/1024), (uint32_t)(g_cuda_pool_size[id]/1024/1024), (uint32_t)(size/1024/1024)); | |
| return ptr; | |
| } | |
| static void ggml_cuda_pool_free_leg(int device, void * ptr, size_t size) { | |
| scoped_spin_lock lock(g_cuda_pool_lock); | |
| for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) { | |
| ggml_cuda_buffer& b = g_cuda_buffer_pool[device][i]; | |
| if (b.ptr == nullptr) { | |
| b.ptr = ptr; | |
| b.size = size; | |
| return; | |
| } | |
| } | |
| fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n"); | |
| ggml_cuda_set_device(device); | |
| CUDA_CHECK(cudaFree(ptr)); | |
| g_cuda_pool_size[device] -= size; | |
| } | |
| // pool with virtual memory | |
| static CUdeviceptr g_cuda_pool_addr[GGML_CUDA_MAX_DEVICES] = {0}; | |
| static size_t g_cuda_pool_used[GGML_CUDA_MAX_DEVICES] = {0}; | |
| static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 35; // 32 GB | |
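| // the VMM pool reserves a large virtual address range per device up front and maps physical | |
| // memory into it on demand; allocations are bump-pointer style, so frees must happen in | |
| // reverse order of the allocations (see ggml_cuda_pool_free_vmm) | |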
| static void * ggml_cuda_pool_malloc_vmm(int device, size_t size, size_t * actual_size) { | |
| scoped_spin_lock lock(g_cuda_pool_lock); | |
| // round up the allocation size to the alignment to ensure that all allocations are aligned for all data types | |
| const size_t alignment = 128; | |
| size = alignment * ((size + alignment - 1) / alignment); | |
| size_t avail = g_cuda_pool_size[device] - g_cuda_pool_used[device]; | |
| if (size > avail) { | |
| // round up to the next multiple of the granularity | |
| size_t reserve_size = size - avail; | |
| const size_t granularity = g_device_caps[device].vmm_granularity; | |
| reserve_size = granularity * ((reserve_size + granularity - 1) / granularity); | |
| GGML_ASSERT(g_cuda_pool_size[device] + reserve_size <= CUDA_POOL_VMM_MAX_SIZE); | |
| // allocate more physical memory | |
| CUmemAllocationProp prop = {}; | |
| prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; | |
| prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; | |
| prop.location.id = device; | |
| CUmemGenericAllocationHandle handle; | |
| CU_CHECK(cuMemCreate(&handle, reserve_size, &prop, 0)); | |
| // reserve virtual address space (if not already reserved) | |
| if (g_cuda_pool_addr[device] == 0) { | |
| CU_CHECK(cuMemAddressReserve(&g_cuda_pool_addr[device], CUDA_POOL_VMM_MAX_SIZE, 0, 0, 0)); | |
| } | |
| // map at the end of the pool | |
| CU_CHECK(cuMemMap(g_cuda_pool_addr[device] + g_cuda_pool_size[device], reserve_size, 0, handle, 0)); | |
| // the memory allocation handle is no longer needed after mapping | |
| CU_CHECK(cuMemRelease(handle)); | |
| // set access | |
| CUmemAccessDesc access = {}; | |
| access.location.type = CU_MEM_LOCATION_TYPE_DEVICE; | |
| access.location.id = device; | |
| access.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE; | |
| CU_CHECK(cuMemSetAccess(g_cuda_pool_addr[device] + g_cuda_pool_size[device], reserve_size, &access, 1)); | |
| // add to the pool | |
| g_cuda_pool_size[device] += reserve_size; | |
| //printf("cuda pool[%d]: size increased to %llu MB (reserved %llu MB)\n", | |
| // id, (unsigned long long) (g_cuda_pool_size[id]/1024/1024), | |
| // (unsigned long long) (reserve_size/1024/1024)); | |
| } | |
| GGML_ASSERT(g_cuda_pool_addr[device] != 0); | |
| void * ptr = (void *) (g_cuda_pool_addr[device] + g_cuda_pool_used[device]); | |
| *actual_size = size; | |
| g_cuda_pool_used[device] += size; | |
| printf("cuda pool[%d]: allocated %llu bytes at %llx [%s]\n", id, (unsigned long long) size, ptr); | |
| return ptr; | |
| } | |
| static void ggml_cuda_pool_free_vmm(int device, void * ptr, size_t size) { | |
| scoped_spin_lock lock(g_cuda_pool_lock); | |
| printf("cuda pool[%d]: freed %llu bytes at %llx\n", id, (unsigned long long) size, ptr); | |
| g_cuda_pool_used[device] -= size; | |
| // all deallocations must be in reverse order of the allocations | |
| GGML_ASSERT(ptr == (void *) (g_cuda_pool_addr[device] + g_cuda_pool_used[device])); | |
| } | |
| static void * ggml_cuda_pool_malloc(int device, size_t size, size_t * actual_size) { | |
| if (g_device_caps[device].vmm) { | |
| return ggml_cuda_pool_malloc_vmm(device, size, actual_size); | |
| } else { | |
| return ggml_cuda_pool_malloc_leg(device, size, actual_size); | |
| } | |
| } | |
| static void ggml_cuda_pool_free(int device, void * ptr, size_t size) { | |
| if (g_device_caps[device].vmm) { | |
| ggml_cuda_pool_free_vmm(device, ptr, size); | |
| } else { | |
| ggml_cuda_pool_free_leg(device, ptr, size); | |
| } | |
| } | |
| template<typename T> | |
| struct cuda_pool_alloc { | |
| int device = -1; | |
| T * ptr = nullptr; | |
| size_t actual_size = 0; | |
| // size is in number of elements | |
| T * alloc(size_t size) { | |
| GGML_ASSERT(ptr == nullptr); | |
| CUDA_CHECK(cudaGetDevice(&device)); | |
| ptr = (T *) ggml_cuda_pool_malloc(device, size * sizeof(T), &this->actual_size); | |
| return ptr; | |
| } | |
| cuda_pool_alloc(size_t size) { | |
| alloc(size); | |
| } | |
| ~cuda_pool_alloc() { | |
| if (ptr != nullptr) { | |
| ggml_cuda_pool_free(device, ptr, actual_size); | |
| } | |
| } | |
| T * get() { | |
| return ptr; | |
| } | |
| cuda_pool_alloc() = default; | |
| cuda_pool_alloc(const cuda_pool_alloc &) = delete; | |
| cuda_pool_alloc(cuda_pool_alloc &&) = delete; | |
| cuda_pool_alloc& operator=(const cuda_pool_alloc &) = delete; | |
| cuda_pool_alloc& operator=(cuda_pool_alloc &&) = delete; | |
| }; | |
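| // illustrative usage of cuda_pool_alloc (names here are hypothetical, not from this file): | |
| //   cuda_pool_alloc<float> tmp(n);                      // takes n floats from the current device's pool | |
| //   some_kernel<<<grid, block, 0, stream>>>(tmp.get()); // use the buffer | |
| //   // the buffer is returned to the pool when tmp goes out of scope | |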
| static bool g_cublas_loaded = false; | |
| bool ggml_cublas_loaded(void) { | |
| return g_cublas_loaded; | |
| } | |
| void ggml_init_cublas() { | |
| static bool initialized = false; | |
| if (!initialized) { | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| // Workaround for a rocBLAS bug when using multiple graphics cards: | |
| // https://github.com/ROCmSoftwarePlatform/rocBLAS/issues/1346 | |
| rocblas_initialize(); | |
| CUDA_CHECK(cudaDeviceSynchronize()); | |
| #endif | |
| if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) { | |
| initialized = true; | |
| g_cublas_loaded = false; | |
| return; | |
| } | |
| GGML_ASSERT(g_device_count <= GGML_CUDA_MAX_DEVICES); | |
| int64_t total_vram = 0; | |
| fprintf(stderr, "%s: GGML_CUDA_FORCE_MMQ: yes\n", __func__); | |
| fprintf(stderr, "%s: GGML_CUDA_FORCE_MMQ: no\n", __func__); | |
| fprintf(stderr, "%s: CUDA_USE_TENSOR_CORES: yes\n", __func__); | |
| fprintf(stderr, "%s: CUDA_USE_TENSOR_CORES: no\n", __func__); | |
| fprintf(stderr, "%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, g_device_count); | |
| for (int id = 0; id < g_device_count; ++id) { | |
| int device_vmm = 0; | |
| CUdevice device; | |
| CU_CHECK(cuDeviceGet(&device, id)); | |
| CU_CHECK(cuDeviceGetAttribute(&device_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device)); | |
| if (device_vmm) { | |
| CUmemAllocationProp alloc_prop = {}; | |
| alloc_prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; | |
| alloc_prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; | |
| alloc_prop.location.id = id; | |
| CU_CHECK(cuMemGetAllocationGranularity(&g_device_caps[id].vmm_granularity, &alloc_prop, CU_MEM_ALLOC_GRANULARITY_RECOMMENDED)); | |
| } | |
| g_device_caps[id].vmm = !!device_vmm; | |
| cudaDeviceProp prop; | |
| CUDA_CHECK(cudaGetDeviceProperties(&prop, id)); | |
| fprintf(stderr, " Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no"); | |
| g_tensor_split[id] = total_vram; | |
| total_vram += prop.totalGlobalMem; | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| g_device_caps[id].cc = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD; | |
| #else | |
| g_device_caps[id].cc = 100*prop.major + 10*prop.minor; | |
| #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| g_device_caps[id].smpb = prop.sharedMemPerBlock; | |
| } | |
| for (int id = 0; id < g_device_count; ++id) { | |
| g_tensor_split[id] /= total_vram; | |
| } | |
| for (int id = 0; id < g_device_count; ++id) { | |
| ggml_cuda_set_device(id); | |
| // create cuda streams | |
| for (int is = 0; is < MAX_STREAMS; ++is) { | |
| CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams[id][is], cudaStreamNonBlocking)); | |
| } | |
| // create cublas handle | |
| CUBLAS_CHECK(cublasCreate(&g_cublas_handles[id])); | |
| CUBLAS_CHECK(cublasSetMathMode(g_cublas_handles[id], CUBLAS_TF32_TENSOR_OP_MATH)); | |
| } | |
| // configure logging to stdout | |
| // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr)); | |
| initialized = true; | |
| g_cublas_loaded = true; | |
| } | |
| } | |
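| // the user-provided per-device fractions are converted into a normalized cumulative split: | |
| // g_tensor_split[i] holds the fraction of rows at which device i starts | |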
| void ggml_cuda_set_tensor_split(const float * tensor_split) { | |
| if (tensor_split == nullptr) { | |
| return; | |
| } | |
| bool all_zero = true; | |
| for (int i = 0; i < g_device_count; ++i) { | |
| if (tensor_split[i] != 0.0f) { | |
| all_zero = false; | |
| break; | |
| } | |
| } | |
| if (all_zero) { | |
| return; | |
| } | |
| float split_sum = 0.0f; | |
| for (int i = 0; i < g_device_count; ++i) { | |
| g_tensor_split[i] = split_sum; | |
| split_sum += tensor_split[i]; | |
| } | |
| for (int i = 0; i < g_device_count; ++i) { | |
| g_tensor_split[i] /= split_sum; | |
| } | |
| } | |
| void * ggml_cuda_host_malloc(size_t size) { | |
| if (getenv("GGML_CUDA_NO_PINNED") != nullptr) { | |
| return nullptr; | |
| } | |
| void * ptr = nullptr; | |
| cudaError_t err = cudaMallocHost((void **) &ptr, size); | |
| if (err != cudaSuccess) { | |
| // clear the error | |
| cudaGetLastError(); | |
| fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory: %s\n", | |
| size/1024.0/1024.0, cudaGetErrorString(err)); | |
| return nullptr; | |
| } | |
| return ptr; | |
| } | |
| void ggml_cuda_host_free(void * ptr) { | |
| CUDA_CHECK(cudaFreeHost(ptr)); | |
| } | |
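| // copies a 2D slice (rows i1_low..i1_high of plane i2,i3) of a possibly non-contiguous tensor | |
| // into a contiguous device buffer, using a single memcpy when the rows are contiguous and | |
| // cudaMemcpy2DAsync or per-row copies otherwise | |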
| static cudaError_t ggml_cuda_cpy_tensor_2d( | |
| void * dst, const struct ggml_tensor * src, int64_t i3, int64_t i2, int64_t i1_low, int64_t i1_high, cudaStream_t stream) { | |
| cudaMemcpyKind kind; | |
| char * src_ptr; | |
| if (src->backend == GGML_BACKEND_CPU) { | |
| kind = cudaMemcpyHostToDevice; | |
| src_ptr = (char *) src->data; | |
| } else if (src->backend == GGML_BACKEND_GPU || src->backend == GGML_BACKEND_GPU_SPLIT) { | |
| GGML_ASSERT(src->backend != GGML_BACKEND_GPU_SPLIT || (i1_low == 0 && i1_high == src->ne[1])); | |
| kind = cudaMemcpyDeviceToDevice; | |
| ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src->extra; | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| src_ptr = (char *) extra->data_device[id]; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| char * dst_ptr = (char *) dst; | |
| const int64_t ne0 = src->ne[0]; | |
| const int64_t nb0 = src->nb[0]; | |
| const int64_t nb1 = src->nb[1]; | |
| const int64_t nb2 = src->nb[2]; | |
| const int64_t nb3 = src->nb[3]; | |
| const enum ggml_type type = src->type; | |
| const int64_t ts = ggml_type_size(type); | |
| const int64_t bs = ggml_blck_size(type); | |
| int64_t i1_diff = i1_high - i1_low; | |
| const char * x = src_ptr + i1_low*nb1 + i2*nb2 + i3*nb3; | |
| if (nb0 == ts && nb1 == ts*ne0/bs) { | |
| return cudaMemcpyAsync(dst_ptr, x, i1_diff*nb1, kind, stream); | |
| } else if (nb0 == ts) { | |
| return cudaMemcpy2DAsync(dst_ptr, ts*ne0/bs, x, nb1, ts*ne0/bs, i1_diff, kind, stream); | |
| } else { | |
| for (int64_t i1 = 0; i1 < i1_diff; i1++) { | |
| const void * rx = (const void *) ((const char *) x + i1*nb1); | |
| void * rd = (void *) (dst_ptr + i1*ts*ne0/bs); | |
| // pretend the row is a matrix with cols=1 | |
| cudaError_t r = cudaMemcpy2DAsync(rd, ts/bs, rx, nb0, ts/bs, ne0, kind, stream); | |
| if (r != cudaSuccess) return r; | |
| } | |
| return cudaSuccess; | |
| } | |
| } | |
| static void ggml_cuda_op_get_rows( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_d, const float * src1_d, float * dst_d, cudaStream_t stream) { | |
| GGML_ASSERT(src1->type == GGML_TYPE_I32); | |
| GGML_ASSERT(dst->type == GGML_TYPE_F32); | |
| GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); | |
| GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type)); | |
| GGML_ASSERT(dst->nb[0] == ggml_type_size(dst->type)); | |
| const int32_t * src1_i32 = (const int32_t *) src1_d; | |
| switch (src0->type) { | |
| case GGML_TYPE_F16: | |
| get_rows_cuda_float(src0, src1, dst, (const half *)src0_d, src1_i32, dst_d, stream); | |
| break; | |
| case GGML_TYPE_F32: | |
| get_rows_cuda_float(src0, src1, dst, src0_d, src1_i32, dst_d, stream); | |
| break; | |
| case GGML_TYPE_Q4_0: | |
| get_rows_cuda<QK4_0, QR4_0, dequantize_q4_0>(src0, src1, dst, src0_d, src1_i32, dst_d, stream); | |
| break; | |
| case GGML_TYPE_Q4_1: | |
| get_rows_cuda<QK4_1, QR4_1, dequantize_q4_1>(src0, src1, dst, src0_d, src1_i32, dst_d, stream); | |
| break; | |
| case GGML_TYPE_Q5_0: | |
| get_rows_cuda<QK5_0, QR5_0, dequantize_q5_0>(src0, src1, dst, src0_d, src1_i32, dst_d, stream); | |
| break; | |
| case GGML_TYPE_Q5_1: | |
| get_rows_cuda<QK5_1, QR5_1, dequantize_q5_1>(src0, src1, dst, src0_d, src1_i32, dst_d, stream); | |
| break; | |
| case GGML_TYPE_Q8_0: | |
| get_rows_cuda<QK8_0, QR8_0, dequantize_q8_0>(src0, src1, dst, src0_d, src1_i32, dst_d, stream); | |
| break; | |
| default: | |
| // TODO: k-quants | |
| fprintf(stderr, "%s: unsupported type: %s\n", __func__, ggml_type_name(src0->type)); | |
| GGML_ASSERT(false); | |
| break; | |
| } | |
| } | |
| template<class op> | |
| static void ggml_cuda_op_bin_bcast( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src1->type == GGML_TYPE_F32); | |
| if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { | |
| op()(src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream); | |
| } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { | |
| op()(src0, src1, dst, (const half *) src0_dd, src1_dd, (half *) dst_dd, main_stream); | |
| } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) { | |
| op()(src0, src1, dst, (const half *) src0_dd, src1_dd, dst_dd, main_stream); | |
| } else { | |
| fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__, | |
| ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type)); | |
| GGML_ASSERT(false); | |
| } | |
| } | |
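| // repeat is implemented on top of the broadcast binary-op machinery: dst provides the output | |
| // shape and src0 is broadcast into it | |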
| static void ggml_cuda_op_repeat( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_d, const float * src1_d, float * dst_d, cudaStream_t main_stream) { | |
| ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_repeat>>(dst, src0, dst, nullptr, src0_d, dst_d, main_stream); | |
| (void) src1; | |
| (void) src1_d; | |
| } | |
| static void ggml_cuda_op_add( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_add>>(src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream); | |
| } | |
| static void ggml_cuda_op_acc( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT(src1->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| GGML_ASSERT(dst->ne[3] == 1); // just 3D tensors supported | |
| int nb1 = dst->op_params[0] / 4; // 4 bytes of float32 | |
| int nb2 = dst->op_params[1] / 4; // 4 bytes of float32 | |
| // int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused | |
| int offset = dst->op_params[3] / 4; // op_params[3] is the offset in bytes; divided by 4 it becomes an offset in float32 elements | |
| acc_f32_cuda(src0_dd, src1_dd, dst_dd, ggml_nelements(dst), src1->ne[0], src1->ne[1], src1->ne[2], nb1, nb2, offset, main_stream); | |
| (void) dst; | |
| } | |
| static void ggml_cuda_op_mul( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_mul>>(src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream); | |
| } | |
| static void ggml_cuda_op_div( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_div>>(src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream); | |
| } | |
| static void ggml_cuda_op_gelu( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| gelu_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_silu( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| silu_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_gelu_quick( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| gelu_quick_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_tanh( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| tanh_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_relu( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| relu_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_leaky_relu( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| float negative_slope; | |
| memcpy(&negative_slope, dst->op_params, sizeof(float)); | |
| leaky_relu_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), negative_slope, main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_sqr( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| sqr_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_norm( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t nrows = ggml_nrows(src0); | |
| float eps; | |
| memcpy(&eps, dst->op_params, sizeof(float)); | |
| norm_f32_cuda(src0_dd, dst_dd, ne00, nrows, eps, main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_group_norm( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| int num_groups = dst->op_params[0]; | |
| int group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups); | |
| group_norm_f32_cuda(src0_dd, dst_dd, num_groups, group_size, src0->ne[0] * src0->ne[1] * src0->ne[2], main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_concat( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT(src1->type == GGML_TYPE_F32); | |
| GGML_ASSERT(dst->type == GGML_TYPE_F32); | |
| for (int i3 = 0; i3 < dst->ne[3]; i3++) { | |
| concat_f32_cuda(src0_dd + i3 * (src0->nb[3] / 4), src1_dd + i3 * (src1->nb[3] / 4), dst_dd + i3 * (dst->nb[3] / 4), dst->ne[0], dst->ne[1], dst->ne[2], src0->ne[2], main_stream); | |
| } | |
| (void) src1; | |
| (void) dst; | |
| } | |
| static void ggml_cuda_op_upscale( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT(dst->type == GGML_TYPE_F32); | |
| GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors | |
| const int scale_factor = dst->op_params[0]; | |
| upscale_f32_cuda(src0_dd, dst_dd, src0->ne[0], src0->ne[1], src0->ne[2], scale_factor, main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_pad( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT(dst->type == GGML_TYPE_F32); | |
| GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors | |
| pad_f32_cuda(src0_dd, dst_dd, | |
| src0->ne[0], src0->ne[1], src0->ne[2], | |
| dst->ne[0], dst->ne[1], dst->ne[2], main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_rms_norm( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t nrows = ggml_nrows(src0); | |
| float eps; | |
| memcpy(&eps, dst->op_params, sizeof(float)); | |
| rms_norm_f32_cuda(src0_dd, dst_dd, ne00, nrows, eps, main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_mul_mat_q( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, | |
| const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, | |
| const int64_t src1_padded_row_size, cudaStream_t stream) { | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t ne10 = src1->ne[0]; | |
| GGML_ASSERT(ne10 % QK8_1 == 0); | |
| const int64_t ne0 = dst->ne[0]; | |
| const int64_t row_diff = row_high - row_low; | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| // the main device has a larger memory buffer to hold the results from all GPUs | |
| // nrows_dst == nrows of the matrix that the dequantize_mul_mat kernel writes into | |
| const int64_t nrows_dst = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : row_diff; | |
| switch (src0->type) { | |
| case GGML_TYPE_Q4_0: | |
| ggml_mul_mat_q4_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); | |
| break; | |
| case GGML_TYPE_Q4_1: | |
| ggml_mul_mat_q4_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); | |
| break; | |
| case GGML_TYPE_Q5_0: | |
| ggml_mul_mat_q5_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); | |
| break; | |
| case GGML_TYPE_Q5_1: | |
| ggml_mul_mat_q5_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); | |
| break; | |
| case GGML_TYPE_Q8_0: | |
| ggml_mul_mat_q8_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); | |
| break; | |
| case GGML_TYPE_Q2_K: | |
| ggml_mul_mat_q2_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); | |
| break; | |
| case GGML_TYPE_Q3_K: | |
| ggml_mul_mat_q3_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); | |
| break; | |
| case GGML_TYPE_Q4_K: | |
| ggml_mul_mat_q4_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); | |
| break; | |
| case GGML_TYPE_Q5_K: | |
| ggml_mul_mat_q5_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); | |
| break; | |
| case GGML_TYPE_Q6_K: | |
| ggml_mul_mat_q6_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); | |
| break; | |
| default: | |
| GGML_ASSERT(false); | |
| break; | |
| } | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_ddf_i; | |
| } | |
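| // returns the granularity to which the per-device row split is rounded, so that each device's | |
| // slice is a multiple of the tile heights used by the mul_mat kernels for the given type | |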
| static int64_t get_row_rounding(ggml_type type) { | |
| int64_t min_compute_capability = INT_MAX; | |
| int64_t max_compute_capability = INT_MIN; | |
| for (int id = 0; id < g_device_count; ++id) { | |
| if (g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { | |
| if (min_compute_capability > g_device_caps[id].cc) { | |
| min_compute_capability = g_device_caps[id].cc; | |
| } | |
| if (max_compute_capability < g_device_caps[id].cc) { | |
| max_compute_capability = g_device_caps[id].cc; | |
| } | |
| } | |
| } | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| switch(type) { | |
| case GGML_TYPE_Q4_0: | |
| case GGML_TYPE_Q4_1: | |
| case GGML_TYPE_Q5_0: | |
| case GGML_TYPE_Q5_1: | |
| case GGML_TYPE_Q8_0: | |
| return max_compute_capability >= CC_RDNA2 ? 128 : 64; | |
| case GGML_TYPE_F16: | |
| case GGML_TYPE_F32: | |
| return 1; | |
| case GGML_TYPE_Q2_K: | |
| return max_compute_capability >= CC_RDNA2 ? 128 : 32; | |
| case GGML_TYPE_Q3_K: | |
| return min_compute_capability < CC_RDNA2 ? 128 : 64; | |
| case GGML_TYPE_Q4_K: | |
| case GGML_TYPE_Q5_K: | |
| case GGML_TYPE_Q6_K: | |
| case GGML_TYPE_IQ2_XXS: | |
| case GGML_TYPE_IQ2_XS: | |
| return max_compute_capability >= CC_RDNA2 ? 128 : 64; | |
| default: | |
| GGML_ASSERT(false); | |
| } | |
| #else | |
| switch(type) { | |
| case GGML_TYPE_Q4_0: | |
| case GGML_TYPE_Q4_1: | |
| return max_compute_capability >= CC_VOLTA ? 128 : 64; | |
| case GGML_TYPE_Q5_0: | |
| case GGML_TYPE_Q5_1: | |
| case GGML_TYPE_Q8_0: | |
| return 64; | |
| case GGML_TYPE_F16: | |
| case GGML_TYPE_F32: | |
| return 1; | |
| case GGML_TYPE_Q2_K: | |
| case GGML_TYPE_Q3_K: | |
| case GGML_TYPE_Q4_K: | |
| case GGML_TYPE_Q5_K: | |
| case GGML_TYPE_IQ2_XXS: | |
| case GGML_TYPE_IQ2_XS: | |
| return max_compute_capability >= CC_VOLTA ? 128 : 64; | |
| case GGML_TYPE_Q6_K: | |
| return 64; | |
| default: | |
| GGML_ASSERT(false); | |
| } | |
| #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| } | |
| static void ggml_cuda_op_mul_mat_vec_q( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, | |
| const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, | |
| const int64_t src1_padded_row_size, cudaStream_t stream) { | |
| GGML_ASSERT(ggml_nrows(src1) == 1); | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t row_diff = row_high - row_low; | |
| switch (src0->type) { | |
| case GGML_TYPE_Q4_0: | |
| mul_mat_vec_q4_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q4_1: | |
| mul_mat_vec_q4_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q5_0: | |
| mul_mat_vec_q5_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q5_1: | |
| mul_mat_vec_q5_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q8_0: | |
| mul_mat_vec_q8_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q2_K: | |
| mul_mat_vec_q2_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q3_K: | |
| mul_mat_vec_q3_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q4_K: | |
| mul_mat_vec_q4_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q5_K: | |
| mul_mat_vec_q5_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q6_K: | |
| mul_mat_vec_q6_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_IQ2_XXS: | |
| mul_mat_vec_iq2_xxs_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_IQ2_XS: | |
| mul_mat_vec_iq2_xs_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| default: | |
| GGML_ASSERT(false); | |
| break; | |
| } | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_ddf_i; | |
| (void) src1_ncols; | |
| (void) src1_padded_row_size; | |
| } | |
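| // dequantize + mat-vec path: with GGML_CUDA_F16 the f32 src1 row is first converted to half | |
| // so the kernels can use half precision intrinsics; otherwise it is consumed as f32 directly | |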
| static void ggml_cuda_op_dequantize_mul_mat_vec( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, | |
| const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, | |
| const int64_t src1_padded_row_size, cudaStream_t stream) { | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t row_diff = row_high - row_low; | |
| GGML_ASSERT(src1->type == GGML_TYPE_F32); | |
| // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics | |
| #ifdef GGML_CUDA_F16 | |
| cuda_pool_alloc<half> src1_dfloat_a; | |
| half * src1_dfloat = nullptr; // dfloat == half | |
| bool src1_convert_f16 = | |
| src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 || | |
| src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 || | |
| src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16; | |
| if (src1_convert_f16) { | |
| src1_dfloat = src1_dfloat_a.alloc(ne00); | |
| ggml_cpy_f32_f16_cuda((const char *) src1_ddf_i, (char *) src1_dfloat, ne00, | |
| ne00, 1, sizeof(float), 0, 0, | |
| ne00, 1, sizeof(half), 0, 0, stream); | |
| } | |
| #else | |
| const dfloat * src1_dfloat = (const dfloat *) src1_ddf_i; // dfloat == float, no conversion | |
| #endif // GGML_CUDA_F16 | |
| switch (src0->type) { | |
| case GGML_TYPE_Q4_0: | |
| dequantize_mul_mat_vec_q4_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q4_1: | |
| dequantize_mul_mat_vec_q4_1_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q5_0: | |
| dequantize_mul_mat_vec_q5_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q5_1: | |
| dequantize_mul_mat_vec_q5_1_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q8_0: | |
| dequantize_mul_mat_vec_q8_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q2_K: | |
| dequantize_mul_mat_vec_q2_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q3_K: | |
| dequantize_mul_mat_vec_q3_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q4_K: | |
| dequantize_mul_mat_vec_q4_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q5_K: | |
| dequantize_mul_mat_vec_q5_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_Q6_K: | |
| dequantize_mul_mat_vec_q6_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| case GGML_TYPE_F16: | |
| convert_mul_mat_vec_f16_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); | |
| break; | |
| default: | |
| GGML_ASSERT(false); | |
| break; | |
| } | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_ddq_i; | |
| (void) src1_ncols; | |
| (void) src1_padded_row_size; | |
| } | |
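| // matrix multiplication through cuBLAS: on Volta+ with f16/quantized src0 the inputs are converted to f16 and | |
| // cublasGemmEx (tensor cores) is used; otherwise everything is converted to f32 and cublasSgemm is called | |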
| static void ggml_cuda_op_mul_mat_cublas( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, | |
| const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, | |
| const int64_t src1_padded_row_size, cudaStream_t stream) { | |
| GGML_ASSERT(src0_dd_i != nullptr); | |
| GGML_ASSERT(src1_ddf_i != nullptr); | |
| GGML_ASSERT(dst_dd_i != nullptr); | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t ne10 = src1->ne[0]; | |
| const int64_t ne0 = dst->ne[0]; | |
| const int64_t row_diff = row_high - row_low; | |
| int id; | |
| CUDA_CHECK(cudaGetDevice(&id)); | |
| // the main device has a larger memory buffer to hold the results from all GPUs | |
| // ldc == nrows of the matrix that cuBLAS writes into | |
| int ldc = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : row_diff; | |
| const int compute_capability = g_device_caps[id].cc; | |
| if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) { | |
| //printf("this branch\n"); | |
| // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 | |
| cuda_pool_alloc<half> src0_as_f16; | |
| if (src0->type != GGML_TYPE_F16) { | |
| const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src0->type); | |
| GGML_ASSERT(to_fp16_cuda != nullptr); | |
| size_t ne = row_diff*ne00; | |
| src0_as_f16.alloc(ne); | |
| to_fp16_cuda(src0_dd_i, src0_as_f16.get(), ne, stream); | |
| } | |
| const half * src0_ptr = src0->type == GGML_TYPE_F16 ? (const half *) src0_dd_i : src0_as_f16.get(); | |
| cuda_pool_alloc<half> src1_as_f16; | |
| if (src1->type != GGML_TYPE_F16) { | |
| const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); | |
| GGML_ASSERT(to_fp16_cuda != nullptr); | |
| size_t ne = src1_ncols*ne10; | |
| src1_as_f16.alloc(ne); | |
| to_fp16_cuda(src1_ddf_i, src1_as_f16.get(), ne, stream); | |
| } | |
| const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16.get(); | |
| cuda_pool_alloc<half> dst_f16(row_diff*src1_ncols); | |
| const half alpha_f16 = 1.0f; | |
| const half beta_f16 = 0.0f; | |
| CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream)); | |
| CUBLAS_CHECK( | |
| cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, | |
| row_diff, src1_ncols, ne10, | |
| &alpha_f16, src0_ptr, CUDA_R_16F, ne00, | |
| src1_ptr, CUDA_R_16F, ne10, | |
| &beta_f16, dst_f16.get(), CUDA_R_16F, ldc, | |
| CUBLAS_COMPUTE_16F, | |
| CUBLAS_GEMM_DEFAULT_TENSOR_OP)); | |
| const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); | |
| to_fp32_cuda(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream); | |
| } else { | |
| cuda_pool_alloc<float> src0_ddq_as_f32; | |
| cuda_pool_alloc<float> src1_ddq_as_f32; | |
| if (src0->type != GGML_TYPE_F32) { | |
| const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type); | |
| GGML_ASSERT(to_fp32_cuda != nullptr); | |
| src0_ddq_as_f32.alloc(row_diff*ne00); | |
| to_fp32_cuda(src0_dd_i, src0_ddq_as_f32.get(), row_diff*ne00, stream); | |
| } | |
| if (src1->type != GGML_TYPE_F32) { | |
| const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src1->type); | |
| GGML_ASSERT(to_fp32_cuda != nullptr); | |
| src1_ddq_as_f32.alloc(src1_ncols*ne10); | |
| to_fp32_cuda(src1_ddf_i, src1_ddq_as_f32.get(), src1_ncols*ne10, stream); | |
| } | |
| const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32.get(); | |
| const float * src1_ddf1_i = src1->type == GGML_TYPE_F32 ? (const float *) src1_ddf_i : src1_ddq_as_f32.get(); | |
| const float alpha = 1.0f; | |
| const float beta = 0.0f; | |
| CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream)); | |
| CUBLAS_CHECK( | |
| cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, | |
| row_diff, src1_ncols, ne10, | |
| &alpha, src0_ddf_i, ne00, | |
| src1_ddf1_i, ne10, | |
| &beta, dst_dd_i, ldc)); | |
| } | |
| (void) dst; | |
| (void) src1_ddq_i; | |
| (void) src1_padded_row_size; | |
| } | |
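| // RoPE: reads the rotation parameters packed into dst->op_params and launches the appropriate | |
| // rotary embedding kernel (standard or NeoX) based on the mode bits | |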
| static void ggml_cuda_op_rope( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); | |
| GGML_ASSERT(src0->type == dst->type); | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t ne01 = src0->ne[1]; | |
| const int64_t ne2 = dst->ne[2]; | |
| const int64_t nrows = ggml_nrows(src0); | |
| //const int n_past = ((int32_t *) dst->op_params)[0]; | |
| const int n_dims = ((int32_t *) dst->op_params)[1]; | |
| const int mode = ((int32_t *) dst->op_params)[2]; | |
| const int n_ctx = ((int32_t *) dst->op_params)[3]; | |
| const int n_orig_ctx = ((int32_t *) dst->op_params)[4]; | |
| // RoPE alteration for extended context | |
| float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; | |
| memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); | |
| memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); | |
| memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); | |
| memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); | |
| memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); | |
| memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); | |
| const int32_t * pos = nullptr; | |
| if ((mode & 1) == 0) { | |
| GGML_ASSERT(src1->type == GGML_TYPE_I32); | |
| GGML_ASSERT(src1->ne[0] == ne2); | |
| pos = (const int32_t *) src1_dd; | |
| } | |
| const bool is_neox = mode & 2; | |
| const bool is_glm = mode & 4; | |
| rope_corr_dims corr_dims; | |
| ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims.v); | |
| // compute | |
| if (is_glm) { | |
| GGML_ASSERT(false); | |
| rope_glm_f32_cuda(src0_dd, dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, n_ctx, main_stream); | |
| } else if (is_neox) { | |
| if (src0->type == GGML_TYPE_F32) { | |
| rope_neox_cuda( | |
| (const float *)src0_dd, (float *)dst_dd, ne00, n_dims, nrows, pos, freq_scale, ne01, freq_base, ext_factor, | |
| attn_factor, corr_dims, main_stream | |
| ); | |
| } else if (src0->type == GGML_TYPE_F16) { | |
| rope_neox_cuda( | |
| (const half *)src0_dd, (half *)dst_dd, ne00, n_dims, nrows, pos, freq_scale, ne01, freq_base, ext_factor, | |
| attn_factor, corr_dims, main_stream | |
| ); | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| } else { | |
| if (src0->type == GGML_TYPE_F32) { | |
| rope_cuda( | |
| (const float *)src0_dd, (float *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor, | |
| attn_factor, corr_dims, main_stream | |
| ); | |
| } else if (src0->type == GGML_TYPE_F16) { | |
| rope_cuda( | |
| (const half *)src0_dd, (half *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor, | |
| attn_factor, corr_dims, main_stream | |
| ); | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| } | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
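| // ALiBi: applies the linear position bias with per-head slopes derived from max_bias (m0/m1), | |
| // using the usual power-of-two grouping of attention heads | |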
| static void ggml_cuda_op_alibi( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t ne01 = src0->ne[1]; | |
| const int64_t ne02 = src0->ne[2]; | |
| const int64_t nrows = ggml_nrows(src0); | |
| //const int n_past = ((int32_t *) dst->op_params)[0]; | |
| const int n_head = ((int32_t *) dst->op_params)[1]; | |
| float max_bias; | |
| memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float)); | |
| //GGML_ASSERT(ne01 + n_past == ne00); | |
| GGML_ASSERT(n_head == ne02); | |
| const int n_heads_log2_floor = 1 << (int) floor(log2(n_head)); | |
| const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor); | |
| const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor); | |
| alibi_f32_cuda(src0_dd, dst_dd, ne00, nrows, ne01, n_heads_log2_floor, m0, m1, main_stream); | |
| (void) src1; | |
| (void) src1_dd; | |
| } | |
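| // im2col: unfolds the f32 input src1 into the f16 patch matrix expected by the convolution GEMM, | |
| // using the stride/padding/dilation values packed into dst->op_params | |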
| static void ggml_cuda_op_im2col( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F16); | |
| GGML_ASSERT(src1->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F16); | |
| const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; | |
| const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; | |
| const int32_t p0 = ((const int32_t*)(dst->op_params))[2]; | |
| const int32_t p1 = ((const int32_t*)(dst->op_params))[3]; | |
| const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; | |
| const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; | |
| const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1; | |
| const int64_t IC = src1->ne[is_2D ? 2 : 1]; | |
| const int64_t IH = is_2D ? src1->ne[1] : 1; | |
| const int64_t IW = src1->ne[0]; | |
| const int64_t KH = is_2D ? src0->ne[1] : 1; | |
| const int64_t KW = src0->ne[0]; | |
| const int64_t OH = is_2D ? dst->ne[2] : 1; | |
| const int64_t OW = dst->ne[1]; | |
| const size_t delta_offset = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32 | |
| im2col_f32_f16_cuda(src1_dd, (half*) dst_dd, IW, IH, OW, OH, KW, KH, IC, delta_offset, s0, s1, p0, p1, d0, d1, main_stream); | |
| (void) src0; | |
| (void) src0_dd; | |
| } | |
| static void ggml_cuda_op_sum_rows( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| const int64_t ncols = src0->ne[0]; | |
| const int64_t nrows = ggml_nrows(src0); | |
| sum_rows_f32_cuda(src0_dd, dst_dd, ncols, nrows, main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_argsort( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_I32); | |
| const int64_t ncols = src0->ne[0]; | |
| const int64_t nrows = ggml_nrows(src0); | |
| enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0]; | |
| argsort_f32_i32_cuda(src0_dd, (int *)dst_dd, ncols, nrows, order, main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_diag_mask_inf( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t ne01 = src0->ne[1]; | |
| const int nrows0 = ggml_nrows(src0); | |
| const int n_past = ((int32_t *) dst->op_params)[0]; | |
| diag_mask_inf_f32_cuda(src0_dd, dst_dd, ne00, nrows0, ne01, n_past, main_stream); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_soft_max( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t nrows_x = ggml_nrows(src0); | |
| const int64_t nrows_y = src1 ? ggml_nrows(src1) : 1; | |
| float scale = 1.0f; | |
| memcpy(&scale, dst->op_params, sizeof(float)); | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| const bool use_f16_soft_max = false; // half intrinsics are not used for soft max on HIP | |
| #elif defined(GGML_CUDA_F16) | |
| const bool use_f16_soft_max = true; | |
| #else | |
| const bool use_f16_soft_max = false; | |
| #endif | |
| if (use_f16_soft_max) { | |
| soft_max_f16_cuda(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00, nrows_x, nrows_y, scale, main_stream); | |
| } else { | |
| soft_max_f32_cuda(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00, nrows_x, nrows_y, scale, main_stream); | |
| } | |
| (void) dst; | |
| } | |
| static void ggml_cuda_op_scale( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| float scale; | |
| memcpy(&scale, dst->op_params, sizeof(float)); | |
| scale_f32_cuda(src0_dd, dst_dd, scale, ggml_nelements(src0), main_stream); | |
| CUDA_CHECK(cudaGetLastError()); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
| static void ggml_cuda_op_clamp( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, | |
| const float * src0_dd, const float * src1_dd, float * dst_dd, cudaStream_t main_stream) { | |
| GGML_ASSERT(src0->type == GGML_TYPE_F32); | |
| GGML_ASSERT( dst->type == GGML_TYPE_F32); | |
| float min; | |
| float max; | |
| memcpy(&min, dst->op_params, sizeof(float)); | |
| memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); | |
| clamp_f32_cuda(src0_dd, dst_dd, min, max, ggml_nelements(src0), main_stream); | |
| CUDA_CHECK(cudaGetLastError()); | |
| (void) src1; | |
| (void) dst; | |
| (void) src1_dd; | |
| } | |
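| // helper for element-wise / single-kernel ops: makes src0, src1 and dst available on the main device | |
| // (copying from the host if needed), runs the op on the main stream and copies the result back to the | |
| // host when dst lives on the CPU | |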
| static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const ggml_cuda_op_flatten_t op) { | |
| const int64_t nrows0 = ggml_nrows(src0); | |
| const bool use_src1 = src1 != nullptr; | |
| const int64_t nrows1 = use_src1 ? ggml_nrows(src1) : 1; | |
| GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_GPU_SPLIT); | |
| GGML_ASSERT( dst->backend != GGML_BACKEND_GPU_SPLIT); | |
| ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; | |
| ggml_tensor_extra_gpu * src1_extra = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr; | |
| ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra; | |
| const bool src0_on_device = src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT; | |
| const bool src1_on_device = use_src1 && src1->backend == GGML_BACKEND_GPU; | |
| const bool dst_on_device = dst->backend == GGML_BACKEND_GPU; | |
| // dd = data device | |
| float * src0_ddf = nullptr; | |
| float * src1_ddf = nullptr; | |
| float * dst_ddf = nullptr; | |
| cuda_pool_alloc<float> src0_f; | |
| cuda_pool_alloc<float> src1_f; | |
| cuda_pool_alloc<float> dst_f; | |
| ggml_cuda_set_device(g_main_device); | |
| cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; | |
| if (src0_on_device) { | |
| src0_ddf = (float *) src0_extra->data_device[g_main_device]; | |
| } else { | |
| src0_ddf = src0_f.alloc(ggml_nelements(src0)); | |
| CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddf, src0, 0, 0, 0, nrows0, main_stream)); | |
| } | |
| if (use_src1) { | |
| if (src1_on_device) { | |
| src1_ddf = (float *) src1_extra->data_device[g_main_device]; | |
| } else { | |
| src1_ddf = src1_f.alloc(ggml_nelements(src1)); | |
| CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf, src1, 0, 0, 0, nrows1, main_stream)); | |
| } | |
| } | |
| if (dst_on_device) { | |
| dst_ddf = (float *) dst_extra->data_device[g_main_device]; | |
| } else { | |
| dst_ddf = dst_f.alloc(ggml_nelements(dst)); | |
| } | |
| // do the computation | |
| op(src0, src1, dst, src0_ddf, src1_ddf, dst_ddf, main_stream); | |
| CUDA_CHECK(cudaGetLastError()); | |
| // copy dst to host if necessary | |
| if (!dst_on_device) { | |
| CUDA_CHECK(cudaMemcpyAsync(dst->data, dst_ddf, ggml_nbytes(dst), cudaMemcpyDeviceToHost, main_stream)); | |
| } | |
| if (dst->backend == GGML_BACKEND_CPU) { | |
| CUDA_CHECK(cudaDeviceSynchronize()); | |
| } | |
| } | |
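| // enables or disables peer access between the main device and the other devices depending on the batch size; | |
| // peer access is only kept enabled for small batches (n_tokens <= GGML_CUDA_PEER_MAX_BATCH_SIZE) | |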
| static void ggml_cuda_set_peer_access(const int n_tokens) { | |
| static bool peer_access_enabled = false; | |
| const bool enable_peer_access = n_tokens <= GGML_CUDA_PEER_MAX_BATCH_SIZE; | |
| if (peer_access_enabled == enable_peer_access) { | |
| return; | |
| } | |
| for (int id = 0; id < g_device_count; ++id) { | |
| ggml_cuda_set_device(id); | |
| CUDA_CHECK(cudaDeviceSynchronize()); | |
| } | |
| for (int id = 0; id < g_device_count; ++id) { | |
| ggml_cuda_set_device(id); | |
| for (int id_other = 0; id_other < g_device_count; ++id_other) { | |
| if (id == id_other) { | |
| continue; | |
| } | |
| if (id != g_main_device && id_other != g_main_device) { | |
| continue; | |
| } | |
| int can_access_peer; | |
| CUDA_CHECK(cudaDeviceCanAccessPeer(&can_access_peer, id, id_other)); | |
| if (can_access_peer) { | |
| if (enable_peer_access) { | |
| CUDA_CHECK(cudaDeviceEnablePeerAccess(id_other, 0)); | |
| } else { | |
| CUDA_CHECK(cudaDeviceDisablePeerAccess(id_other)); | |
| } | |
| } | |
| } | |
| } | |
| peer_access_enabled = enable_peer_access; | |
| } | |
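| // generic driver for matrix multiplication ops: splits the rows of src0 across the available GPUs according | |
| // to g_tensor_split, streams src1 in column blocks of MUL_MAT_SRC1_COL_STRIDE, runs `op` on each device/stream | |
| // and gathers the partial results back on the main device (or the host) | |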
| static void ggml_cuda_op_mul_mat( | |
| const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, ggml_cuda_op_mul_mat_t op, | |
| const bool convert_src1_to_q8_1) { | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t ne01 = src0->ne[1]; | |
| const int64_t ne02 = src0->ne[2]; | |
| const int64_t ne03 = src0->ne[3]; | |
| const int64_t ne10 = src1->ne[0]; | |
| const int64_t ne11 = src1->ne[1]; | |
| const int64_t ne12 = src1->ne[2]; | |
| const int64_t ne13 = src1->ne[3]; | |
| const int64_t nrows1 = ggml_nrows(src1); | |
| GGML_ASSERT(ne03 == ne13); | |
| const int64_t ne0 = dst->ne[0]; | |
| const int64_t ne1 = dst->ne[1]; | |
| const int nb2 = dst->nb[2]; | |
| const int nb3 = dst->nb[3]; | |
| GGML_ASSERT(dst->backend != GGML_BACKEND_GPU_SPLIT); | |
| GGML_ASSERT(src1->backend != GGML_BACKEND_GPU_SPLIT); | |
| GGML_ASSERT(src1->type == GGML_TYPE_F32 || (src1->ne[2] == 1 && src1->ne[3] == 1)); | |
| GGML_ASSERT(ne12 >= ne02 && ne12 % ne02 == 0); | |
| const int64_t i02_divisor = ne12 / ne02; | |
| const size_t src0_ts = ggml_type_size(src0->type); | |
| const size_t src0_bs = ggml_blck_size(src0->type); | |
| const size_t q8_1_ts = sizeof(block_q8_1); | |
| const size_t q8_1_bs = QK8_1; | |
| ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; | |
| ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra; | |
| ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra; | |
| const bool src0_on_device = src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT; | |
| const bool src0_is_contiguous = ggml_is_contiguous(src0); | |
| const bool src1_is_contiguous = ggml_is_contiguous(src1); | |
| const int64_t src1_padded_col_size = GGML_PAD(ne10, MATRIX_ROW_PADDING); | |
| const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT; | |
| GGML_ASSERT(!(split && ne02 > 1)); | |
| GGML_ASSERT(!(split && ne03 > 1)); | |
| GGML_ASSERT(!(split && ne02 < ne12)); | |
| struct dev_data { | |
| cuda_pool_alloc<char> src0_dd_alloc; | |
| cuda_pool_alloc<float> src1_ddf_alloc; | |
| cuda_pool_alloc<char> src1_ddq_alloc; | |
| cuda_pool_alloc<float> dst_dd_alloc; | |
| char * src0_dd = nullptr; | |
| float * src1_ddf = nullptr; // float | |
| char * src1_ddq = nullptr; // q8_1 | |
| float * dst_dd = nullptr; | |
| int64_t row_low; | |
| int64_t row_high; | |
| }; | |
| dev_data dev[GGML_CUDA_MAX_DEVICES]; | |
| int used_devices = 0; | |
| for (int id = 0; id < g_device_count; ++id) { | |
| // by default, use all rows | |
| dev[id].row_low = 0; | |
| dev[id].row_high = ne01; | |
| // for multi GPU, get the row boundaries from tensor split | |
| // and round to mul_mat_q tile sizes | |
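| // e.g. with ne01 = 4096 rows, g_tensor_split = {0.0f, 0.55f} and a rounding of 128: | |
| // dev[1].row_low = 4096*0.55 = 2252 -> rounded down to 2176, so device 0 gets rows [0, 2176) and device 1 gets [2176, 4096) | |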
| if (split) { | |
| const int64_t rounding = get_row_rounding(src0->type); | |
| if (id != 0) { | |
| dev[id].row_low = ne01*g_tensor_split[id]; | |
| if (dev[id].row_low < ne01) { | |
| dev[id].row_low -= dev[id].row_low % rounding; | |
| } | |
| } | |
| if (id != g_device_count - 1) { | |
| dev[id].row_high = ne01*g_tensor_split[id + 1]; | |
| if (dev[id].row_high < ne01) { | |
| dev[id].row_high -= dev[id].row_high % rounding; | |
| } | |
| } | |
| } | |
| } | |
| for (int id = 0; id < g_device_count; ++id) { | |
| if ((!split && id != g_main_device) || dev[id].row_low == dev[id].row_high) { | |
| continue; | |
| } | |
| used_devices++; | |
| const bool src1_on_device = src1->backend == GGML_BACKEND_GPU && id == g_main_device; | |
| const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device; | |
| ggml_cuda_set_device(id); | |
| cudaStream_t stream = g_cudaStreams[id][0]; | |
| if (src0_on_device && src0_is_contiguous) { | |
| dev[id].src0_dd = (char *) src0_extra->data_device[id]; | |
| } else { | |
| dev[id].src0_dd = dev[id].src0_dd_alloc.alloc(ggml_nbytes(src0)); | |
| } | |
| if (src1_on_device && src1_is_contiguous) { | |
| dev[id].src1_ddf = (float *) src1_extra->data_device[id]; | |
| } else { | |
| dev[id].src1_ddf = dev[id].src1_ddf_alloc.alloc(ggml_nelements(src1)); | |
| } | |
| if (convert_src1_to_q8_1) { | |
| dev[id].src1_ddq = dev[id].src1_ddq_alloc.alloc(nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs); | |
| if (src1_on_device && src1_is_contiguous) { | |
| quantize_row_q8_1_cuda(dev[id].src1_ddf, dev[id].src1_ddq, ne10, nrows1, src1_padded_col_size, stream); | |
| CUDA_CHECK(cudaGetLastError()); | |
| } | |
| } | |
| if (dst_on_device) { | |
| dev[id].dst_dd = (float *) dst_extra->data_device[id]; | |
| } else { | |
| const size_t size_dst_ddf = split ? (dev[id].row_high - dev[id].row_low)*ne1 : ggml_nelements(dst); | |
| dev[id].dst_dd = dev[id].dst_dd_alloc.alloc(size_dst_ddf); | |
| } | |
| } | |
| // if multiple devices are used they need to wait for the main device | |
| // here an event is recorded that signals that the main device has finished calculating the input data | |
| if (split && used_devices > 1) { | |
| ggml_cuda_set_device(g_main_device); | |
| CUDA_CHECK(cudaEventRecord(src0_extra->events[g_main_device][0], g_cudaStreams[g_main_device][0])); | |
| } | |
| const int64_t src1_col_stride = split && used_devices > 1 ? MUL_MAT_SRC1_COL_STRIDE : ne11; | |
| for (int64_t src1_col_0 = 0; src1_col_0 < ne11; src1_col_0 += src1_col_stride) { | |
| const int64_t is = split ? (src1_col_0/src1_col_stride) % MAX_STREAMS : 0; | |
| const int64_t src1_ncols = src1_col_0 + src1_col_stride > ne11 ? ne11 - src1_col_0 : src1_col_stride; | |
| for (int id = 0; id < g_device_count; ++id) { | |
| if ((!split && id != g_main_device) || dev[id].row_low == dev[id].row_high) { | |
| continue; | |
| } | |
| const bool src1_on_device = src1->backend == GGML_BACKEND_GPU && id == g_main_device; | |
| const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device; | |
| const int64_t row_diff = dev[id].row_high - dev[id].row_low; | |
| ggml_cuda_set_device(id); | |
| cudaStream_t stream = g_cudaStreams[id][is]; | |
| // wait for main GPU data if necessary | |
| if (split && (id != g_main_device || is != 0)) { | |
| CUDA_CHECK(cudaStreamWaitEvent(stream, src0_extra->events[g_main_device][0], 0)); | |
| } | |
| for (int64_t i0 = 0; i0 < ne13*ne12; ++i0) { | |
| const int64_t i03 = i0 / ne12; | |
| const int64_t i02 = i0 % ne12; | |
| const size_t src1_ddq_i_offset = (i0*ne11 + src1_col_0) * src1_padded_col_size*q8_1_ts/q8_1_bs; | |
| // for split tensors the data begins at i0 == i0_offset_low | |
| char * src0_dd_i = dev[id].src0_dd + (i0/i02_divisor) * (ne01*ne00*src0_ts)/src0_bs; | |
| float * src1_ddf_i = dev[id].src1_ddf + (i0*ne11 + src1_col_0) * ne10; | |
| char * src1_ddq_i = dev[id].src1_ddq + src1_ddq_i_offset; | |
| float * dst_dd_i = dev[id].dst_dd + (i0*ne1 + src1_col_0) * (dst_on_device ? ne0 : row_diff); | |
| // the main device memory buffer can be on VRAM scratch, with space for all partial results | |
| // in that case an offset on dst_ddf_i is needed | |
| if (dst->backend == GGML_BACKEND_GPU && id == g_main_device) { | |
| dst_dd_i += dev[id].row_low; // offset is 0 if no tensor split | |
| } | |
| // copy src0, src1 to device if necessary | |
| if (src1->backend == GGML_BACKEND_GPU && src1_is_contiguous) { | |
| if (id != g_main_device) { | |
| if (convert_src1_to_q8_1) { | |
| char * src1_ddq_i_source = dev[g_main_device].src1_ddq + src1_ddq_i_offset; | |
| CUDA_CHECK(cudaMemcpyPeerAsync(src1_ddq_i, id, src1_ddq_i_source, g_main_device, | |
| src1_ncols*src1_padded_col_size*q8_1_ts/q8_1_bs, stream)); | |
| } else { | |
| float * src1_ddf_i_source = (float *) src1_extra->data_device[g_main_device]; | |
| src1_ddf_i_source += (i0*ne11 + src1_col_0) * ne10; | |
| CUDA_CHECK(cudaMemcpyPeerAsync(src1_ddf_i, id, src1_ddf_i_source, g_main_device, | |
| src1_ncols*ne10*sizeof(float), stream)); | |
| } | |
| } | |
| } else if (src1->backend == GGML_BACKEND_CPU || (src1_on_device && !src1_is_contiguous)) { | |
| CUDA_CHECK(ggml_cuda_cpy_tensor_2d( | |
| src1_ddf_i, src1, i03, i02, src1_col_0, src1_col_0+src1_ncols, stream)); | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| if (convert_src1_to_q8_1 && (src1->backend == GGML_BACKEND_CPU || !src1_is_contiguous)) { | |
| quantize_row_q8_1_cuda(src1_ddf_i, src1_ddq_i, ne10, src1_ncols, src1_padded_col_size, stream); | |
| CUDA_CHECK(cudaGetLastError()); | |
| } | |
| if (src1_col_0 == 0 && (!src0_on_device || !src0_is_contiguous) && i02 % i02_divisor == 0) { | |
| CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_dd_i, src0, i03, i02/i02_divisor, dev[id].row_low, dev[id].row_high, stream)); | |
| } | |
| // do the computation | |
| op(src0, src1, dst, src0_dd_i, src1_ddf_i, src1_ddq_i, dst_dd_i, | |
| dev[id].row_low, dev[id].row_high, src1_ncols, src1_padded_col_size, stream); | |
| CUDA_CHECK(cudaGetLastError()); | |
| // copy dst to host or other device if necessary | |
| if (!dst_on_device) { | |
| void * dst_off_device; | |
| cudaMemcpyKind kind; | |
| if (dst->backend == GGML_BACKEND_CPU) { | |
| dst_off_device = dst->data; | |
| kind = cudaMemcpyDeviceToHost; | |
| } else if (dst->backend == GGML_BACKEND_GPU) { | |
| dst_off_device = dst_extra->data_device[g_main_device]; | |
| kind = cudaMemcpyDeviceToDevice; | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| if (split) { | |
| // src0 = weight matrix is saved as a transposed matrix for better memory layout. | |
| // dst is NOT transposed. | |
| // The outputs of matrix matrix multiplications can therefore NOT simply be concatenated for >1 GPU. | |
| // Instead they need to be copied to the correct slice in ne0 = dst row index. | |
| // If dst is a vector with ne0 == 1 then you don't have to do this but it still produces correct results. | |
| float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3); | |
| GGML_ASSERT(dst->nb[1] == ne0*sizeof(float)); | |
| dhf_dst_i += src1_col_0*ne0 + dev[id].row_low; | |
| #if !defined(GGML_USE_HIPBLAS) | |
| if (kind == cudaMemcpyDeviceToDevice) { | |
| // cudaMemcpy2DAsync may fail with copies between vmm pools of different devices | |
| cudaMemcpy3DPeerParms p = {}; | |
| p.dstDevice = g_main_device; | |
| p.dstPtr = make_cudaPitchedPtr(dhf_dst_i, ne0*sizeof(float), row_diff, src1_ncols); | |
| p.srcDevice = id; | |
| p.srcPtr = make_cudaPitchedPtr(dst_dd_i, row_diff*sizeof(float), row_diff, src1_ncols); | |
| p.extent = make_cudaExtent(row_diff*sizeof(float), src1_ncols, 1); | |
| CUDA_CHECK(cudaMemcpy3DPeerAsync(&p, stream)); | |
| } else | |
| #endif // !defined(GGML_USE_HIPBLAS) | |
| { | |
| CUDA_CHECK(cudaMemcpy2DAsync(dhf_dst_i, ne0*sizeof(float), | |
| dst_dd_i, row_diff*sizeof(float), | |
| row_diff*sizeof(float), src1_ncols, | |
| kind, stream)); | |
| } | |
| } else { | |
| float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3); | |
| GGML_ASSERT(dst->nb[1] == ne0*sizeof(float)); | |
| dhf_dst_i += src1_col_0*ne0; | |
| CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_dd_i, src1_ncols*ne0*sizeof(float), kind, stream)); | |
| } | |
| } | |
| // add event for the main device to wait on until other device is done | |
| if (split && (id != g_main_device || is != 0)) { | |
| CUDA_CHECK(cudaEventRecord(src0_extra->events[id][is], stream)); | |
| } | |
| } | |
| } | |
| } | |
| // main device waits for all other devices to be finished | |
| if (split && g_device_count > 1) { | |
| int64_t is_max = (ne11 + MUL_MAT_SRC1_COL_STRIDE - 1) / MUL_MAT_SRC1_COL_STRIDE; | |
| is_max = is_max <= MAX_STREAMS ? is_max : MAX_STREAMS; | |
| ggml_cuda_set_device(g_main_device); | |
| for (int id = 0; id < g_device_count; ++id) { | |
| if (dev[id].row_low == dev[id].row_high) { | |
| continue; | |
| } | |
| for (int64_t is = 0; is < is_max; ++is) { | |
| CUDA_CHECK(cudaStreamWaitEvent(g_cudaStreams[g_main_device][0], src0_extra->events[id][is], 0)); | |
| } | |
| } | |
| } | |
| if (dst->backend == GGML_BACKEND_CPU) { | |
| ggml_cuda_set_device(g_main_device); | |
| CUDA_CHECK(cudaDeviceSynchronize()); | |
| } | |
| } | |
| static void ggml_cuda_repeat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_repeat); | |
| } | |
| static void ggml_cuda_get_rows(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_get_rows); | |
| } | |
| static void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_add); | |
| } | |
| static void ggml_cuda_acc(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_acc); | |
| } | |
| static void ggml_cuda_mul(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_mul); | |
| } | |
| static void ggml_cuda_div(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_div); | |
| } | |
| static void ggml_cuda_gelu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_gelu); | |
| } | |
| static void ggml_cuda_silu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_silu); | |
| } | |
| static void ggml_cuda_gelu_quick(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_gelu_quick); | |
| } | |
| static void ggml_cuda_tanh(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_tanh); | |
| } | |
| static void ggml_cuda_relu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_relu); | |
| } | |
| static void ggml_cuda_leaky_relu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_leaky_relu); | |
| } | |
| static void ggml_cuda_sqr(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_sqr); | |
| } | |
| static void ggml_cuda_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_norm); | |
| } | |
| static void ggml_cuda_group_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_group_norm); | |
| } | |
| static void ggml_cuda_concat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_concat); | |
| } | |
| static void ggml_cuda_upscale(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_upscale); | |
| } | |
| static void ggml_cuda_pad(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_pad); | |
| } | |
| static void ggml_cuda_rms_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_rms_norm); | |
| } | |
| bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { | |
| if (!g_cublas_loaded) return false; | |
| const int64_t ne10 = src1->ne[0]; | |
| const int64_t ne0 = dst->ne[0]; | |
| const int64_t ne1 = dst->ne[1]; | |
| // TODO: find the optimal values for these | |
| return (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && | |
| src1->type == GGML_TYPE_F32 && | |
| dst->type == GGML_TYPE_F32 && | |
| (ne0 >= 32 && ne1 >= 32 && ne10 >= 32); | |
| } | |
| static void ggml_cuda_mul_mat_vec_p021(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){ | |
| GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1)); | |
| GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT); | |
| GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation | |
| GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // 0213 permutation | |
| GGML_ASSERT(src0->type == GGML_TYPE_F16); | |
| GGML_ASSERT(src1->type == GGML_TYPE_F32); | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t ne01 = src0->ne[1]; | |
| const int64_t ne02 = src0->ne[2]; | |
| const int64_t ne12 = src1->ne[2]; | |
| ggml_cuda_set_device(g_main_device); | |
| cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; | |
| ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; | |
| void * src0_ddq = src0_extra->data_device[g_main_device]; | |
| ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra; | |
| float * src1_ddf = (float *) src1_extra->data_device[g_main_device]; | |
| ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra; | |
| float * dst_ddf = (float *) dst_extra->data_device[g_main_device]; | |
| ggml_mul_mat_p021_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, main_stream); | |
| } | |
| static void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){ | |
| GGML_ASSERT(!ggml_is_transposed(src0)); | |
| GGML_ASSERT(!ggml_is_transposed(src1)); | |
| GGML_ASSERT(!ggml_is_permuted(src0)); | |
| GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT); | |
| GGML_ASSERT(src0->type == GGML_TYPE_F16); | |
| GGML_ASSERT(src1->type == GGML_TYPE_F32); | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t ne01 = src0->ne[1]; | |
| const int64_t ne02 = src0->ne[2]; | |
| const int64_t nb01 = src0->nb[1]; | |
| const int64_t nb02 = src0->nb[2]; | |
| const int64_t ne12 = src1->ne[2]; | |
| ggml_cuda_set_device(g_main_device); | |
| cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; | |
| ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; | |
| void * src0_ddq = src0_extra->data_device[g_main_device]; | |
| ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra; | |
| float * src1_ddf = (float *) src1_extra->data_device[g_main_device]; | |
| ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra; | |
| float * dst_ddf = (float *) dst_extra->data_device[g_main_device]; | |
| const int64_t row_stride_x = nb01 / sizeof(half); | |
| const int64_t channel_stride_x = nb02 / sizeof(half); | |
| ggml_mul_mat_vec_nc_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, row_stride_x, ne02, ne12, channel_stride_x, main_stream); | |
| } | |
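| // one thread per (i12, i13) batch element: fills the pointer arrays consumed by cublasGemmBatchedEx, | |
| // mapping the broadcast dims of src0 through the r2/r3 ratios | |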
| static __global__ void k_compute_batched_ptrs( | |
| const half * src0_as_f16, const half * src1_as_f16, char * dst, | |
| const void ** ptrs_src, void ** ptrs_dst, | |
| int64_t ne12, int64_t ne13, | |
| int64_t ne23, | |
| size_t nb02, size_t nb03, | |
| size_t nb12, size_t nb13, | |
| size_t nbd2, size_t nbd3, | |
| int64_t r2, int64_t r3) { | |
| int64_t i13 = blockIdx.x * blockDim.x + threadIdx.x; | |
| int64_t i12 = blockIdx.y * blockDim.y + threadIdx.y; | |
| if (i13 >= ne13 || i12 >= ne12) { | |
| return; | |
| } | |
| int64_t i03 = i13 / r3; | |
| int64_t i02 = i12 / r2; | |
| ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02 + i03*nb03; | |
| ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12 + i13*nb13; | |
| ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst + i12*nbd2 + i13*nbd3; | |
| } | |
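| // batched f16 GEMM through cuBLAS for the KQ/KQV multi-batch case: uses the strided-batched path when | |
| // src0/src1 are contiguous across dims 2 and 3 and there is no broadcast (r2 == r3 == 1), | |
| // and per-matrix pointer arrays with cublasGemmBatchedEx otherwise | |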
| static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| GGML_ASSERT(!ggml_is_transposed(src0)); | |
| GGML_ASSERT(!ggml_is_transposed(src1)); | |
| GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT); | |
| GGML_ASSERT(src0->type == GGML_TYPE_F16); | |
| GGML_TENSOR_BINARY_OP_LOCALS | |
| const int64_t ne_dst = ggml_nelements(dst); | |
| ggml_cuda_set_device(g_main_device); | |
| cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; | |
| CUBLAS_CHECK(cublasSetStream(g_cublas_handles[g_main_device], main_stream)); | |
| ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; | |
| void * src0_ddq = src0_extra->data_device[g_main_device]; | |
| half * src0_f16 = (half *) src0_ddq; | |
| ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra; | |
| float * src1_ddf = (float *) src1_extra->data_device[g_main_device]; | |
| ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra; | |
| float * dst_ddf = (float *) dst_extra->data_device[g_main_device]; | |
| // convert src1 to fp16 | |
| cuda_pool_alloc<half> src1_f16_alloc; | |
| if (src1->type != GGML_TYPE_F16) { | |
| const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); | |
| const int64_t ne_src1 = ggml_nelements(src1); | |
| src1_f16_alloc.alloc(ne_src1); | |
| GGML_ASSERT(to_fp16_cuda != nullptr); | |
| to_fp16_cuda(src1_ddf, src1_f16_alloc.get(), ne_src1, main_stream); | |
| } | |
| half * src1_f16 = src1->type == GGML_TYPE_F16 ? (half *) src1_ddf : src1_f16_alloc.get(); | |
| cuda_pool_alloc<half> dst_f16; | |
| char * dst_t; | |
| cublasComputeType_t cu_compute_type = CUBLAS_COMPUTE_16F; | |
| cudaDataType_t cu_data_type = CUDA_R_16F; | |
| // dst strides | |
| size_t nbd2 = dst->nb[2]; | |
| size_t nbd3 = dst->nb[3]; | |
| const half alpha_f16 = 1.0f; | |
| const half beta_f16 = 0.0f; | |
| const float alpha_f32 = 1.0f; | |
| const float beta_f32 = 0.0f; | |
| const void * alpha = &alpha_f16; | |
| const void * beta = &beta_f16; | |
| if (dst->op_params[0] == GGML_PREC_DEFAULT) { | |
| dst_t = (char *) dst_f16.alloc(ne_dst); | |
| nbd2 /= sizeof(float) / sizeof(half); | |
| nbd3 /= sizeof(float) / sizeof(half); | |
| } else { | |
| dst_t = (char *) dst_ddf; | |
| cu_compute_type = CUBLAS_COMPUTE_32F; | |
| cu_data_type = CUDA_R_32F; | |
| alpha = &alpha_f32; | |
| beta = &beta_f32; | |
| } | |
| GGML_ASSERT(ne12 % ne02 == 0); | |
| GGML_ASSERT(ne13 % ne03 == 0); | |
| // broadcast factors | |
| const int64_t r2 = ne12/ne02; | |
| const int64_t r3 = ne13/ne03; | |
| #if 0 | |
| // use cublasGemmEx | |
| { | |
| for (int i13 = 0; i13 < ne13; ++i13) { | |
| for (int i12 = 0; i12 < ne12; ++i12) { | |
| int i03 = i13 / r3; | |
| int i02 = i12 / r2; | |
| CUBLAS_CHECK( | |
| cublasGemmEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, | |
| ne01, ne11, ne10, | |
| alpha, (const char *) src0_as_f16 + i02*src0->nb[2] + i03*src0->nb[3] , CUDA_R_16F, nb01/sizeof(half), | |
| (const char *) src1_as_f16 + i12*src1->nb[2]/2 + i13*src1->nb[3]/2, CUDA_R_16F, nb11/sizeof(float), | |
| beta, ( char *) dst_t + i12*nbd2 + i13*nbd3, cu_data_type, ne01, | |
| cu_compute_type, | |
| CUBLAS_GEMM_DEFAULT_TENSOR_OP)); | |
| } | |
| } | |
| } | |
| #else | |
| if (r2 == 1 && r3 == 1 && src0->nb[2]*src0->ne[2] == src0->nb[3] && src1->nb[2]*src1->ne[2] == src1->nb[3]) { | |
| // there is no broadcast and src0, src1 are contiguous across dims 2, 3 | |
| // use cublasGemmStridedBatchedEx | |
| CUBLAS_CHECK( | |
| cublasGemmStridedBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, | |
| ne01, ne11, ne10, | |
| alpha, (const char *) src0_f16, CUDA_R_16F, nb01/nb00, nb02/nb00, // strideA | |
| (const char *) src1_f16, CUDA_R_16F, nb11/nb10, nb12/nb10, // strideB | |
| beta, ( char *) dst_t, cu_data_type, ne01, nb2/nb0, // strideC | |
| ne12*ne13, | |
| cu_compute_type, | |
| CUBLAS_GEMM_DEFAULT_TENSOR_OP)); | |
| } else { | |
| // use cublasGemmBatchedEx | |
| const int ne23 = ne12*ne13; | |
| cuda_pool_alloc<const void *> ptrs_src(2*ne23); | |
| cuda_pool_alloc< void *> ptrs_dst(1*ne23); | |
| dim3 block_dims(ne13, ne12); | |
| k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( | |
| src0_f16, src1_f16, dst_t, | |
| ptrs_src.get(), ptrs_dst.get(), | |
| ne12, ne13, | |
| ne23, | |
| nb02, nb03, | |
| src1->type == GGML_TYPE_F16 ? nb12 : nb12/2, | |
| src1->type == GGML_TYPE_F16 ? nb13 : nb13/2, | |
| nbd2, nbd3, | |
| r2, r3); | |
| CUDA_CHECK(cudaGetLastError()); | |
| CUBLAS_CHECK( | |
| cublasGemmBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, | |
| ne01, ne11, ne10, | |
| alpha, (const void **) (ptrs_src.get() + 0*ne23), CUDA_R_16F, nb01/nb00, | |
| (const void **) (ptrs_src.get() + 1*ne23), CUDA_R_16F, nb11/nb10, | |
| beta, ( void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne01, | |
| ne23, | |
| cu_compute_type, | |
| CUBLAS_GEMM_DEFAULT_TENSOR_OP)); | |
| } | |
| #endif | |
| if (dst->op_params[0] == GGML_PREC_DEFAULT) { | |
| const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); | |
| to_fp32_cuda(dst_f16.get(), dst_ddf, ne_dst, main_stream); | |
| } | |
| } | |
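| // top-level mul_mat dispatch: picks between the custom f16 vector kernels, batched cuBLAS, quantized | |
| // mat-vec (MMVQ), quantized mat-mat (MMQ) and plain cuBLAS based on tensor types, batch size and the | |
| // compute capability of the participating devices | |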
| static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| const bool all_on_device = | |
| (src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT) && | |
| (src1->backend == GGML_BACKEND_GPU) && | |
| ( dst->backend == GGML_BACKEND_GPU); | |
| const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT; | |
| int64_t min_compute_capability = INT_MAX; | |
| for (int id = 0; id < g_device_count; ++id) { | |
| if (min_compute_capability > g_device_caps[id].cc && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { | |
| min_compute_capability = g_device_caps[id].cc; | |
| } | |
| } | |
| #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| const bool fp16_performance_good = min_compute_capability >= CC_RDNA1; | |
| bool use_mul_mat_q = ggml_is_quantized(src0->type); | |
| use_mul_mat_q = use_mul_mat_q && min_compute_capability < CC_RDNA3; | |
| #else | |
| const bool fp16_performance_good = min_compute_capability >= CC_VOLTA; | |
| bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type); | |
| // when tensor cores are available, use them for large batch size | |
| // ref: https://github.com/ggerganov/llama.cpp/pull/3776 | |
| use_mul_mat_q = use_mul_mat_q && !(fp16_performance_good && src1->ne[1] > MMQ_MAX_BATCH_SIZE); | |
| #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) | |
| use_mul_mat_q = use_mul_mat_q && ggml_cuda_supports_mmq(src0->type); | |
| // debug helpers | |
| //printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]); | |
| //printf(" %8d %8d %8d %8d\n", src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3]); | |
| //printf("src1: %8d %8d %8d %8d\n", src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3]); | |
| //printf(" %8d %8d %8d %8d\n", src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3]); | |
| //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name); | |
| //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name); | |
| if (!split && all_on_device && !fp16_performance_good && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) { | |
| // KQ single-batch | |
| ggml_cuda_mul_mat_vec_p021(src0, src1, dst); | |
| } else if (!split && all_on_device && !fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { | |
| // KQV single-batch | |
| ggml_cuda_mul_mat_vec_nc(src0, src1, dst); | |
| } else if (!split && all_on_device && fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { | |
| // KQ + KQV multi-batch | |
| ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst); | |
| } else if (src0->type == GGML_TYPE_F32) { | |
| ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, false); | |
| } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) { | |
| if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0 && src1->type == GGML_TYPE_F32) { | |
| #ifdef GGML_CUDA_FORCE_DMMV | |
| const bool use_mul_mat_vec_q = false; | |
| #else | |
| const bool use_mul_mat_vec_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type) && ggml_nrows(src1) == 1; | |
| #endif // GGML_CUDA_FORCE_DMMV | |
| if (use_mul_mat_vec_q) { | |
| // NOTE: this kernel does not support ggml_nrows(src1) > 1 | |
| ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_vec_q, true); | |
| } else { | |
| ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, false); | |
| } | |
| } else { | |
| if (use_mul_mat_q) { | |
| ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_q, true); | |
| } else { | |
| ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, false); | |
| } | |
| } | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| } | |
| #if 0 // disabled: this cuBLAS-based mul_mat_id path is not used (see ggml_cuda_mul_mat_id below) | |
| template<typename ... Srcs> | |
| static __global__ void k_compute_batched_ptrs_id( | |
| const void ** ptrs_src, void ** ptrs_dst, | |
| int ne12, int ne13, | |
| int ne23, | |
| int nb02, int nb03, | |
| int nb12, int nb13, | |
| int nb2, int nb3, | |
| int r2, int r3, | |
| ggml_type src0_type, half * src0_as_f16, int64_t src0_ne, | |
| const half * src1_f16, half * dst_f16, | |
| const int32_t * ids, const int id, | |
| Srcs... src0s) { | |
| int i = ids[id]; | |
| half * src0_f16; | |
| const void * srcs_ar[] = { (const half *) src0s... }; | |
| if (src0_type == GGML_TYPE_F16) { | |
| src0_f16 = (half *) srcs_ar[i]; | |
| } else { | |
| src0_f16 = src0_as_f16; | |
| if (threadIdx.x == 0 && threadIdx.y == 0) { | |
| const to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(src0_type); | |
| to_fp16(srcs_ar[i], src0_f16, src0_ne, cudaStreamFireAndForget); | |
| } | |
| } | |
| int i13 = blockIdx.x * blockDim.x + threadIdx.x; | |
| int i12 = blockIdx.y * blockDim.y + threadIdx.y; | |
| if (i13 >= ne13 || i12 >= ne12) { | |
| return; | |
| } | |
| int i03 = i13 / r3; | |
| int i02 = i12 / r2; | |
| ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_f16 + i02*nb02 + i03*nb03; | |
| ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_f16 + i12*nb12/2 + i13*nb13/2; | |
| ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst_f16 + i12* nb2/2 + i13* nb3/2; | |
| } | |
| static void ggml_cuda_mul_mat_id_cublas(ggml_tensor * dst) { | |
| const struct ggml_tensor * ids = dst->src[0]; | |
| const struct ggml_tensor * src1 = dst->src[1]; | |
| const struct ggml_tensor * src00 = dst->src[2]; | |
| const int id = dst->op_params[0]; | |
| GGML_ASSERT(!ggml_is_transposed(src00)); | |
| GGML_ASSERT(!ggml_is_transposed(src1)); | |
| GGML_ASSERT(src00->backend != GGML_BACKEND_GPU_SPLIT); | |
| GGML_ASSERT(src1->type == GGML_TYPE_F32); | |
| const int64_t ne00 = src00->ne[0]; GGML_UNUSED(ne00); | |
| const int64_t ne01 = src00->ne[1]; | |
| const int64_t ne02 = src00->ne[2]; | |
| const int64_t ne03 = src00->ne[3]; | |
| //const int64_t nb01 = src00->nb[1]; | |
| const int64_t nb02 = src00->nb[2]; GGML_UNUSED(nb02); | |
| const int64_t nb03 = src00->nb[3]; GGML_UNUSED(nb03); | |
| const int64_t ne10 = src1->ne[0]; | |
| const int64_t ne11 = src1->ne[1]; | |
| const int64_t ne12 = src1->ne[2]; | |
| const int64_t ne13 = src1->ne[3]; | |
| //const int64_t nb11 = src1->nb[1]; | |
| const int64_t nb12 = src1->nb[2]; GGML_UNUSED(nb12); | |
| const int64_t nb13 = src1->nb[3]; GGML_UNUSED(nb13); | |
| const int64_t ne1 = ggml_nelements(src1); | |
| const int64_t ne = ggml_nelements(dst); | |
| ggml_cuda_set_device(g_main_device); | |
| cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; | |
| CUBLAS_CHECK(cublasSetStream(g_cublas_handles[g_main_device], main_stream)); | |
| //ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; | |
| //void * src0_ddq = src0_extra->data_device[g_main_device]; | |
| //half * src0_as_f16 = (half *) src0_ddq; | |
| ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra; | |
| float * src1_ddf = (float *) src1_extra->data_device[g_main_device]; | |
| ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra; | |
| float * dst_ddf = (float *) dst_extra->data_device[g_main_device]; | |
| // convert src1 to fp16 | |
| const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); | |
| GGML_ASSERT(to_fp16_cuda != nullptr); | |
| size_t src1_as = 0; | |
| half * src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne1 * sizeof(half), &src1_as); | |
| to_fp16_cuda(src1_ddf, src1_as_f16, ne1, main_stream); | |
| size_t dst_as = 0; | |
| half * dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as); | |
| GGML_ASSERT(ne12 % ne02 == 0); | |
| GGML_ASSERT(ne13 % ne03 == 0); | |
| // broadcast factors | |
| const int64_t r2 = ne12/ne02; | |
| const int64_t r3 = ne13/ne03; | |
| const half alpha_f16 = 1.0f; | |
| const half beta_f16 = 0.0f; | |
| // use cublasGemmBatchedEx | |
| const int ne23 = ne12*ne13; | |
| const void ** ptrs_src = nullptr; | |
| void ** ptrs_dst = nullptr; | |
| size_t ptrs_src_s = 0; | |
| size_t ptrs_dst_s = 0; | |
| ptrs_src = (const void **) ggml_cuda_pool_malloc(2*ne23*sizeof(void *), &ptrs_src_s); | |
| ptrs_dst = ( void **) ggml_cuda_pool_malloc(1*ne23*sizeof(void *), &ptrs_dst_s); | |
| int64_t src0_ne = ggml_nelements(src00); | |
| half * src0_as_f16 = nullptr; | |
| size_t src0_as = 0; | |
| if (src00->type != GGML_TYPE_F16) { | |
| src0_as_f16 = (half *) ggml_cuda_pool_malloc(src0_ne * sizeof(half), &src0_as); | |
| } | |
| static_assert(GGML_MAX_SRC == 6, "GGML_MAX_SRC == 6"); | |
| dim3 block_dims(ne13, ne12); | |
| k_compute_batched_ptrs_id<<<1, block_dims, 0, main_stream>>>( | |
| ptrs_src, ptrs_dst, | |
| ne12, ne13, | |
| ne23, | |
| ne00*ne01*sizeof(half), ne00*ne01*ne02*sizeof(half), | |
| nb12, nb13, | |
| dst->nb[2], dst->nb[3], | |
| r2, r3, | |
| src00->type, src0_as_f16, src0_ne, | |
| src1_as_f16, dst_f16, | |
| (const int *)((ggml_tensor_extra_gpu *)ids->extra)->data_device[g_main_device], id, | |
| dst->src[2] ? (const half *)((ggml_tensor_extra_gpu *)dst->src[2]->extra)->data_device[g_main_device] : nullptr, | |
| dst->src[3] ? (const half *)((ggml_tensor_extra_gpu *)dst->src[3]->extra)->data_device[g_main_device] : nullptr, | |
| dst->src[4] ? (const half *)((ggml_tensor_extra_gpu *)dst->src[4]->extra)->data_device[g_main_device] : nullptr, | |
| dst->src[5] ? (const half *)((ggml_tensor_extra_gpu *)dst->src[5]->extra)->data_device[g_main_device] : nullptr | |
| ); | |
| CUDA_CHECK(cudaGetLastError()); | |
| CUBLAS_CHECK( | |
| cublasGemmBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, | |
| ne01, ne11, ne10, | |
| &alpha_f16, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F, ne00, | |
| (const void **) (ptrs_src + 1*ne23), CUDA_R_16F, ne10, | |
| &beta_f16, ( void **) (ptrs_dst + 0*ne23), CUDA_R_16F, ne01, | |
| ne23, | |
| CUBLAS_COMPUTE_16F, | |
| CUBLAS_GEMM_DEFAULT_TENSOR_OP)); | |
| if (src0_as != 0) { | |
| ggml_cuda_pool_free(src0_as_f16, src0_as); | |
| } | |
| if (ptrs_src_s != 0) { | |
| ggml_cuda_pool_free(ptrs_src, ptrs_src_s); | |
| } | |
| if (ptrs_dst_s != 0) { | |
| ggml_cuda_pool_free(ptrs_dst, ptrs_dst_s); | |
| } | |
| const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); | |
| to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream); | |
| ggml_cuda_pool_free(src1_as_f16, src1_as); | |
| ggml_cuda_pool_free(dst_f16, dst_as); | |
| } | |
| #endif | |
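| // mixture-of-experts matrix multiplication: the ids tensor selects one expert (src0 = dst->src[row_id + 2]) | |
| // per src1 row; for single-token batches the expert matrices are applied row by row, otherwise the rows | |
| // belonging to each expert are gathered into a contiguous buffer, multiplied, and scattered back | |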
| static void ggml_cuda_mul_mat_id(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| #if 0 | |
| ggml_cuda_mul_mat_id_cublas(dst); | |
| // TODO: mmq/mmv support | |
| #endif | |
| const int64_t nb11 = src1->nb[1]; | |
| const int64_t nb1 = dst->nb[1]; | |
| const struct ggml_tensor * ids = src0; | |
| const int32_t id = ((int32_t *) dst->op_params)[0]; | |
| const int32_t n_as = ((int32_t *) dst->op_params)[1]; | |
| std::vector<char> ids_host(ggml_nbytes(ids)); | |
| cudaStream_t stream = g_cudaStreams[g_main_device][0]; | |
| if (ids->backend == GGML_BACKEND_GPU) { | |
| const char * ids_dev = (const char *)((const ggml_tensor_extra_gpu *)ids->extra)->data_device[g_main_device]; | |
| CUDA_CHECK(cudaMemcpyAsync(ids_host.data(), ids_dev, ggml_nbytes(ids), cudaMemcpyDeviceToHost, stream)); | |
| CUDA_CHECK(cudaStreamSynchronize(stream)); | |
| } else { | |
| memcpy(ids_host.data(), ids->data, ggml_nbytes(ids)); | |
| } | |
| const ggml_tensor_extra_gpu * src1_extra = (const ggml_tensor_extra_gpu *) src1->extra; | |
| const ggml_tensor_extra_gpu * dst_extra = (const ggml_tensor_extra_gpu *) dst->extra; | |
| ggml_tensor_extra_gpu src1_row_extra; | |
| ggml_tensor_extra_gpu dst_row_extra; | |
| ggml_tensor src1_row = *src1; | |
| ggml_tensor dst_row = *dst; | |
| src1_row.backend = GGML_BACKEND_GPU; | |
| dst_row.backend = GGML_BACKEND_GPU; | |
| src1_row.extra = &src1_row_extra; | |
| dst_row.extra = &dst_row_extra; | |
| char * src1_original = src1->backend == GGML_BACKEND_CPU ? | |
| (char *) src1->data : (char *) src1_extra->data_device[g_main_device]; | |
| char * dst_original = dst->backend == GGML_BACKEND_CPU ? | |
| (char *) dst->data : (char *) dst_extra->data_device[g_main_device]; | |
| if (src1->ne[1] == 1) { | |
| GGML_ASSERT(src1->backend == GGML_BACKEND_GPU); | |
| GGML_ASSERT(dst->backend == GGML_BACKEND_GPU); | |
| for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) { | |
| //int32_t row_id; | |
| //CUDA_CHECK(cudaMemcpyAsync(&row_id, ids_dev + i01*ids->nb[1] + id*ids->nb[0], sizeof(int32_t), cudaMemcpyDeviceToHost, g_cudaStreams[g_main_device][0])); | |
| //CUDA_CHECK(cudaStreamSynchronize(g_cudaStreams[g_main_device][0])); | |
| const int32_t row_id = *(const int32_t *) (ids_host.data() + i01*ids->nb[1] + id*ids->nb[0]); | |
| GGML_ASSERT(row_id >= 0 && row_id < n_as); | |
| const struct ggml_tensor * src0_row = dst->src[row_id + 2]; | |
| src1_row_extra.data_device[g_main_device] = src1_original + i01*src1->nb[1]; | |
| src1_row.data = (char *) src1->data + i01*src1->nb[1]; // TODO why is this set? | |
| dst_row_extra.data_device[g_main_device] = dst_original + i01*dst->nb[1]; | |
| dst_row.data = (char *) dst->data + i01*dst->nb[1]; // TODO why is this set? | |
| ggml_cuda_mul_mat(src0_row, &src1_row, &dst_row); | |
| } | |
| } else { | |
| cuda_pool_alloc<char> src1_contiguous(sizeof(float)*ggml_nelements(src1)); | |
| cuda_pool_alloc<char> dst_contiguous(sizeof(float)*ggml_nelements(dst)); | |
| src1_row_extra.data_device[g_main_device] = src1_contiguous.get(); | |
| dst_row_extra.data_device[g_main_device] = dst_contiguous.get(); | |
| const cudaMemcpyKind src1_kind = src1->backend == GGML_BACKEND_CPU ? | |
| cudaMemcpyHostToDevice : cudaMemcpyDeviceToDevice; | |
| const cudaMemcpyKind dst_kind = dst->backend == GGML_BACKEND_CPU ? | |
| cudaMemcpyDeviceToHost : cudaMemcpyDeviceToDevice; | |
| for (int32_t row_id = 0; row_id < n_as; ++row_id) { | |
| const struct ggml_tensor * src0_row = dst->src[row_id + 2]; | |
| int64_t num_src1_rows = 0; | |
| for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) { | |
| const int32_t row_id_i = *(const int32_t *) (ids_host.data() + i01*ids->nb[1] + id*ids->nb[0]); | |
| if (row_id_i != row_id) { | |
| continue; | |
| } | |
| GGML_ASSERT(row_id >= 0 && row_id < n_as); | |
| CUDA_CHECK(cudaMemcpyAsync(src1_contiguous.get() + num_src1_rows*nb11, src1_original + i01*nb11, | |
| nb11, src1_kind, stream)); | |
| num_src1_rows++; | |
| } | |
| if (num_src1_rows == 0) { | |
| continue; | |
| } | |
| src1_row.ne[1] = num_src1_rows; | |
| dst_row.ne[1] = num_src1_rows; | |
| src1_row.nb[1] = nb11; | |
| src1_row.nb[2] = num_src1_rows*nb11; | |
| src1_row.nb[3] = num_src1_rows*nb11; | |
| dst_row.nb[1] = nb1; | |
| dst_row.nb[2] = num_src1_rows*nb1; | |
| dst_row.nb[3] = num_src1_rows*nb1; | |
| ggml_cuda_mul_mat(src0_row, &src1_row, &dst_row); | |
| num_src1_rows = 0; | |
| for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) { | |
| const int32_t row_id_i = *(const int32_t *) (ids_host.data() + i01*ids->nb[1] + id*ids->nb[0]); | |
| if (row_id_i != row_id) { | |
| continue; | |
| } | |
| GGML_ASSERT(row_id >= 0 && row_id < n_as); | |
| CUDA_CHECK(cudaMemcpyAsync(dst_original + i01*nb1, dst_contiguous.get() + num_src1_rows*nb1, | |
| nb1, dst_kind, stream)); | |
| num_src1_rows++; | |
| } | |
| } | |
| } | |
| if (dst->backend == GGML_BACKEND_CPU) { | |
| CUDA_CHECK(cudaStreamSynchronize(stream)); | |
| } | |
| } | |
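| // the element-wise and single-tensor ops below are thin wrappers around ggml_cuda_op_flatten | |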
| static void ggml_cuda_scale(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_scale); | |
| } | |
| static void ggml_cuda_clamp(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_clamp); | |
| } | |
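| // CPY: picks a copy/convert kernel based on the (src0, src1) type pair; both tensors must already be | |
| // resident on the GPU and no larger than INT_MAX bytes | |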
| static void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| const int64_t ne = ggml_nelements(src0); | |
| GGML_ASSERT(ne == ggml_nelements(src1)); | |
| GGML_ASSERT(src0->backend == GGML_BACKEND_GPU); | |
| GGML_ASSERT(src1->backend == GGML_BACKEND_GPU); | |
| GGML_ASSERT(ggml_nbytes(src0) <= INT_MAX); | |
| GGML_ASSERT(ggml_nbytes(src1) <= INT_MAX); | |
| const int64_t ne00 = src0->ne[0]; | |
| const int64_t ne01 = src0->ne[1]; | |
| GGML_ASSERT(src0->ne[3] == 1); | |
| const int64_t nb00 = src0->nb[0]; | |
| const int64_t nb01 = src0->nb[1]; | |
| const int64_t nb02 = src0->nb[2]; | |
| const int64_t ne10 = src1->ne[0]; | |
| const int64_t ne11 = src1->ne[1]; | |
| GGML_ASSERT(src1->ne[3] == 1); | |
| const int64_t nb10 = src1->nb[0]; | |
| const int64_t nb11 = src1->nb[1]; | |
| const int64_t nb12 = src1->nb[2]; | |
| ggml_cuda_set_device(g_main_device); | |
| cudaStream_t main_stream = g_cudaStreams[g_main_device][0]; | |
| const ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; | |
| const ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra; | |
| char * src0_ddc = (char *) src0_extra->data_device[g_main_device]; | |
| char * src1_ddc = (char *) src1_extra->data_device[g_main_device]; | |
| if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { | |
| ggml_cpy_f32_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12, main_stream); | |
| } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { | |
| ggml_cpy_f32_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12, main_stream); | |
| } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) { | |
| ggml_cpy_f32_q8_0_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12, main_stream); | |
| } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) { | |
| ggml_cpy_f32_q4_0_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12, main_stream); | |
| } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) { | |
| ggml_cpy_f32_q4_1_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12, main_stream); | |
| } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) { | |
| ggml_cpy_f16_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12, main_stream); | |
| } else { | |
| fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__, | |
| ggml_type_name(src0->type), ggml_type_name(src1->type)); | |
| GGML_ASSERT(false); | |
| } | |
| (void) dst; | |
| } | |
| static void ggml_cuda_dup(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| // TODO: why do we pass dst as src1 here? | |
| ggml_cuda_cpy(src0, dst, nullptr); | |
| (void) src1; | |
| } | |
| static void ggml_cuda_diag_mask_inf(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_diag_mask_inf); | |
| } | |
| static void ggml_cuda_soft_max(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_soft_max); | |
| } | |
| static void ggml_cuda_rope(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| GGML_ASSERT(ggml_is_contiguous(src0)); // TODO: this restriction is temporary until non-cont support is implemented | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_rope); | |
| } | |
| static void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_alibi); | |
| } | |
| static void ggml_cuda_im2col(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_im2col); | |
| } | |
| static void ggml_cuda_sum_rows(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| GGML_ASSERT(ggml_is_contiguous(src0)); | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_sum_rows); | |
| } | |
| static void ggml_cuda_argsort(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| GGML_ASSERT(ggml_is_contiguous(src0)); | |
| ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_argsort); | |
| } | |
| static void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { | |
| (void) src0; | |
| (void) src1; | |
| (void) dst; | |
| } | |
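| // number of bytes taken by nrows_split rows of the tensor (used when a matrix is split across devices) | |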
| static size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) { | |
| static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); | |
| return nrows_split*ggml_row_size(tensor->type, tensor->ne[0]); | |
| } | |
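| // upload tensor data to the device(s): GGML_BACKEND_GPU keeps the whole tensor on the main device, | |
| // GGML_BACKEND_GPU_SPLIT assigns each device a row range according to g_tensor_split; the last row is | |
| // padded to a multiple of MATRIX_ROW_PADDING and the padding is zeroed to avoid NaNs in the kernels | |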
| void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) { | |
| const int64_t nrows = ggml_nrows(tensor); | |
| const int64_t ne0 = tensor->ne[0]; | |
| const size_t nb1 = tensor->nb[1]; | |
| ggml_backend_type backend = tensor->backend; | |
| ggml_tensor_extra_gpu * extra = new struct ggml_tensor_extra_gpu; | |
| memset(extra, 0, sizeof(*extra)); | |
| for (int id = 0; id < g_device_count; ++id) { | |
| if (backend == GGML_BACKEND_GPU && id != g_main_device) { | |
| continue; | |
| } | |
| ggml_cuda_set_device(id); | |
| int64_t row_low, row_high; | |
| if (backend == GGML_BACKEND_GPU) { | |
| row_low = 0; | |
| row_high = nrows; | |
| } else if (backend == GGML_BACKEND_GPU_SPLIT) { | |
| const int64_t rounding = get_row_rounding(tensor->type); | |
| row_low = id == 0 ? 0 : nrows*g_tensor_split[id]; | |
| row_low -= row_low % rounding; | |
| if (id == g_device_count - 1) { | |
| row_high = nrows; | |
| } else { | |
| row_high = nrows*g_tensor_split[id + 1]; | |
| row_high -= row_high % rounding; | |
| } | |
| } else { | |
| GGML_ASSERT(false); | |
| } | |
| if (row_low == row_high) { | |
| continue; | |
| } | |
| int64_t nrows_split = row_high - row_low; | |
| const size_t offset_split = row_low*nb1; | |
| size_t size = ggml_nbytes_split(tensor, nrows_split); | |
| const size_t original_size = size; | |
| // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses | |
| if (ne0 % MATRIX_ROW_PADDING != 0) { | |
| size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); | |
| } | |
| char * buf; | |
| CUDA_CHECK(cudaMalloc(&buf, size)); | |
| char * buf_host = (char *)data + offset_split; | |
| // set padding to 0 to avoid possible NaN values | |
| if (size > original_size) { | |
| CUDA_CHECK(cudaMemset(buf + original_size, 0, size - original_size)); | |
| } | |
| CUDA_CHECK(cudaMemcpy(buf, buf_host, original_size, cudaMemcpyHostToDevice)); | |
| extra->data_device[id] = buf; | |
| if (backend == GGML_BACKEND_GPU_SPLIT) { | |
| for (int64_t is = 0; is < MAX_STREAMS; ++is) { | |
| CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id][is], cudaEventDisableTiming)); | |
| } | |
| } | |
| } | |
| tensor->extra = extra; | |
| } | |
| void ggml_cuda_free_data(struct ggml_tensor * tensor) { | |
| if (!tensor || !tensor->extra || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) ) { | |
| return; | |
| } | |
| ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra; | |
| for (int id = 0; id < g_device_count; ++id) { | |
| ggml_cuda_set_device(id); | |
| if (extra->data_device[id] != nullptr) { | |
| CUDA_CHECK(cudaFree(extra->data_device[id])); | |
| } | |
| for (int64_t is = 0; is < MAX_STREAMS; ++is) { | |
| if (extra->events[id][is] != nullptr) { | |
| CUDA_CHECK(cudaEventDestroy(extra->events[id][is])); | |
| } | |
| } | |
| } | |
| delete extra; | |
| } | |
| static ggml_tensor_extra_gpu * g_temp_tensor_extras = nullptr; | |
| static size_t g_temp_tensor_extra_index = 0; | |
| static ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() { | |
| if (g_temp_tensor_extras == nullptr) { | |
| g_temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_CUDA_MAX_NODES]; | |
| } | |
| size_t alloc_index = g_temp_tensor_extra_index; | |
| g_temp_tensor_extra_index = (g_temp_tensor_extra_index + 1) % GGML_CUDA_MAX_NODES; | |
| ggml_tensor_extra_gpu * extra = &g_temp_tensor_extras[alloc_index]; | |
| memset(extra, 0, sizeof(*extra)); | |
| return extra; | |
| } | |
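| // assign a device buffer to a tensor: views and in-place ops alias their source's device pointer, | |
| // GGML_OP_CPY nodes alias the copy destination (src[1]), scratch tensors take a slice of the global | |
| // scratch buffer (the offset wraps around when full), and everything else gets its own zeroed cudaMalloc | |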
| static void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bool force_inplace, bool no_alloc) { | |
| if (scratch && g_scratch_size == 0) { | |
| return; | |
| } | |
| tensor->backend = GGML_BACKEND_GPU; | |
| // recursively assign CUDA buffers until a compute tensor is found | |
| if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_CPU) { | |
| const ggml_op src0_op = tensor->src[0]->op; | |
| if (src0_op == GGML_OP_RESHAPE || src0_op == GGML_OP_TRANSPOSE || src0_op == GGML_OP_VIEW || src0_op == GGML_OP_PERMUTE) { | |
| ggml_cuda_assign_buffers_impl(tensor->src[0], scratch, force_inplace, no_alloc); | |
| } | |
| } | |
| if (tensor->op == GGML_OP_CPY && tensor->src[1]->backend == GGML_BACKEND_CPU) { | |
| ggml_cuda_assign_buffers_impl(tensor->src[1], scratch, force_inplace, no_alloc); | |
| } | |
| if (scratch && no_alloc) { | |
| return; | |
| } | |
| ggml_tensor_extra_gpu * extra; | |
| const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) || | |
| tensor->op == GGML_OP_VIEW || | |
| force_inplace; | |
| const size_t size = ggml_nbytes(tensor); | |
| ggml_cuda_set_device(g_main_device); | |
| if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) { | |
| ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra; | |
| char * src0_ddc = (char *) src0_extra->data_device[g_main_device]; | |
| size_t offset = 0; | |
| if (tensor->op == GGML_OP_VIEW) { | |
| memcpy(&offset, tensor->op_params, sizeof(size_t)); | |
| } | |
| extra = ggml_cuda_alloc_temp_tensor_extra(); | |
| extra->data_device[g_main_device] = src0_ddc + offset; | |
| } else if (tensor->op == GGML_OP_CPY) { | |
| ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu * ) tensor->src[1]->extra; | |
| void * src1_ddv = src1_extra->data_device[g_main_device]; | |
| extra = ggml_cuda_alloc_temp_tensor_extra(); | |
| extra->data_device[g_main_device] = src1_ddv; | |
| } else if (scratch) { | |
| GGML_ASSERT(size <= g_scratch_size); | |
| if (g_scratch_offset + size > g_scratch_size) { | |
| g_scratch_offset = 0; | |
| } | |
| char * data = (char *) g_scratch_buffer; | |
| if (data == nullptr) { | |
| CUDA_CHECK(cudaMalloc(&data, g_scratch_size)); | |
| g_scratch_buffer = data; | |
| } | |
| extra = ggml_cuda_alloc_temp_tensor_extra(); | |
| extra->data_device[g_main_device] = data + g_scratch_offset; | |
| g_scratch_offset += size; | |
| GGML_ASSERT(g_scratch_offset <= g_scratch_size); | |
| } else { // allocate new buffers outside of scratch | |
| void * data; | |
| CUDA_CHECK(cudaMalloc(&data, size)); | |
| CUDA_CHECK(cudaMemset(data, 0, size)); | |
| extra = new ggml_tensor_extra_gpu; | |
| memset(extra, 0, sizeof(*extra)); | |
| extra->data_device[g_main_device] = data; | |
| } | |
| tensor->extra = extra; | |
| } | |
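| // place a tensor at a fixed offset inside the global scratch buffer; view tensors reuse the device | |
| // pointer of their view_src instead | |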
| void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset) { | |
| if (g_scratch_size == 0) { | |
| return; | |
| } | |
| if (g_scratch_buffer == nullptr) { | |
| ggml_cuda_set_device(g_main_device); | |
| CUDA_CHECK(cudaMalloc(&g_scratch_buffer, g_scratch_size)); | |
| } | |
| ggml_tensor_extra_gpu * extra = ggml_cuda_alloc_temp_tensor_extra(); | |
| const bool inplace = tensor->view_src != nullptr; | |
| if (inplace && (tensor->view_src->backend == GGML_BACKEND_GPU || tensor->view_src->backend == GGML_BACKEND_GPU_SPLIT)) { | |
| ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->view_src->extra; | |
| char * src0_ddc = (char *) src0_extra->data_device[g_main_device]; | |
| size_t view_offset = 0; | |
| if (tensor->op == GGML_OP_VIEW) { | |
| memcpy(&view_offset, tensor->op_params, sizeof(size_t)); | |
| } | |
| extra->data_device[g_main_device] = src0_ddc + view_offset; | |
| } else { | |
| extra->data_device[g_main_device] = (char *) g_scratch_buffer + offset; | |
| } | |
| tensor->extra = extra; | |
| } | |
| void ggml_cuda_copy_to_device(struct ggml_tensor * tensor) { | |
| GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); | |
| GGML_ASSERT(ggml_is_contiguous(tensor)); | |
| ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra; | |
| ggml_cuda_set_device(g_main_device); | |
| CUDA_CHECK(cudaMemcpy(extra->data_device[g_main_device], tensor->data, ggml_nbytes(tensor), cudaMemcpyHostToDevice)); | |
| } | |
| void ggml_cuda_assign_buffers(struct ggml_tensor * tensor) { | |
| ggml_cuda_assign_buffers_impl(tensor, true, false, false); | |
| } | |
| void ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor) { | |
| ggml_cuda_assign_buffers_impl(tensor, true, false, true); | |
| } | |
| void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor) { | |
| ggml_cuda_assign_buffers_impl(tensor, false, false, false); | |
| } | |
| void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor) { | |
| ggml_cuda_assign_buffers_impl(tensor, false, true, false); | |
| } | |
| void ggml_cuda_set_main_device(const int main_device) { | |
| if (main_device >= g_device_count) { | |
| fprintf(stderr, "warning: cannot set main_device=%d because there are only %d devices. Using device %d instead.\n", | |
| main_device, g_device_count, g_main_device); | |
| return; | |
| } | |
| if (g_main_device != main_device && g_device_count > 1) { | |
| g_main_device = main_device; | |
| cudaDeviceProp prop; | |
| CUDA_CHECK(cudaGetDeviceProperties(&prop, g_main_device)); | |
| fprintf(stderr, "%s: using device %d (%s) as main device\n", __func__, g_main_device, prop.name); | |
| } | |
| } | |
| void ggml_cuda_set_scratch_size(const size_t scratch_size) { | |
| // this is a hack to not completely break llama.cpp when using multiple models or contexts simultaneously | |
| // it still won't always work as expected, but it's better than nothing | |
| if (scratch_size > g_scratch_size) { | |
| ggml_cuda_free_scratch(); | |
| } | |
| g_scratch_size = std::max(g_scratch_size, scratch_size); | |
| } | |
| void ggml_cuda_free_scratch() { | |
| if (g_scratch_buffer == nullptr) { | |
| return; | |
| } | |
| CUDA_CHECK(cudaFree(g_scratch_buffer)); | |
| g_scratch_buffer = nullptr; | |
| } | |
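| // main op dispatch: maps a ggml op to its CUDA implementation and runs it on the main device; | |
| // returns false when the op (or the tensor placement) is not handled so the caller can fall back to the CPU | |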
| bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { | |
| if (!g_cublas_loaded) return false; | |
| ggml_cuda_func_t func; | |
| const bool any_on_device = tensor->backend == GGML_BACKEND_GPU | |
| || (tensor->src[0] != nullptr && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) | |
| || (tensor->src[1] != nullptr && tensor->src[1]->backend == GGML_BACKEND_GPU); | |
| if (!any_on_device && tensor->op != GGML_OP_MUL_MAT && tensor->op != GGML_OP_MUL_MAT_ID) { | |
| return false; | |
| } | |
| if (tensor->op == GGML_OP_MUL_MAT) { | |
| if (tensor->src[0]->ne[3] != tensor->src[1]->ne[3]) { | |
| fprintf(stderr, "%s: cannot compute %s: src0->ne[3] = %" PRId64 ", src1->ne[3] = %" PRId64 " - fallback to CPU\n", __func__, tensor->name, tensor->src[0]->ne[3], tensor->src[1]->ne[3]); | |
| return false; | |
| } | |
| } | |
| switch (tensor->op) { | |
| case GGML_OP_REPEAT: | |
| func = ggml_cuda_repeat; | |
| break; | |
| case GGML_OP_GET_ROWS: | |
| func = ggml_cuda_get_rows; | |
| break; | |
| case GGML_OP_DUP: | |
| func = ggml_cuda_dup; | |
| break; | |
| case GGML_OP_ADD: | |
| func = ggml_cuda_add; | |
| break; | |
| case GGML_OP_ACC: | |
| func = ggml_cuda_acc; | |
| break; | |
| case GGML_OP_MUL: | |
| func = ggml_cuda_mul; | |
| break; | |
| case GGML_OP_DIV: | |
| func = ggml_cuda_div; | |
| break; | |
| case GGML_OP_UNARY: | |
| switch (ggml_get_unary_op(tensor)) { | |
| case GGML_UNARY_OP_GELU: | |
| func = ggml_cuda_gelu; | |
| break; | |
| case GGML_UNARY_OP_SILU: | |
| func = ggml_cuda_silu; | |
| break; | |
| case GGML_UNARY_OP_GELU_QUICK: | |
| func = ggml_cuda_gelu_quick; | |
| break; | |
| case GGML_UNARY_OP_TANH: | |
| func = ggml_cuda_tanh; | |
| break; | |
| case GGML_UNARY_OP_RELU: | |
| func = ggml_cuda_relu; | |
| break; | |
| default: | |
| return false; | |
| } | |
| break; | |
| case GGML_OP_NORM: | |
| func = ggml_cuda_norm; | |
| break; | |
| case GGML_OP_GROUP_NORM: | |
| func = ggml_cuda_group_norm; | |
| break; | |
| case GGML_OP_CONCAT: | |
| func = ggml_cuda_concat; | |
| break; | |
| case GGML_OP_UPSCALE: | |
| func = ggml_cuda_upscale; | |
| break; | |
| case GGML_OP_PAD: | |
| func = ggml_cuda_pad; | |
| break; | |
| case GGML_OP_LEAKY_RELU: | |
| func = ggml_cuda_leaky_relu; | |
| break; | |
| case GGML_OP_RMS_NORM: | |
| func = ggml_cuda_rms_norm; | |
| break; | |
| case GGML_OP_MUL_MAT: | |
| if (!any_on_device && !ggml_cuda_can_mul_mat(tensor->src[0], tensor->src[1], tensor)) { | |
| return false; | |
| } | |
| func = ggml_cuda_mul_mat; | |
| break; | |
| case GGML_OP_MUL_MAT_ID: | |
| if (!any_on_device && !ggml_cuda_can_mul_mat(tensor->src[2], tensor->src[1], tensor)) { | |
| return false; | |
| } | |
| func = ggml_cuda_mul_mat_id; | |
| break; | |
| case GGML_OP_SCALE: | |
| func = ggml_cuda_scale; | |
| break; | |
| case GGML_OP_SQR: | |
| func = ggml_cuda_sqr; | |
| break; | |
| case GGML_OP_CLAMP: | |
| func = ggml_cuda_clamp; | |
| break; | |
| case GGML_OP_CPY: | |
| func = ggml_cuda_cpy; | |
| break; | |
| case GGML_OP_CONT: | |
| func = ggml_cuda_dup; | |
| break; | |
| case GGML_OP_NONE: | |
| case GGML_OP_RESHAPE: | |
| case GGML_OP_VIEW: | |
| case GGML_OP_PERMUTE: | |
| case GGML_OP_TRANSPOSE: | |
| func = ggml_cuda_nop; | |
| break; | |
| case GGML_OP_DIAG_MASK_INF: | |
| func = ggml_cuda_diag_mask_inf; | |
| break; | |
| case GGML_OP_SOFT_MAX: | |
| func = ggml_cuda_soft_max; | |
| break; | |
| case GGML_OP_ROPE: | |
| func = ggml_cuda_rope; | |
| break; | |
| case GGML_OP_ALIBI: | |
| func = ggml_cuda_alibi; | |
| break; | |
| case GGML_OP_IM2COL: | |
| func = ggml_cuda_im2col; | |
| break; | |
| case GGML_OP_SUM_ROWS: | |
| func = ggml_cuda_sum_rows; | |
| break; | |
| case GGML_OP_ARGSORT: | |
| func = ggml_cuda_argsort; | |
| break; | |
| default: | |
| return false; | |
| } | |
| if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT) { | |
| ggml_cuda_set_peer_access(tensor->src[1]->ne[1]); | |
| } | |
| if (params->ith != 0) { | |
| return true; | |
| } | |
| if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { | |
| return true; | |
| } | |
| func(tensor->src[0], tensor->src[1], tensor); | |
| return true; | |
| } | |
| int ggml_cuda_get_device_count() { | |
| int device_count; | |
| if (cudaGetDeviceCount(&device_count) != cudaSuccess) { | |
| return 0; | |
| } | |
| return device_count; | |
| } | |
| void ggml_cuda_get_device_description(int device, char * description, size_t description_size) { | |
| cudaDeviceProp prop; | |
| CUDA_CHECK(cudaGetDeviceProperties(&prop, device)); | |
| snprintf(description, description_size, "%s", prop.name); | |
| } | |
| //////////////////////////////////////////////////////////////////////////////// | |
| // backend interface | |
| // cuda buffer | |
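| // per-buffer state: the owning device, the base device pointer and a small pool of tensor extras that are | |
| // handed out to tensors allocated inside this buffer | |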
| struct ggml_backend_buffer_context_cuda { | |
| int device; | |
| void * dev_ptr = nullptr; | |
| ggml_tensor_extra_gpu * temp_tensor_extras = nullptr; | |
| size_t temp_tensor_extra_index = 0; | |
| ggml_backend_buffer_context_cuda(int device, void * dev_ptr) : device(device), dev_ptr(dev_ptr) {} | |
| ~ggml_backend_buffer_context_cuda() { | |
| delete[] temp_tensor_extras; | |
| } | |
| ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() { | |
| if (temp_tensor_extras == nullptr) { | |
| temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_CUDA_MAX_NODES]; | |
| } | |
| size_t alloc_index = temp_tensor_extra_index; | |
| temp_tensor_extra_index = (temp_tensor_extra_index + 1) % GGML_CUDA_MAX_NODES; | |
| ggml_tensor_extra_gpu * extra = &temp_tensor_extras[alloc_index]; | |
| memset(extra, 0, sizeof(*extra)); | |
| return extra; | |
| } | |
| }; | |
| static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) { | |
| ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; | |
| CUDA_CHECK(cudaFree(ctx->dev_ptr)); | |
| delete ctx; | |
| } | |
| static void * ggml_backend_cuda_buffer_get_base(ggml_backend_buffer_t buffer) { | |
| ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; | |
| return ctx->dev_ptr; | |
| } | |
| static void ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { | |
| ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; | |
| if (tensor->view_src != NULL && tensor->view_offs == 0) { | |
| assert(tensor->view_src->buffer->buft == buffer->buft); | |
| tensor->backend = tensor->view_src->backend; | |
| tensor->extra = tensor->view_src->extra; | |
| return; | |
| } | |
| ggml_tensor_extra_gpu * extra = ctx->ggml_cuda_alloc_temp_tensor_extra(); | |
| extra->data_device[ctx->device] = tensor->data; | |
| tensor->backend = GGML_BACKEND_GPU; | |
| tensor->extra = extra; | |
| if (ggml_is_quantized(tensor->type)) { | |
| // initialize padding to 0 to avoid possible NaN values | |
| int64_t row_low = 0; | |
| int64_t row_high = ggml_nrows(tensor); | |
| int64_t nrows_split = row_high - row_low; | |
| size_t original_size = ggml_nbytes_split(tensor, nrows_split); | |
| size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor); | |
| if (padded_size > original_size && tensor->view_src == nullptr) { | |
| CUDA_CHECK(cudaMemsetAsync((char *)tensor->data + original_size, 0, padded_size - original_size, g_cudaStreams[ctx->device][0])); | |
| } | |
| } | |
| UNUSED(buffer); | |
| } | |
| static void ggml_backend_cuda_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { | |
| GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); | |
| ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; | |
| ggml_cuda_set_device(ctx->device); | |
| CUDA_CHECK(cudaDeviceSynchronize()); | |
| CUDA_CHECK(cudaMemcpy((char *)tensor->data + offset, data, size, cudaMemcpyHostToDevice)); | |
| CUDA_CHECK(cudaDeviceSynchronize()); | |
| } | |
| static void ggml_backend_cuda_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { | |
| GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); | |
| ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; | |
| ggml_cuda_set_device(ctx->device); | |
| CUDA_CHECK(cudaDeviceSynchronize()); | |
| CUDA_CHECK(cudaMemcpy(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost)); | |
| } | |
| static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { | |
| ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; | |
| ggml_cuda_set_device(ctx->device); | |
| CUDA_CHECK(cudaDeviceSynchronize()); | |
| CUDA_CHECK(cudaMemset(ctx->dev_ptr, value, buffer->size)); | |
| } | |
| static struct ggml_backend_buffer_i cuda_backend_buffer_interface = { | |
| /* .free_buffer = */ ggml_backend_cuda_buffer_free_buffer, | |
| /* .get_base = */ ggml_backend_cuda_buffer_get_base, | |
| /* .init_tensor = */ ggml_backend_cuda_buffer_init_tensor, | |
| /* .set_tensor = */ ggml_backend_cuda_buffer_set_tensor, | |
| /* .get_tensor = */ ggml_backend_cuda_buffer_get_tensor, | |
| /* .cpy_tensor_from = */ NULL, | |
| /* .cpy_tensor_to = */ NULL, | |
| /* .clear = */ ggml_backend_cuda_buffer_clear, | |
| }; | |
| // cuda buffer type | |
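| // one buffer type per device; get_alloc_size pads quantized rows up to MATRIX_ROW_PADDING, matching the | |
| // padding that init_tensor zero-fills | |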
| static ggml_backend_buffer_t ggml_backend_cuda_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { | |
| int device = (int) (intptr_t) buft->context; | |
| ggml_cuda_set_device(device); | |
| size = std::max(size, (size_t)1); // cudaMalloc returns null for size 0 | |
| void * dev_ptr; | |
| CUDA_CHECK(cudaMalloc(&dev_ptr, size)); | |
| ggml_backend_buffer_context_cuda * ctx = new ggml_backend_buffer_context_cuda(device, dev_ptr); | |
| return ggml_backend_buffer_init(buft, cuda_backend_buffer_interface, ctx, size); | |
| } | |
| static size_t ggml_backend_cuda_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { | |
| return 128; | |
| UNUSED(buft); | |
| } | |
| static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, ggml_tensor * tensor) { | |
| int64_t row_low = 0; | |
| int64_t row_high = ggml_nrows(tensor); | |
| int64_t nrows_split = row_high - row_low; | |
| size_t size = ggml_nbytes_split(tensor, nrows_split); | |
| int64_t ne0 = tensor->ne[0]; | |
| if (ggml_is_quantized(tensor->type)) { | |
| if (ne0 % MATRIX_ROW_PADDING != 0) { | |
| size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); | |
| } | |
| } | |
| return size; | |
| UNUSED(buft); | |
| } | |
| static bool ggml_backend_cuda_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) { | |
| return ggml_backend_is_cuda(backend); | |
| UNUSED(buft); | |
| } | |
| static ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = { | |
| /* .alloc_buffer = */ ggml_backend_cuda_buffer_type_alloc_buffer, | |
| /* .get_alignment = */ ggml_backend_cuda_buffer_type_get_alignment, | |
| /* .get_alloc_size = */ ggml_backend_cuda_buffer_type_get_alloc_size, | |
| /* .supports_backend = */ ggml_backend_cuda_buffer_type_supports_backend, | |
| /* .is_host = */ nullptr, | |
| }; | |
| ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { | |
| static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_types[GGML_CUDA_MAX_DEVICES]; | |
| static bool ggml_backend_cuda_buffer_type_initialized = false; | |
| if (!ggml_backend_cuda_buffer_type_initialized) { | |
| for (int i = 0; i < GGML_CUDA_MAX_DEVICES; i++) { | |
| ggml_backend_cuda_buffer_types[i] = { | |
| /* .iface = */ ggml_backend_cuda_buffer_type_interface, | |
| /* .context = */ (ggml_backend_buffer_type_context_t) (intptr_t) i, | |
| }; | |
| } | |
| ggml_backend_cuda_buffer_type_initialized = true; | |
| } | |
| return &ggml_backend_cuda_buffer_types[device]; | |
| } | |
| // host buffer type | |
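| // pinned (page-locked) host memory so host<->device transfers can be asynchronous; falls back to a plain | |
| // CPU buffer when the pinned allocation fails | |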
| static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { | |
| ggml_cuda_host_free(buffer->context); | |
| } | |
| static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { | |
| void * ptr = ggml_cuda_host_malloc(size); | |
| if (ptr == nullptr) { | |
| // fallback to cpu buffer | |
| return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); | |
| } | |
| // FIXME: this is a hack to avoid having to implement a new buffer type | |
| ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); | |
| buffer->buft = buft; | |
| buffer->iface.free_buffer = ggml_backend_cuda_host_buffer_free_buffer; | |
| return buffer; | |
| } | |
| ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type() { | |
| static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_type_host = { | |
| /* .iface = */ { | |
| /* .alloc_buffer = */ ggml_backend_cuda_host_buffer_type_alloc_buffer, | |
| /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, | |
| /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size, | |
| /* .supports_backend = */ ggml_backend_cpu_buffer_type()->iface.supports_backend, | |
| /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host, | |
| }, | |
| /* .context = */ nullptr, | |
| }; | |
| return &ggml_backend_cuda_buffer_type_host; | |
| } | |
| // backend | |
| struct ggml_backend_context_cuda { | |
| int device; | |
| }; | |
| static const char * ggml_backend_cuda_name(ggml_backend_t backend) { | |
| return GGML_CUDA_NAME; | |
| UNUSED(backend); | |
| } | |
| static void ggml_backend_cuda_free(ggml_backend_t backend) { | |
| ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; | |
| delete cuda_ctx; | |
| delete backend; | |
| } | |
| static ggml_backend_buffer_type_t ggml_backend_cuda_get_default_buffer_type(ggml_backend_t backend) { | |
| ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; | |
| return ggml_backend_cuda_buffer_type(cuda_ctx->device); | |
| } | |
| static void ggml_backend_cuda_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { | |
| ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; | |
| GGML_ASSERT(tensor->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && "unsupported buffer type"); | |
| GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); | |
| CUDA_CHECK(cudaMemcpyAsync((char *)tensor->data + offset, data, size, cudaMemcpyHostToDevice, g_cudaStreams[cuda_ctx->device][0])); | |
| } | |
| static void ggml_backend_cuda_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { | |
| ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; | |
| GGML_ASSERT(tensor->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && "unsupported buffer type"); | |
| GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); | |
| CUDA_CHECK(cudaMemcpyAsync(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost, g_cudaStreams[cuda_ctx->device][0])); | |
| } | |
| static void ggml_backend_cuda_synchronize(ggml_backend_t backend) { | |
| ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; | |
| CUDA_CHECK(cudaStreamSynchronize(g_cudaStreams[cuda_ctx->device][0])); | |
| UNUSED(backend); | |
| } | |
| static ggml_backend_graph_plan_t ggml_backend_cuda_graph_plan_create(ggml_backend_t backend, ggml_cgraph * cgraph) { | |
| GGML_ASSERT(!"not implemented"); | |
| return nullptr; | |
| UNUSED(backend); | |
| UNUSED(cgraph); | |
| } | |
| static void ggml_backend_cuda_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { | |
| GGML_ASSERT(!"not implemented"); | |
| UNUSED(backend); | |
| UNUSED(plan); | |
| } | |
| static void ggml_backend_cuda_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { | |
| GGML_ASSERT(!"not implemented"); | |
| UNUSED(backend); | |
| UNUSED(plan); | |
| } | |
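| // executes the graph node by node through ggml_cuda_compute_forward; graph plans are not implemented | |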
| static bool ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { | |
| ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; | |
| ggml_cuda_set_main_device(cuda_ctx->device); | |
| ggml_compute_params params = {}; | |
| params.type = GGML_TASK_COMPUTE; | |
| params.ith = 0; | |
| for (int i = 0; i < cgraph->n_nodes; i++) { | |
| ggml_tensor * node = cgraph->nodes[i]; | |
| if (node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE) | |
| continue; | |
| assert(node->backend == GGML_BACKEND_GPU); | |
| assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); | |
| assert(node->extra != nullptr); | |
| for (int j = 0; j < GGML_MAX_SRC; j++) { | |
| if (node->src[j] != nullptr) { | |
| assert(node->src[j]->backend == GGML_BACKEND_GPU); | |
| assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); | |
| assert(node->src[j]->extra != nullptr); | |
| } | |
| } | |
| bool ok = ggml_cuda_compute_forward(¶ms, node); | |
| if (!ok) { | |
| fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op)); | |
| } | |
| GGML_ASSERT(ok); | |
| #if 0 // debug: dump every F32 node after it is computed (extremely slow, keep disabled) | |
| if (node->type == GGML_TYPE_F32) { | |
| cudaDeviceSynchronize(); | |
| std::vector<float> tmp(ggml_nelements(node), 0.0f); | |
| cudaMemcpy(tmp.data(), node->data, ggml_nelements(node)*sizeof(float), cudaMemcpyDeviceToHost); | |
| printf("\n%s (%s) (%s %s) (%s %s): ", node->name, ggml_op_name(node->op), | |
| ggml_type_name(node->src[0]->type), | |
| node->src[1] ? ggml_type_name(node->src[1]->type) : "none", | |
| node->src[0]->name, | |
| node->src[1] ? node->src[1]->name : "none"); | |
| double sum = 0.0; | |
| double sq_sum = 0.0; | |
| for (int i = 0; i < ggml_nelements(node); i++) { | |
| printf("%f ", tmp[i]); | |
| sum += tmp[i]; | |
| sq_sum += tmp[i]*tmp[i]; | |
| } | |
| printf("\n"); | |
| printf("sum: %f, ", sum); | |
| printf("sq_sum: %f\n", sq_sum); | |
| } | |
| #endif | |
| } | |
| UNUSED(backend); | |
| return true; | |
| } | |
| static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, const ggml_tensor * op) { | |
| switch (op->op) { | |
| case GGML_OP_UNARY: | |
| switch (ggml_get_unary_op(op)) { | |
| case GGML_UNARY_OP_GELU: | |
| case GGML_UNARY_OP_SILU: | |
| case GGML_UNARY_OP_RELU: | |
| case GGML_UNARY_OP_GELU_QUICK: | |
| case GGML_UNARY_OP_TANH: | |
| return true; | |
| default: | |
| return false; | |
| } | |
| break; | |
| case GGML_OP_MUL_MAT: | |
| case GGML_OP_MUL_MAT_ID: | |
| { | |
| struct ggml_tensor * a; | |
| struct ggml_tensor * b; | |
| if (op->op == GGML_OP_MUL_MAT) { | |
| a = op->src[0]; | |
| b = op->src[1]; | |
| } else { | |
| a = op->src[2]; | |
| b = op->src[1]; | |
| } | |
| if (a->ne[3] != b->ne[3]) { | |
| return false; | |
| } | |
| return true; | |
| } break; | |
| case GGML_OP_GET_ROWS: | |
| { | |
| switch (op->src[0]->type) { | |
| case GGML_TYPE_F16: | |
| case GGML_TYPE_F32: | |
| case GGML_TYPE_Q4_0: | |
| case GGML_TYPE_Q4_1: | |
| case GGML_TYPE_Q5_0: | |
| case GGML_TYPE_Q5_1: | |
| case GGML_TYPE_Q8_0: | |
| return true; | |
| default: | |
| return false; | |
| } | |
| } break; | |
| case GGML_OP_CPY: | |
| { | |
| ggml_type src0_type = op->src[0]->type; | |
| ggml_type src1_type = op->src[1]->type; | |
| if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) { | |
| return true; | |
| } | |
| if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) { | |
| return true; | |
| } | |
| if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q8_0) { | |
| return true; | |
| } | |
| if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q4_0) { | |
| return true; | |
| } | |
| if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q4_1) { | |
| return true; | |
| } | |
| if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) { | |
| return true; | |
| } | |
| return false; | |
| } break; | |
| case GGML_OP_DUP: | |
| case GGML_OP_REPEAT: | |
| case GGML_OP_CONCAT: | |
| { | |
| ggml_type src0_type = op->src[0]->type; | |
| return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16; | |
| } break; | |
| case GGML_OP_NONE: | |
| case GGML_OP_RESHAPE: | |
| case GGML_OP_VIEW: | |
| case GGML_OP_PERMUTE: | |
| case GGML_OP_TRANSPOSE: | |
| case GGML_OP_NORM: | |
| case GGML_OP_ADD: | |
| case GGML_OP_MUL: | |
| case GGML_OP_DIV: | |
| case GGML_OP_RMS_NORM: | |
| case GGML_OP_SCALE: | |
| case GGML_OP_SQR: | |
| case GGML_OP_CLAMP: | |
| case GGML_OP_CONT: | |
| case GGML_OP_DIAG_MASK_INF: | |
| case GGML_OP_SOFT_MAX: | |
| case GGML_OP_ROPE: | |
| case GGML_OP_ALIBI: | |
| case GGML_OP_IM2COL: | |
| case GGML_OP_SUM_ROWS: | |
| case GGML_OP_ARGSORT: | |
| case GGML_OP_ACC: | |
| case GGML_OP_GROUP_NORM: | |
| case GGML_OP_UPSCALE: | |
| case GGML_OP_PAD: | |
| case GGML_OP_LEAKY_RELU: | |
| return true; | |
| default: | |
| return false; | |
| } | |
| UNUSED(backend); | |
| } | |
| static ggml_backend_i cuda_backend_i = { | |
| /* .get_name = */ ggml_backend_cuda_name, | |
| /* .free = */ ggml_backend_cuda_free, | |
| /* .get_default_buffer_type = */ ggml_backend_cuda_get_default_buffer_type, | |
| /* .set_tensor_async = */ ggml_backend_cuda_set_tensor_async, | |
| /* .get_tensor_async = */ ggml_backend_cuda_get_tensor_async, | |
| /* .cpy_tensor_from_async = */ NULL, | |
| /* .cpy_tensor_to_async = */ NULL, | |
| /* .synchronize = */ ggml_backend_cuda_synchronize, | |
| /* .graph_plan_create = */ ggml_backend_cuda_graph_plan_create, | |
| /* .graph_plan_free = */ ggml_backend_cuda_graph_plan_free, | |
| /* .graph_plan_compute = */ ggml_backend_cuda_graph_plan_compute, | |
| /* .graph_compute = */ ggml_backend_cuda_graph_compute, | |
| /* .supports_op = */ ggml_backend_cuda_supports_op, | |
| }; | |
| ggml_backend_t ggml_backend_cuda_init(int device) { | |
| ggml_init_cublas(); // TODO: remove from ggml.c | |
| if (device < 0 || device >= ggml_cuda_get_device_count()) { | |
| fprintf(stderr, "%s: error: invalid device %d\n", __func__, device); | |
| return nullptr; | |
| } | |
| // not strictly necessary, but it may reduce the overhead of the first graph_compute | |
| ggml_cuda_set_main_device(device); | |
| ggml_backend_context_cuda * ctx = new ggml_backend_context_cuda { | |
| /* .device = */ device | |
| }; | |
| ggml_backend_t cuda_backend = new ggml_backend { | |
| /* .interface = */ cuda_backend_i, | |
| /* .context = */ ctx | |
| }; | |
| return cuda_backend; | |
| } | |
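| // Minimal usage sketch (illustrative only): the backend is normally driven through the generic | |
| // ggml-backend API declared in ggml-backend.h; error handling and tensor/buffer setup are omitted. | |
| // | |
| //   ggml_backend_t backend = ggml_backend_cuda_init(0); // device 0 | |
| //   // ... allocate the graph's tensors in a buffer from ggml_backend_cuda_buffer_type(0) ... | |
| //   ggml_backend_graph_compute(backend, gf);            // gf: a previously built ggml_cgraph | |
| //   ggml_backend_free(backend); | |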
| bool ggml_backend_is_cuda(ggml_backend_t backend) { | |
| return backend->iface.get_name == ggml_backend_cuda_name; | |
| } | |
| static ggml_backend_t ggml_backend_reg_cuda_init(const char * params, void * user_data) { | |
| ggml_backend_t cuda_backend = ggml_backend_cuda_init((int) (intptr_t) user_data); | |
| return cuda_backend; | |
| UNUSED(params); | |
| } | |
| extern "C" int ggml_backend_cuda_reg_devices(); | |
| int ggml_backend_cuda_reg_devices() { | |
| int device_count = ggml_cuda_get_device_count(); | |
| //int device_count = 1; // DEBUG: some tools require delaying CUDA initialization | |
| for (int i = 0; i < device_count; i++) { | |
| char name[128]; | |
| snprintf(name, sizeof(name), "%s%d", GGML_CUDA_NAME, i); | |
| ggml_backend_register(name, ggml_backend_reg_cuda_init, ggml_backend_cuda_buffer_type(i), (void *) (intptr_t) i); | |
| } | |
| return device_count; | |
| } | |