| // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example: | |
| // | |
| // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/ | |
| // | |
| // multiply int8_t, add results pairwise twice | |
| static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { | |
| // Get absolute values of x vectors | |
| const __m128i ax = _mm_sign_epi8(x, x); | |
| // Sign the values of the y vectors | |
| const __m128i sy = _mm_sign_epi8(y, x); | |
| // Perform multiplication and create 16-bit values | |
| const __m128i dot = _mm_maddubs_epi16(ax, sy); | |
| const __m128i ones = _mm_set1_epi16(1); | |
| return _mm_madd_epi16(ones, dot); | |
| } | |
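// Note on the trick above: _mm_maddubs_epi16 multiplies *unsigned* bytes by *signed*
// bytes. Taking |x| and copying x's sign onto y keeps every product x*y unchanged
// while guaranteeing the first operand is non-negative, so the intrinsic can be used
// for a signed int8 dot product.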
| // horizontally add 8 floats | |
| static inline float hsum_float_8(const __m256 x) { | |
| __m128 res = _mm256_extractf128_ps(x, 1); | |
| res = _mm_add_ps(res, _mm256_castps256_ps128(x)); | |
| res = _mm_add_ps(res, _mm_movehl_ps(res, res)); | |
| res = _mm_add_ss(res, _mm_movehdup_ps(res)); | |
| return _mm_cvtss_f32(res); | |
| } | |
| // horizontally add 8 int32_t | |
| static inline int hsum_i32_8(const __m256i a) { | |
| const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1)); | |
| const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128); | |
| const __m128i sum64 = _mm_add_epi32(hi64, sum128); | |
| const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); | |
| return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); | |
| } | |
| // horizontally add 4 int32_t | |
| static inline int hsum_i32_4(const __m128i a) { | |
| const __m128i hi64 = _mm_unpackhi_epi64(a, a); | |
| const __m128i sum64 = _mm_add_epi32(hi64, a); | |
| const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); | |
| return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); | |
| } | |
#if defined(__AVX2__) || defined(__AVX512F__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
| static inline __m256i bytes_from_bits_32(const uint8_t * x) { | |
| uint32_t x32; | |
| memcpy(&x32, x, sizeof(uint32_t)); | |
| const __m256i shuf_mask = _mm256_set_epi64x( | |
| 0x0303030303030303, 0x0202020202020202, | |
| 0x0101010101010101, 0x0000000000000000); | |
| __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask); | |
| const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe); | |
| bytes = _mm256_or_si256(bytes, bit_mask); | |
| return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1)); | |
| } | |
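// How the expansion above works: the 32-bit word is broadcast to every lane, the
// shuffle replicates byte k of the word across output bytes 8k..8k+7, and OR-ing with
// bit_mask (whose j-th byte within each 8-byte group is ~(1 << j)) sets every bit
// except the one being tested. The final compare against all-ones therefore yields
// 0xFF where the source bit was 1 and 0x00 where it was 0.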
| // Unpack 32 4-bit fields into 32 bytes | |
| // The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval | |
| static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) | |
| { | |
| const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi); | |
| const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp); | |
| const __m256i lowMask = _mm256_set1_epi8( 0xF ); | |
| return _mm256_and_si256(lowMask, bytes); | |
| } | |
| // add int16_t pairwise and return as float vector | |
| static inline __m256 sum_i16_pairs_float(const __m256i x) { | |
| const __m256i ones = _mm256_set1_epi16(1); | |
| const __m256i summed_pairs = _mm256_madd_epi16(ones, x); | |
| return _mm256_cvtepi32_ps(summed_pairs); | |
| } | |
static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
#if __AVXVNNI__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Perform multiplication and create 16-bit values
    const __m256i dot = _mm256_maddubs_epi16(ax, sy);
    return sum_i16_pairs_float(dot);
#endif
}
// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
#if __AVXVNNIINT8__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Get absolute values of x vectors
    const __m256i ax = _mm256_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m256i sy = _mm256_sign_epi8(y, x);
    return mul_sum_us8_pairs_float(ax, sy);
#endif
}
static inline __m128i packNibbles( __m256i bytes )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
#if __AVX512F__
    const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4);   // 0000_0000_abcd_0000
    bytes = _mm256_or_si256(bytes, bytes_srli_4);               // 0000_abcd_abcd_efgh
    return _mm256_cvtepi16_epi8(bytes);                         // abcd_efgh
#else
    const __m256i lowByte = _mm256_set1_epi16( 0xFF );
    __m256i high = _mm256_andnot_si256( lowByte, bytes );
    __m256i low = _mm256_and_si256( lowByte, bytes );
    high = _mm256_srli_epi16( high, 4 );
    bytes = _mm256_or_si256( low, high );

    // Compress uint16_t lanes into bytes
    __m128i r0 = _mm256_castsi256_si128( bytes );
    __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
    return _mm_packus_epi16( r0, r1 );
#endif
}
#elif defined(__AVX__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
| static inline __m256i bytes_from_bits_32(const uint8_t * x) { | |
| uint32_t x32; | |
| memcpy(&x32, x, sizeof(uint32_t)); | |
| const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); | |
| const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202); | |
| __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl); | |
| __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh); | |
| const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe); | |
| bytesl = _mm_or_si128(bytesl, bit_mask); | |
| bytesh = _mm_or_si128(bytesh, bit_mask); | |
| bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1)); | |
| bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1)); | |
| return MM256_SET_M128I(bytesh, bytesl); | |
| } | |
| // Unpack 32 4-bit fields into 32 bytes | |
| // The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval | |
| static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) | |
| { | |
| // Load 16 bytes from memory | |
| __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi); | |
| __m128i tmph = _mm_srli_epi16(tmpl, 4); | |
| const __m128i lowMask = _mm_set1_epi8(0xF); | |
| tmpl = _mm_and_si128(lowMask, tmpl); | |
| tmph = _mm_and_si128(lowMask, tmph); | |
| return MM256_SET_M128I(tmph, tmpl); | |
| } | |
| // add int16_t pairwise and return as float vector | |
| static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) { | |
| const __m128i ones = _mm_set1_epi16(1); | |
| const __m128i summed_pairsl = _mm_madd_epi16(ones, xl); | |
| const __m128i summed_pairsh = _mm_madd_epi16(ones, xh); | |
| const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl); | |
| return _mm256_cvtepi32_ps(summed_pairs); | |
| } | |
| static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { | |
| const __m128i axl = _mm256_castsi256_si128(ax); | |
| const __m128i axh = _mm256_extractf128_si256(ax, 1); | |
| const __m128i syl = _mm256_castsi256_si128(sy); | |
| const __m128i syh = _mm256_extractf128_si256(sy, 1); | |
| // Perform multiplication and create 16-bit values | |
| const __m128i dotl = _mm_maddubs_epi16(axl, syl); | |
| const __m128i doth = _mm_maddubs_epi16(axh, syh); | |
| return sum_i16_pairs_float(doth, dotl); | |
| } | |
| // multiply int8_t, add results pairwise twice and return as float vector | |
| static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { | |
| const __m128i xl = _mm256_castsi256_si128(x); | |
| const __m128i xh = _mm256_extractf128_si256(x, 1); | |
| const __m128i yl = _mm256_castsi256_si128(y); | |
| const __m128i yh = _mm256_extractf128_si256(y, 1); | |
| // Get absolute values of x vectors | |
| const __m128i axl = _mm_sign_epi8(xl, xl); | |
| const __m128i axh = _mm_sign_epi8(xh, xh); | |
| // Sign the values of the y vectors | |
| const __m128i syl = _mm_sign_epi8(yl, xl); | |
| const __m128i syh = _mm_sign_epi8(yh, xh); | |
| // Perform multiplication and create 16-bit values | |
| const __m128i dotl = _mm_maddubs_epi16(axl, syl); | |
| const __m128i doth = _mm_maddubs_epi16(axh, syh); | |
| return sum_i16_pairs_float(doth, dotl); | |
| } | |
| static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 ) | |
| { | |
| // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh | |
| const __m128i lowByte = _mm_set1_epi16( 0xFF ); | |
| __m128i high = _mm_andnot_si128( lowByte, bytes1 ); | |
| __m128i low = _mm_and_si128( lowByte, bytes1 ); | |
| high = _mm_srli_epi16( high, 4 ); | |
| bytes1 = _mm_or_si128( low, high ); | |
| high = _mm_andnot_si128( lowByte, bytes2 ); | |
| low = _mm_and_si128( lowByte, bytes2 ); | |
| high = _mm_srli_epi16( high, 4 ); | |
| bytes2 = _mm_or_si128( low, high ); | |
| return _mm_packus_epi16( bytes1, bytes2); | |
| } | |
#elif defined(__SSSE3__)
// horizontally add 4x4 floats
| static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { | |
| __m128 res_0 =_mm_hadd_ps(a, b); | |
| __m128 res_1 =_mm_hadd_ps(c, d); | |
| __m128 res =_mm_hadd_ps(res_0, res_1); | |
| res =_mm_hadd_ps(res, res); | |
| res =_mm_hadd_ps(res, res); | |
| return _mm_cvtss_f32(res); | |
}
#endif
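// The helpers below provide scalar fallbacks for Arm NEON intrinsics that exist only
// on AArch64, plus a few load/table-lookup wrappers; in the original tree they are
// presumably compiled only when the native intrinsics are unavailable, e.g. on 32-bit
// Arm builds.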
| // 64-bit compatibility | |
| // vaddvq_s16 | |
| // vpaddq_s16 | |
| // vpaddq_s32 | |
| // vaddvq_s32 | |
| // vaddvq_f32 | |
| // vmaxvq_f32 | |
| // vcvtnq_s32_f32 | |
| // vzip1_u8 | |
| // vzip2_u8 | |
| inline static int32_t vaddvq_s16(int16x8_t v) { | |
| return | |
| (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) + | |
| (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) + | |
| (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) + | |
| (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7); | |
| } | |
| inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) { | |
| int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a)); | |
| int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b)); | |
| return vcombine_s16(a0, b0); | |
| } | |
| inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) { | |
| int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a)); | |
| int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b)); | |
| return vcombine_s32(a0, b0); | |
| } | |
| inline static int32_t vaddvq_s32(int32x4_t v) { | |
| return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3); | |
| } | |
| inline static float vaddvq_f32(float32x4_t v) { | |
| return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3); | |
| } | |
| inline static float vmaxvq_f32(float32x4_t v) { | |
| return | |
| MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)), | |
| MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3))); | |
| } | |
| inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) { | |
| int32x4_t res; | |
| res[0] = roundf(vgetq_lane_f32(v, 0)); | |
| res[1] = roundf(vgetq_lane_f32(v, 1)); | |
| res[2] = roundf(vgetq_lane_f32(v, 2)); | |
| res[3] = roundf(vgetq_lane_f32(v, 3)); | |
| return res; | |
| } | |
| inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) { | |
| uint8x8_t res; | |
| res[0] = a[0]; res[1] = b[0]; | |
| res[2] = a[1]; res[3] = b[1]; | |
| res[4] = a[2]; res[5] = b[2]; | |
| res[6] = a[3]; res[7] = b[3]; | |
| return res; | |
| } | |
| inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) { | |
| uint8x8_t res; | |
| res[0] = a[4]; res[1] = b[4]; | |
| res[2] = a[5]; res[3] = b[5]; | |
| res[4] = a[6]; res[5] = b[6]; | |
| res[6] = a[7]; res[7] = b[7]; | |
| return res; | |
| } | |
| // vld1q_s16_x2 | |
| // vld1q_u8_x2 | |
| // vld1q_u8_x4 | |
| // vld1q_s8_x2 | |
| // vld1q_s8_x4 | |
| // TODO: double-check these work correctly | |
| typedef struct ggml_int16x8x2_t { | |
| int16x8_t val[2]; | |
| } ggml_int16x8x2_t; | |
| inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) { | |
| ggml_int16x8x2_t res; | |
| res.val[0] = vld1q_s16(ptr + 0); | |
| res.val[1] = vld1q_s16(ptr + 8); | |
| return res; | |
| } | |
| typedef struct ggml_uint8x16x2_t { | |
| uint8x16_t val[2]; | |
| } ggml_uint8x16x2_t; | |
| inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) { | |
| ggml_uint8x16x2_t res; | |
| res.val[0] = vld1q_u8(ptr + 0); | |
| res.val[1] = vld1q_u8(ptr + 16); | |
| return res; | |
| } | |
| typedef struct ggml_uint8x16x4_t { | |
| uint8x16_t val[4]; | |
| } ggml_uint8x16x4_t; | |
| inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) { | |
| ggml_uint8x16x4_t res; | |
| res.val[0] = vld1q_u8(ptr + 0); | |
| res.val[1] = vld1q_u8(ptr + 16); | |
| res.val[2] = vld1q_u8(ptr + 32); | |
| res.val[3] = vld1q_u8(ptr + 48); | |
| return res; | |
| } | |
| typedef struct ggml_int8x16x2_t { | |
| int8x16_t val[2]; | |
| } ggml_int8x16x2_t; | |
| inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) { | |
| ggml_int8x16x2_t res; | |
| res.val[0] = vld1q_s8(ptr + 0); | |
| res.val[1] = vld1q_s8(ptr + 16); | |
| return res; | |
| } | |
| typedef struct ggml_int8x16x4_t { | |
| int8x16_t val[4]; | |
| } ggml_int8x16x4_t; | |
| inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) { | |
| ggml_int8x16x4_t res; | |
| res.val[0] = vld1q_s8(ptr + 0); | |
| res.val[1] = vld1q_s8(ptr + 16); | |
| res.val[2] = vld1q_s8(ptr + 32); | |
| res.val[3] = vld1q_s8(ptr + 48); | |
| return res; | |
| } | |
| // NOTE: not tested | |
| inline static int8x16_t ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) { | |
| int8x16_t res; | |
| res[ 0] = a[b[ 0]]; | |
| res[ 1] = a[b[ 1]]; | |
| res[ 2] = a[b[ 2]]; | |
| res[ 3] = a[b[ 3]]; | |
| res[ 4] = a[b[ 4]]; | |
| res[ 5] = a[b[ 5]]; | |
| res[ 6] = a[b[ 6]]; | |
| res[ 7] = a[b[ 7]]; | |
| res[ 8] = a[b[ 8]]; | |
| res[ 9] = a[b[ 9]]; | |
| res[10] = a[b[10]]; | |
| res[11] = a[b[11]]; | |
| res[12] = a[b[12]]; | |
| res[13] = a[b[13]]; | |
| res[14] = a[b[14]]; | |
| res[15] = a[b[15]]; | |
| return res; | |
| } | |
| // NOTE: not tested | |
inline static uint8x16_t ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) {
    uint8x16_t res;
| res[ 0] = a[b[ 0]]; | |
| res[ 1] = a[b[ 1]]; | |
| res[ 2] = a[b[ 2]]; | |
| res[ 3] = a[b[ 3]]; | |
| res[ 4] = a[b[ 4]]; | |
| res[ 5] = a[b[ 5]]; | |
| res[ 6] = a[b[ 6]]; | |
| res[ 7] = a[b[ 7]]; | |
| res[ 8] = a[b[ 8]]; | |
| res[ 9] = a[b[ 9]]; | |
| res[10] = a[b[10]]; | |
| res[11] = a[b[11]]; | |
| res[12] = a[b[12]]; | |
| res[13] = a[b[13]]; | |
| res[14] = a[b[14]]; | |
| res[15] = a[b[15]]; | |
| return res; | |
| } | |
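// ggml_vdotq_s32 below emulates the AArch64 dot-product instruction (vdotq_s32,
// available with the __ARM_FEATURE_DOTPROD extension) using widening multiplies and
// pairwise additions, so the same code path works on CPUs without that extension.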
| inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) { | |
| const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b)); | |
| const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b)); | |
| return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1))); | |
| } | |
// precomputed tables for expanding 8 bits to 8 bytes:
| static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 | |
| static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 | |
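// Entry i of each table spreads the 8 bits of i across the 8 bytes of a uint64_t:
// in table_b2b_0 a set bit becomes 0x10 and a clear bit 0x00, while table_b2b_1
// inverts the roles. B8 is assumed to be a token-pasting helper macro defined earlier
// in the file that enumerates all 256 bit patterns.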
| // reference implementation for deterministic creation of model files | |
| void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) { | |
| static const int qk = QK4_0; | |
| assert(k % qk == 0); | |
| const int nb = k / qk; | |
| for (int i = 0; i < nb; i++) { | |
| float amax = 0.0f; // absolute max | |
| float max = 0.0f; | |
| for (int j = 0; j < qk; j++) { | |
| const float v = x[i*qk + j]; | |
| if (amax < fabsf(v)) { | |
| amax = fabsf(v); | |
| max = v; | |
| } | |
| } | |
| const float d = max / -8; | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = GGML_FP32_TO_FP16(d); | |
| for (int j = 0; j < qk/2; ++j) { | |
| const float x0 = x[i*qk + 0 + j]*id; | |
| const float x1 = x[i*qk + qk/2 + j]*id; | |
| const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); | |
| const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); | |
| y[i].qs[j] = xi0; | |
| y[i].qs[j] |= xi1 << 4; | |
| } | |
| } | |
| } | |
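// Worked example (illustrative): if the largest-magnitude value in a block is
// max = -2.0f, then d = max/-8 = 0.25 and id = 4.0.  A source value 1.6 maps to
// (int8_t)(1.6*4.0 + 8.5) = 14 and dequantizes back to (14 - 8)*0.25 = 1.5.
// Each block therefore stores one fp16 scale plus qk/2 bytes, with the low nibble of
// qs[j] holding quant j and the high nibble holding quant j + qk/2.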
| void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { | |
| quantize_row_q4_0_reference(x, y, k); | |
| } | |
| void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) { | |
| const int qk = QK4_1; | |
| assert(k % qk == 0); | |
| const int nb = k / qk; | |
| for (int i = 0; i < nb; i++) { | |
| float min = FLT_MAX; | |
| float max = -FLT_MAX; | |
| for (int j = 0; j < qk; j++) { | |
| const float v = x[i*qk + j]; | |
| if (v < min) min = v; | |
| if (v > max) max = v; | |
| } | |
| const float d = (max - min) / ((1 << 4) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = GGML_FP32_TO_FP16(d); | |
| y[i].m = GGML_FP32_TO_FP16(min); | |
| for (int j = 0; j < qk/2; ++j) { | |
| const float x0 = (x[i*qk + 0 + j] - min)*id; | |
| const float x1 = (x[i*qk + qk/2 + j] - min)*id; | |
| const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); | |
| const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); | |
| y[i].qs[j] = xi0; | |
| y[i].qs[j] |= xi1 << 4; | |
| } | |
| } | |
| } | |
| void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) { | |
| quantize_row_q4_1_reference(x, y, k); | |
| } | |
| void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) { | |
| static const int qk = QK5_0; | |
| assert(k % qk == 0); | |
| const int nb = k / qk; | |
| for (int i = 0; i < nb; i++) { | |
| float amax = 0.0f; // absolute max | |
| float max = 0.0f; | |
| for (int j = 0; j < qk; j++) { | |
| const float v = x[i*qk + j]; | |
| if (amax < fabsf(v)) { | |
| amax = fabsf(v); | |
| max = v; | |
| } | |
| } | |
| const float d = max / -16; | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = GGML_FP32_TO_FP16(d); | |
| uint32_t qh = 0; | |
| for (int j = 0; j < qk/2; ++j) { | |
| const float x0 = x[i*qk + 0 + j]*id; | |
| const float x1 = x[i*qk + qk/2 + j]*id; | |
| const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); | |
| const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); | |
| y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); | |
| // get the 5-th bit and store it in qh at the right position | |
| qh |= ((xi0 & 0x10u) >> 4) << (j + 0); | |
| qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2); | |
| } | |
| memcpy(&y[i].qh, &qh, sizeof(qh)); | |
| } | |
| } | |
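// Layout note: q5_0 stores 5-bit quants as a low nibble in qs[] plus one high bit in
// the 32-bit field qh; bit j of qh belongs to quant j (first half of the block) and
// bit j + qk/2 to quant j + qk/2 (second half).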
| void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) { | |
| quantize_row_q5_0_reference(x, y, k); | |
| } | |
| void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) { | |
| const int qk = QK5_1; | |
| assert(k % qk == 0); | |
| const int nb = k / qk; | |
| for (int i = 0; i < nb; i++) { | |
| float min = FLT_MAX; | |
| float max = -FLT_MAX; | |
| for (int j = 0; j < qk; j++) { | |
| const float v = x[i*qk + j]; | |
| if (v < min) min = v; | |
| if (v > max) max = v; | |
| } | |
| const float d = (max - min) / ((1 << 5) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = GGML_FP32_TO_FP16(d); | |
| y[i].m = GGML_FP32_TO_FP16(min); | |
| uint32_t qh = 0; | |
| for (int j = 0; j < qk/2; ++j) { | |
| const float x0 = (x[i*qk + 0 + j] - min)*id; | |
| const float x1 = (x[i*qk + qk/2 + j] - min)*id; | |
| const uint8_t xi0 = (uint8_t)(x0 + 0.5f); | |
| const uint8_t xi1 = (uint8_t)(x1 + 0.5f); | |
| y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); | |
| // get the 5-th bit and store it in qh at the right position | |
| qh |= ((xi0 & 0x10u) >> 4) << (j + 0); | |
| qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2); | |
| } | |
| memcpy(&y[i].qh, &qh, sizeof(y[i].qh)); | |
| } | |
| } | |
| void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) { | |
| quantize_row_q5_1_reference(x, y, k); | |
| } | |
| // reference implementation for deterministic creation of model files | |
| void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) { | |
| assert(k % QK8_0 == 0); | |
| const int nb = k / QK8_0; | |
| for (int i = 0; i < nb; i++) { | |
| float amax = 0.0f; // absolute max | |
| for (int j = 0; j < QK8_0; j++) { | |
| const float v = x[i*QK8_0 + j]; | |
| amax = MAX(amax, fabsf(v)); | |
| } | |
| const float d = amax / ((1 << 7) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = GGML_FP32_TO_FP16(d); | |
| for (int j = 0; j < QK8_0; ++j) { | |
| const float x0 = x[i*QK8_0 + j]*id; | |
| y[i].qs[j] = roundf(x0); | |
| } | |
| } | |
| } | |
| void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) { | |
| assert(QK8_0 == 32); | |
| assert(k % QK8_0 == 0); | |
| const int nb = k / QK8_0; | |
| block_q8_0 * restrict y = vy; | |
#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
| float32x4_t srcv [8]; | |
| float32x4_t asrcv[8]; | |
| float32x4_t amaxv[8]; | |
| for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); | |
| for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); | |
| for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); | |
| for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); | |
| for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); | |
| const float amax = vmaxvq_f32(amaxv[0]); | |
| const float d = amax / ((1 << 7) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = GGML_FP32_TO_FP16(d); | |
| for (int j = 0; j < 8; j++) { | |
| const float32x4_t v = vmulq_n_f32(srcv[j], id); | |
| const int32x4_t vi = vcvtnq_s32_f32(v); | |
| y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); | |
| y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); | |
| y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); | |
| y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); | |
| } | |
| } | |
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
| v128_t srcv [8]; | |
| v128_t asrcv[8]; | |
| v128_t amaxv[8]; | |
| for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); | |
| for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); | |
| for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); | |
| for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); | |
| for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); | |
| const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), | |
| wasm_f32x4_extract_lane(amaxv[0], 1)), | |
| MAX(wasm_f32x4_extract_lane(amaxv[0], 2), | |
| wasm_f32x4_extract_lane(amaxv[0], 3))); | |
| const float d = amax / ((1 << 7) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = GGML_FP32_TO_FP16(d); | |
| for (int j = 0; j < 8; j++) { | |
| const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); | |
| const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); | |
| y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); | |
| y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); | |
| y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); | |
| y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); | |
| } | |
| } | |
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
| // Load elements into 4 AVX vectors | |
| __m256 v0 = _mm256_loadu_ps( x ); | |
| __m256 v1 = _mm256_loadu_ps( x + 8 ); | |
| __m256 v2 = _mm256_loadu_ps( x + 16 ); | |
| __m256 v3 = _mm256_loadu_ps( x + 24 ); | |
| x += 32; | |
| // Compute max(abs(e)) for the block | |
| const __m256 signBit = _mm256_set1_ps( -0.0f ); | |
| __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); | |
| maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); | |
| maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); | |
| maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); | |
| __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); | |
| max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); | |
| max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); | |
| const float maxScalar = _mm_cvtss_f32( max4 ); | |
| // Quantize these floats | |
| const float d = maxScalar / 127.f; | |
| y[i].d = GGML_FP32_TO_FP16(d); | |
| const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; | |
| const __m256 mul = _mm256_set1_ps( id ); | |
| // Apply the multiplier | |
| v0 = _mm256_mul_ps( v0, mul ); | |
| v1 = _mm256_mul_ps( v1, mul ); | |
| v2 = _mm256_mul_ps( v2, mul ); | |
| v3 = _mm256_mul_ps( v3, mul ); | |
| // Round to nearest integer | |
| v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); | |
| v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); | |
| v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); | |
| v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); | |
| // Convert floats to integers | |
| __m256i i0 = _mm256_cvtps_epi32( v0 ); | |
| __m256i i1 = _mm256_cvtps_epi32( v1 ); | |
| __m256i i2 = _mm256_cvtps_epi32( v2 ); | |
| __m256i i3 = _mm256_cvtps_epi32( v3 ); | |
#if defined(__AVX2__)
        // Convert int32 to int16
| i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 | |
| i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 | |
| // Convert int16 to int8 | |
| i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 | |
| // We got our precious signed bytes, but the order is now wrong | |
| // These AVX2 pack instructions process 16-byte pieces independently | |
| // The following instruction is fixing the order | |
| const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); | |
| i0 = _mm256_permutevar8x32_epi32( i0, perm ); | |
| _mm256_storeu_si256((__m256i *)y[i].qs, i0); | |
#else
        // AVX lacks some of the required 256-bit integer instructions,
        // so split the registers in half and use the SSE equivalents
| __m128i ni0 = _mm256_castsi256_si128( i0 ); | |
| __m128i ni1 = _mm256_extractf128_si256( i0, 1); | |
| __m128i ni2 = _mm256_castsi256_si128( i1 ); | |
| __m128i ni3 = _mm256_extractf128_si256( i1, 1); | |
| __m128i ni4 = _mm256_castsi256_si128( i2 ); | |
| __m128i ni5 = _mm256_extractf128_si256( i2, 1); | |
| __m128i ni6 = _mm256_castsi256_si128( i3 ); | |
| __m128i ni7 = _mm256_extractf128_si256( i3, 1); | |
| // Convert int32 to int16 | |
| ni0 = _mm_packs_epi32( ni0, ni1 ); | |
| ni2 = _mm_packs_epi32( ni2, ni3 ); | |
| ni4 = _mm_packs_epi32( ni4, ni5 ); | |
| ni6 = _mm_packs_epi32( ni6, ni7 ); | |
| // Convert int16 to int8 | |
| ni0 = _mm_packs_epi16( ni0, ni2 ); | |
| ni4 = _mm_packs_epi16( ni4, ni6 ); | |
| _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); | |
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#elif defined(__riscv_v_intrinsic)
    size_t vl = __riscv_vsetvl_e32m4(QK8_0);
| for (int i = 0; i < nb; i++) { | |
| // load elements | |
| vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_0, vl); | |
| vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl); | |
| vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl); | |
| vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl); | |
| float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); | |
| const float d = amax / ((1 << 7) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = GGML_FP32_TO_FP16(d); | |
| vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl); | |
| // convert to integer | |
| vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl); | |
| vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl); | |
| // store result | |
| __riscv_vse8_v_i8m1(y[i].qs , vs, vl); | |
| } | |
#else
    GGML_UNUSED(nb);
    // scalar
    quantize_row_q8_0_reference(x, y, k);
#endif
| } | |
| // reference implementation for deterministic creation of model files | |
| void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) { | |
| assert(QK8_1 == 32); | |
| assert(k % QK8_1 == 0); | |
| const int nb = k / QK8_1; | |
| for (int i = 0; i < nb; i++) { | |
| float amax = 0.0f; // absolute max | |
| for (int j = 0; j < QK8_1; j++) { | |
| const float v = x[i*QK8_1 + j]; | |
| amax = MAX(amax, fabsf(v)); | |
| } | |
| const float d = amax / ((1 << 7) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = d; | |
| int sum = 0; | |
| for (int j = 0; j < QK8_1/2; ++j) { | |
| const float v0 = x[i*QK8_1 + j]*id; | |
| const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id; | |
| y[i].qs[ j] = roundf(v0); | |
| y[i].qs[QK8_1/2 + j] = roundf(v1); | |
| sum += y[i].qs[ j]; | |
| sum += y[i].qs[QK8_1/2 + j]; | |
| } | |
| y[i].s = sum*d; | |
| } | |
| } | |
| void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) { | |
| assert(k % QK8_1 == 0); | |
| const int nb = k / QK8_1; | |
| block_q8_1 * restrict y = vy; | |
#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
| float32x4_t srcv [8]; | |
| float32x4_t asrcv[8]; | |
| float32x4_t amaxv[8]; | |
| for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); | |
| for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); | |
| for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); | |
| for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); | |
| for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); | |
| const float amax = vmaxvq_f32(amaxv[0]); | |
| const float d = amax / ((1 << 7) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = d; | |
| int32x4_t accv = vdupq_n_s32(0); | |
| for (int j = 0; j < 8; j++) { | |
| const float32x4_t v = vmulq_n_f32(srcv[j], id); | |
| const int32x4_t vi = vcvtnq_s32_f32(v); | |
| y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); | |
| y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); | |
| y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); | |
| y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); | |
| accv = vaddq_s32(accv, vi); | |
| } | |
| y[i].s = d * vaddvq_s32(accv); | |
| } | |
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
| v128_t srcv [8]; | |
| v128_t asrcv[8]; | |
| v128_t amaxv[8]; | |
| for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); | |
| for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); | |
| for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); | |
| for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); | |
| for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); | |
| const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), | |
| wasm_f32x4_extract_lane(amaxv[0], 1)), | |
| MAX(wasm_f32x4_extract_lane(amaxv[0], 2), | |
| wasm_f32x4_extract_lane(amaxv[0], 3))); | |
| const float d = amax / ((1 << 7) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = d; | |
| v128_t accv = wasm_i32x4_splat(0); | |
| for (int j = 0; j < 8; j++) { | |
| const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); | |
| const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); | |
| y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); | |
| y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); | |
| y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); | |
| y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); | |
| accv = wasm_i32x4_add(accv, vi); | |
| } | |
| y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) + | |
| wasm_i32x4_extract_lane(accv, 1) + | |
| wasm_i32x4_extract_lane(accv, 2) + | |
| wasm_i32x4_extract_lane(accv, 3)); | |
| } | |
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
| // Load elements into 4 AVX vectors | |
| __m256 v0 = _mm256_loadu_ps( x ); | |
| __m256 v1 = _mm256_loadu_ps( x + 8 ); | |
| __m256 v2 = _mm256_loadu_ps( x + 16 ); | |
| __m256 v3 = _mm256_loadu_ps( x + 24 ); | |
| x += 32; | |
| // Compute max(abs(e)) for the block | |
| const __m256 signBit = _mm256_set1_ps( -0.0f ); | |
| __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); | |
| maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); | |
| maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); | |
| maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); | |
| __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); | |
| max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); | |
| max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); | |
| const float maxScalar = _mm_cvtss_f32( max4 ); | |
| // Quantize these floats | |
| const float d = maxScalar / 127.f; | |
| y[i].d = d; | |
| const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; | |
| const __m256 mul = _mm256_set1_ps( id ); | |
| // Apply the multiplier | |
| v0 = _mm256_mul_ps( v0, mul ); | |
| v1 = _mm256_mul_ps( v1, mul ); | |
| v2 = _mm256_mul_ps( v2, mul ); | |
| v3 = _mm256_mul_ps( v3, mul ); | |
| // Round to nearest integer | |
| v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); | |
| v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); | |
| v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); | |
| v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); | |
| // Convert floats to integers | |
| __m256i i0 = _mm256_cvtps_epi32( v0 ); | |
| __m256i i1 = _mm256_cvtps_epi32( v1 ); | |
| __m256i i2 = _mm256_cvtps_epi32( v2 ); | |
| __m256i i3 = _mm256_cvtps_epi32( v3 ); | |
#if defined(__AVX2__)
        // Compute the sum of the quants and set y[i].s
| y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3))); | |
| // Convert int32 to int16 | |
| i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 | |
| i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 | |
| // Convert int16 to int8 | |
| i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 | |
| // We got our precious signed bytes, but the order is now wrong | |
| // These AVX2 pack instructions process 16-byte pieces independently | |
| // The following instruction is fixing the order | |
| const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); | |
| i0 = _mm256_permutevar8x32_epi32( i0, perm ); | |
| _mm256_storeu_si256((__m256i *)y[i].qs, i0); | |
#else
        // AVX lacks some of the required 256-bit integer instructions,
        // so split the registers in half and use the SSE equivalents
| __m128i ni0 = _mm256_castsi256_si128( i0 ); | |
| __m128i ni1 = _mm256_extractf128_si256( i0, 1); | |
| __m128i ni2 = _mm256_castsi256_si128( i1 ); | |
| __m128i ni3 = _mm256_extractf128_si256( i1, 1); | |
| __m128i ni4 = _mm256_castsi256_si128( i2 ); | |
| __m128i ni5 = _mm256_extractf128_si256( i2, 1); | |
| __m128i ni6 = _mm256_castsi256_si128( i3 ); | |
| __m128i ni7 = _mm256_extractf128_si256( i3, 1); | |
| // Compute the sum of the quants and set y[i].s | |
| const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3)); | |
| const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7)); | |
| y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1)); | |
| // Convert int32 to int16 | |
| ni0 = _mm_packs_epi32( ni0, ni1 ); | |
| ni2 = _mm_packs_epi32( ni2, ni3 ); | |
| ni4 = _mm_packs_epi32( ni4, ni5 ); | |
| ni6 = _mm_packs_epi32( ni6, ni7 ); | |
| // Convert int16 to int8 | |
| ni0 = _mm_packs_epi16( ni0, ni2 ); | |
| ni4 = _mm_packs_epi16( ni4, ni6 ); | |
| _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); | |
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#elif defined(__riscv_v_intrinsic)
    size_t vl = __riscv_vsetvl_e32m4(QK8_1);
| for (int i = 0; i < nb; i++) { | |
| // load elements | |
| vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_1, vl); | |
| vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl); | |
| vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl); | |
| vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl); | |
| float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); | |
| const float d = amax / ((1 << 7) - 1); | |
| const float id = d ? 1.0f/d : 0.0f; | |
| y[i].d = d; | |
| vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl); | |
| // convert to integer | |
| vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl); | |
| vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl); | |
| // store result | |
| __riscv_vse8_v_i8m1(y[i].qs , vs, vl); | |
| // compute sum for y[i].s | |
| vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl); | |
| vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl); | |
| // set y[i].s | |
| int sum = __riscv_vmv_x_s_i16m1_i16(vwrs); | |
| y[i].s = sum*d; | |
| } | |
#else
    GGML_UNUSED(nb);
    // scalar
    quantize_row_q8_1_reference(x, y, k);
#endif
| } | |
| void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) { | |
| static const int qk = QK4_0; | |
| assert(k % qk == 0); | |
| const int nb = k / qk; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| for (int j = 0; j < qk/2; ++j) { | |
| const int x0 = (x[i].qs[j] & 0x0F) - 8; | |
| const int x1 = (x[i].qs[j] >> 4) - 8; | |
| y[i*qk + j + 0 ] = x0*d; | |
| y[i*qk + j + qk/2] = x1*d; | |
| } | |
| } | |
| } | |
| void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) { | |
| static const int qk = QK4_1; | |
| assert(k % qk == 0); | |
| const int nb = k / qk; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| const float m = GGML_FP16_TO_FP32(x[i].m); | |
| for (int j = 0; j < qk/2; ++j) { | |
| const int x0 = (x[i].qs[j] & 0x0F); | |
| const int x1 = (x[i].qs[j] >> 4); | |
| y[i*qk + j + 0 ] = x0*d + m; | |
| y[i*qk + j + qk/2] = x1*d + m; | |
| } | |
| } | |
| } | |
| void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) { | |
| static const int qk = QK5_0; | |
| assert(k % qk == 0); | |
| const int nb = k / qk; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| uint32_t qh; | |
| memcpy(&qh, x[i].qh, sizeof(qh)); | |
| for (int j = 0; j < qk/2; ++j) { | |
| const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; | |
| const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; | |
| const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16; | |
| const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16; | |
| y[i*qk + j + 0 ] = x0*d; | |
| y[i*qk + j + qk/2] = x1*d; | |
| } | |
| } | |
| } | |
| void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) { | |
| static const int qk = QK5_1; | |
| assert(k % qk == 0); | |
| const int nb = k / qk; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| const float m = GGML_FP16_TO_FP32(x[i].m); | |
| uint32_t qh; | |
| memcpy(&qh, x[i].qh, sizeof(qh)); | |
| for (int j = 0; j < qk/2; ++j) { | |
| const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; | |
| const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; | |
| const int x0 = (x[i].qs[j] & 0x0F) | xh_0; | |
| const int x1 = (x[i].qs[j] >> 4) | xh_1; | |
| y[i*qk + j + 0 ] = x0*d + m; | |
| y[i*qk + j + qk/2] = x1*d + m; | |
| } | |
| } | |
| } | |
| void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k) { | |
| static const int qk = QK8_0; | |
| assert(k % qk == 0); | |
| const int nb = k / qk; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| for (int j = 0; j < qk; ++j) { | |
| y[i*qk + j] = x[i].qs[j]*d; | |
| } | |
| } | |
| } | |
| // | |
| // 2-6 bit quantization in super-blocks | |
| // | |
| // | |
| // ===================== Helper functions | |
| // | |
| static inline int nearest_int(float fval) { | |
| assert(fval <= 4194303.f); | |
| float val = fval + 12582912.f; | |
| int i; memcpy(&i, &val, sizeof(int)); | |
| return (i & 0x007fffff) - 0x00400000; | |
| } | |
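// nearest_int rounds by exploiting the float format: adding 12582912.0f (1.5 * 2^23)
// pushes the value into a range where the mantissa spacing is exactly 1, so the add
// itself performs round-to-nearest.  Masking the low 23 bits and subtracting the bias
// 0x00400000 recovers the rounded integer; the assert bounds |fval| so the trick
// cannot overflow the mantissa.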
| static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type, | |
| const float * restrict qw) { | |
| float max = 0; | |
| float amax = 0; | |
| for (int i = 0; i < n; ++i) { | |
| float ax = fabsf(x[i]); | |
| if (ax > amax) { amax = ax; max = x[i]; } | |
| } | |
| if (amax < 1e-30f) { // all zero | |
| for (int i = 0; i < n; ++i) { | |
| L[i] = 0; | |
| } | |
| return 0.f; | |
| } | |
| float iscale = -nmax / max; | |
| if (rmse_type == 0) { | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale * x[i]); | |
| L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); | |
| } | |
| return 1/iscale; | |
| } | |
| bool return_early = false; | |
| if (rmse_type < 0) { | |
| rmse_type = -rmse_type; | |
| return_early = true; | |
| } | |
| float sumlx = 0; | |
| float suml2 = 0; | |
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 0; i < n; ++i) {
#else
    for (int i = 0; i < n; ++i) {
#endif
| int l = nearest_int(iscale * x[i]); | |
| l = MAX(-nmax, MIN(nmax-1, l)); | |
| L[i] = l + nmax; | |
| float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i])); | |
| sumlx += w*x[i]*l; | |
| suml2 += w*l*l; | |
| } | |
| float scale = sumlx/suml2; | |
| if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale; | |
| float best = scale * sumlx; | |
| for (int is = -9; is <= 9; ++is) { | |
| if (is == 0) { | |
| continue; | |
| } | |
| iscale = -(nmax + 0.1f*is) / max; | |
| sumlx = suml2 = 0; | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale * x[i]); | |
| l = MAX(-nmax, MIN(nmax-1, l)); | |
| float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i])); | |
| sumlx += w*x[i]*l; | |
| suml2 += w*l*l; | |
| } | |
| if (suml2 > 0 && sumlx*sumlx > best*suml2) { | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale * x[i]); | |
| L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); | |
| } | |
| scale = sumlx/suml2; best = scale*sumlx; | |
| } | |
| } | |
| return scale; | |
| } | |
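// make_qx_quants chooses a signed per-block scale by a weighted least-squares fit
// (scale = sumlx/suml2) and then searches a small neighbourhood of candidate scales,
// keeping the one with the lowest weighted error.  rmse_type selects the weighting
// (x^2, 1, |x|, or sqrt|x|) unless explicit per-value weights qw are supplied.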
| static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) { | |
| float max = 0; | |
| float amax = 0; | |
| for (int i = 0; i < n; ++i) { | |
| float ax = fabsf(x[i]); | |
| if (ax > amax) { amax = ax; max = x[i]; } | |
| } | |
| if (!amax) { // all zero | |
| for (int i = 0; i < n; ++i) { L[i] = 0; } | |
| return 0.f; | |
| } | |
| float iscale = -nmax / max; | |
| if (do_rmse) { | |
| float sumlx = 0; | |
| float suml2 = 0; | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale * x[i]); | |
| l = MAX(-nmax, MIN(nmax-1, l)); | |
| L[i] = l; | |
| float w = x[i]*x[i]; | |
| sumlx += w*x[i]*l; | |
| suml2 += w*l*l; | |
| } | |
| for (int itry = 0; itry < 5; ++itry) { | |
| int n_changed = 0; | |
| for (int i = 0; i < n; ++i) { | |
| float w = x[i]*x[i]; | |
| float slx = sumlx - w*x[i]*L[i]; | |
| if (slx > 0) { | |
| float sl2 = suml2 - w*L[i]*L[i]; | |
| int new_l = nearest_int(x[i] * sl2 / slx); | |
| new_l = MAX(-nmax, MIN(nmax-1, new_l)); | |
| if (new_l != L[i]) { | |
| slx += w*x[i]*new_l; | |
| sl2 += w*new_l*new_l; | |
| if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) { | |
| L[i] = new_l; sumlx = slx; suml2 = sl2; | |
| ++n_changed; | |
| } | |
| } | |
| } | |
| } | |
| if (!n_changed) { | |
| break; | |
| } | |
| } | |
| for (int i = 0; i < n; ++i) { | |
| L[i] += nmax; | |
| } | |
| return sumlx / suml2; | |
| } | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale * x[i]); | |
| l = MAX(-nmax, MIN(nmax-1, l)); | |
| L[i] = l + nmax; | |
| } | |
| return 1/iscale; | |
| } | |
| static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min, | |
| int ntry, float alpha) { | |
| float min = x[0]; | |
| float max = x[0]; | |
| for (int i = 1; i < n; ++i) { | |
| if (x[i] < min) min = x[i]; | |
| if (x[i] > max) max = x[i]; | |
| } | |
| if (max == min) { | |
| for (int i = 0; i < n; ++i) L[i] = 0; | |
| *the_min = 0; | |
| return 0.f; | |
| } | |
| if (min > 0) min = 0; | |
| float iscale = nmax/(max - min); | |
| float scale = 1/iscale; | |
| for (int itry = 0; itry < ntry; ++itry) { | |
| float sumlx = 0; int suml2 = 0; | |
| bool did_change = false; | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale*(x[i] - min)); | |
| l = MAX(0, MIN(nmax, l)); | |
| if (l != L[i]) { | |
| L[i] = l; | |
| did_change = true; | |
| } | |
| sumlx += (x[i] - min)*l; | |
| suml2 += l*l; | |
| } | |
| scale = sumlx/suml2; | |
| float sum = 0; | |
| for (int i = 0; i < n; ++i) { | |
| sum += x[i] - scale*L[i]; | |
| } | |
| min = alpha*min + (1 - alpha)*sum/n; | |
| if (min > 0) min = 0; | |
| iscale = 1/scale; | |
| if (!did_change) break; | |
| } | |
| *the_min = -min; | |
| return scale; | |
| } | |
| static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights, | |
| uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux, | |
| float rmin, float rdelta, int nstep, bool use_mad) { | |
| float min = x[0]; | |
| float max = x[0]; | |
| float sum_w = weights[0]; | |
| float sum_x = sum_w * x[0]; | |
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 1; i < n; ++i) {
#else
    for (int i = 1; i < n; ++i) {
#endif
| if (x[i] < min) min = x[i]; | |
| if (x[i] > max) max = x[i]; | |
| float w = weights[i]; | |
| sum_w += w; | |
| sum_x += w * x[i]; | |
| } | |
| if (min > 0) min = 0; | |
| if (max == min) { | |
| for (int i = 0; i < n; ++i) L[i] = 0; | |
| *the_min = -min; | |
| return 0.f; | |
| } | |
| float iscale = nmax/(max - min); | |
| float scale = 1/iscale; | |
| float best_mad = 0; | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale*(x[i] - min)); | |
| L[i] = MAX(0, MIN(nmax, l)); | |
| float diff = scale * L[i] + min - x[i]; | |
| diff = use_mad ? fabsf(diff) : diff * diff; | |
| float w = weights[i]; | |
| best_mad += w * diff; | |
| } | |
| if (nstep < 1) { | |
| *the_min = -min; | |
| return scale; | |
| } | |
| for (int is = 0; is <= nstep; ++is) { | |
| iscale = (rmin + rdelta*is + nmax)/(max - min); | |
| float sum_l = 0, sum_l2 = 0, sum_xl = 0; | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale*(x[i] - min)); | |
| l = MAX(0, MIN(nmax, l)); | |
| Laux[i] = l; | |
| float w = weights[i]; | |
| sum_l += w*l; | |
| sum_l2 += w*l*l; | |
| sum_xl += w*l*x[i]; | |
| } | |
| float D = sum_w * sum_l2 - sum_l * sum_l; | |
| if (D > 0) { | |
| float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D; | |
| float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D; | |
| if (this_min > 0) { | |
| this_min = 0; | |
| this_scale = sum_xl / sum_l2; | |
| } | |
| float mad = 0; | |
| for (int i = 0; i < n; ++i) { | |
| float diff = this_scale * Laux[i] + this_min - x[i]; | |
| diff = use_mad ? fabsf(diff) : diff * diff; | |
| float w = weights[i]; | |
| mad += w * diff; | |
| } | |
| if (mad < best_mad) { | |
| for (int i = 0; i < n; ++i) { | |
| L[i] = Laux[i]; | |
| } | |
| best_mad = mad; | |
| scale = this_scale; | |
| min = this_min; | |
| } | |
| } | |
| } | |
| *the_min = -min; | |
| return scale; | |
| } | |
| static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) { | |
| if (j < 4) { | |
| *d = q[j] & 63; *m = q[j + 4] & 63; | |
| } else { | |
| *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); | |
| *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); | |
| } | |
| } | |
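// K-quant super-blocks pack eight 6-bit (scale, min) pairs into 12 bytes: for j < 4
// the scale sits in the low 6 bits of q[j] and the min in the low 6 bits of q[j+4];
// for j >= 4 the low 4 bits come from the two nibbles of q[j+4] and the top 2 bits
// are borrowed from the high bits of q[j-4] and q[j].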
//========================= 2-bit (de)-quantization
| void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| uint8_t L[QK_K]; | |
| uint8_t Laux[16]; | |
| float weights[16]; | |
| float mins[QK_K/16]; | |
| float scales[QK_K/16]; | |
| const float q4scale = 15.f; | |
| for (int i = 0; i < nb; i++) { | |
| float max_scale = 0; // as we are deducting the min, scales are always positive | |
| float max_min = 0; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]); | |
| scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true); | |
| float scale = scales[j]; | |
| if (scale > max_scale) { | |
| max_scale = scale; | |
| } | |
| float min = mins[j]; | |
| if (min > max_min) { | |
| max_min = min; | |
| } | |
| } | |
| if (max_scale > 0) { | |
| float iscale = q4scale/max_scale; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| int l = nearest_int(iscale*scales[j]); | |
| y[i].scales[j] = l; | |
| } | |
| y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale); | |
| } else { | |
| for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0; | |
| y[i].d = GGML_FP32_TO_FP16(0.f); | |
| } | |
| if (max_min > 0) { | |
| float iscale = q4scale/max_min; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| int l = nearest_int(iscale*mins[j]); | |
| y[i].scales[j] |= (l << 4); | |
| } | |
| y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale); | |
| } else { | |
| y[i].dmin = GGML_FP32_TO_FP16(0.f); | |
| } | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF); | |
| if (!d) continue; | |
| const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4); | |
| for (int ii = 0; ii < 16; ++ii) { | |
| int l = nearest_int((x[16*j + ii] + dm)/d); | |
| l = MAX(0, MIN(3, l)); | |
| L[16*j + ii] = l; | |
| } | |
| } | |
#if QK_K == 256
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
            }
        }
#else
        for (int l = 0; l < 16; ++l) {
            y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
        }
#endif
| x += QK_K; | |
| } | |
| } | |
| void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| const float min = GGML_FP16_TO_FP32(x[i].dmin); | |
| const uint8_t * q = x[i].qs; | |
#if QK_K == 256
        int is = 0;
| float dl, ml; | |
| for (int n = 0; n < QK_K; n += 128) { | |
| int shift = 0; | |
| for (int j = 0; j < 4; ++j) { | |
| uint8_t sc = x[i].scales[is++]; | |
| dl = d * (sc & 0xF); ml = min * (sc >> 4); | |
| for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml; | |
| sc = x[i].scales[is++]; | |
| dl = d * (sc & 0xF); ml = min * (sc >> 4); | |
| for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml; | |
| shift += 2; | |
| } | |
| q += 32; | |
| } | |
#else
        float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4);
| float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4); | |
| float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4); | |
| float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4); | |
| for (int l = 0; l < 16; ++l) { | |
| y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1; | |
| y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2; | |
| y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3; | |
| y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4; | |
| } | |
        y += QK_K;
#endif
| } | |
| } | |
| void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) { | |
| quantize_row_q2_K_reference(x, vy, k); | |
| } | |
| size_t ggml_quantize_q2_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { | |
| (void)hist; // TODO: collect histograms | |
| for (int j = 0; j < n; j += k) { | |
| block_q2_K * restrict y = (block_q2_K *)dst + j/QK_K; | |
| quantize_row_q2_K_reference(src + j, y, k); | |
| } | |
| return (n/QK_K*sizeof(block_q2_K)); | |
| } | |
| static float make_qkx3_quants(int n, int nmax, const float * restrict x, const float * restrict weights, | |
| uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux, | |
| float rmin, float rdelta, int nstep, bool use_mad) { | |
| float min = x[0]; | |
| float max = x[0]; | |
| float sum_w = weights ? weights[0] : x[0]*x[0]; | |
| float sum_x = sum_w * x[0]; | |
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 1; i < n; ++i) {
#else
    for (int i = 1; i < n; ++i) {
#endif
| if (x[i] < min) min = x[i]; | |
| if (x[i] > max) max = x[i]; | |
| float w = weights ? weights[i] : x[i]*x[i]; | |
| sum_w += w; | |
| sum_x += w * x[i]; | |
| } | |
| if (min > 0) { | |
| min = 0; | |
| } | |
| if (max <= min) { | |
| memset(L, 0, n); | |
| *the_min = -min; | |
| return 0.f; | |
| } | |
| float iscale = nmax/(max - min); | |
| float scale = 1/iscale; | |
| float best_mad = 0; | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale*(x[i] - min)); | |
| L[i] = MAX(0, MIN(nmax, l)); | |
| float diff = scale * L[i] + min - x[i]; | |
| diff = use_mad ? fabsf(diff) : diff*diff; | |
| float w = weights ? weights[i] : x[i]*x[i]; | |
| best_mad += w * diff; | |
| } | |
| if (nstep < 1) { | |
| *the_min = -min; | |
| return scale; | |
| } | |
| for (int is = 0; is <= nstep; ++is) { | |
| iscale = (rmin + rdelta*is + nmax)/(max - min); | |
| float sum_l = 0, sum_l2 = 0, sum_xl = 0; | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale*(x[i] - min)); | |
| l = MAX(0, MIN(nmax, l)); | |
| Laux[i] = l; | |
| float w = weights ? weights[i] : x[i]*x[i]; | |
| sum_l += w*l; | |
| sum_l2 += w*l*l; | |
| sum_xl += w*l*x[i]; | |
| } | |
| float D = sum_w * sum_l2 - sum_l * sum_l; | |
| if (D > 0) { | |
| float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D; | |
| float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D; | |
| if (this_min > 0) { | |
| this_min = 0; | |
| this_scale = sum_xl / sum_l2; | |
| } | |
| float mad = 0; | |
| for (int i = 0; i < n; ++i) { | |
| float diff = this_scale * Laux[i] + this_min - x[i]; | |
| diff = use_mad ? fabsf(diff) : diff*diff; | |
| float w = weights ? weights[i] : x[i]*x[i]; | |
| mad += w * diff; | |
| } | |
| if (mad < best_mad) { | |
| for (int i = 0; i < n; ++i) { | |
| L[i] = Laux[i]; | |
| } | |
| best_mad = mad; | |
| scale = this_scale; | |
| min = this_min; | |
| } | |
| } | |
| } | |
| *the_min = -min; | |
| return scale; | |
| } | |
| static float make_qp_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, const float * quant_weights) { | |
| float max = 0; | |
| for (int i = 0; i < n; ++i) { | |
| max = MAX(max, x[i]); | |
| } | |
| if (!max) { // all zero | |
| for (int i = 0; i < n; ++i) { L[i] = 0; } | |
| return 0.f; | |
| } | |
| float iscale = nmax / max; | |
| for (int i = 0; i < n; ++i) { | |
| L[i] = nearest_int(iscale * x[i]); | |
| } | |
| float scale = 1/iscale; | |
| float best_mse = 0; | |
| for (int i = 0; i < n; ++i) { | |
| float diff = x[i] - scale*L[i]; | |
| float w = quant_weights[i]; | |
| best_mse += w*diff*diff; | |
| } | |
| for (int is = -4; is <= 4; ++is) { | |
| if (is == 0) continue; | |
| float iscale_is = (0.1f*is + nmax)/max; | |
| float scale_is = 1/iscale_is; | |
| float mse = 0; | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale_is*x[i]); | |
| l = MIN(nmax, l); | |
| float diff = x[i] - scale_is*l; | |
| float w = quant_weights[i]; | |
| mse += w*diff*diff; | |
| } | |
| if (mse < best_mse) { | |
| best_mse = mse; | |
| iscale = iscale_is; | |
| } | |
| } | |
| float sumlx = 0; | |
| float suml2 = 0; | |
| for (int i = 0; i < n; ++i) { | |
| int l = nearest_int(iscale * x[i]); | |
| l = MIN(nmax, l); | |
| L[i] = l; | |
| float w = quant_weights[i]; | |
| sumlx += w*x[i]*l; | |
| suml2 += w*l*l; | |
| } | |
| for (int itry = 0; itry < 5; ++itry) { | |
| int n_changed = 0; | |
| for (int i = 0; i < n; ++i) { | |
| float w = quant_weights[i]; | |
| float slx = sumlx - w*x[i]*L[i]; | |
| float sl2 = suml2 - w*L[i]*L[i]; | |
| if (slx > 0 && sl2 > 0) { | |
| int new_l = nearest_int(x[i] * sl2 / slx); | |
| new_l = MIN(nmax, new_l); | |
| if (new_l != L[i]) { | |
| slx += w*x[i]*new_l; | |
| sl2 += w*new_l*new_l; | |
| if (slx*slx*suml2 > sumlx*sumlx*sl2) { | |
| L[i] = new_l; sumlx = slx; suml2 = sl2; | |
| ++n_changed; | |
| } | |
| } | |
| } | |
| } | |
| if (!n_changed) { | |
| break; | |
| } | |
| } | |
| return sumlx / suml2; | |
| } | |
| static void quantize_row_q2_K_impl(const float * restrict x, block_q2_K * restrict y, int k, const float * restrict quant_weights) { | |
| GGML_ASSERT(quant_weights); | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| const bool requantize = true; | |
| uint8_t L[QK_K]; | |
| uint8_t Laux[16]; | |
| float mins[QK_K/16]; | |
| float scales[QK_K/16]; | |
| float sw[QK_K/16]; | |
| float weight[QK_K/16]; | |
| uint8_t Ls[QK_K/16], Lm[QK_K/16]; | |
| for (int i = 0; i < nb; i++) { | |
| memset(sw, 0, QK_K/16*sizeof(float)); | |
| float sumx2 = 0; | |
| for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j]; | |
| float sigma2 = sumx2/QK_K; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| const float * restrict qw = quant_weights + QK_K * i + 16*j; | |
| for (int l = 0; l < QK_K/16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j + l]*x[16*j + l]); | |
| for (int l = 0; l < QK_K/16; ++l) sw[j] += weight[l]; | |
| scales[j] = make_qkx3_quants(QK_K/16, 3, x + 16*j, weight, L + 16*j, &mins[j], Laux, -0.9f, 0.05f, 36, false); | |
| } | |
| float dm = make_qp_quants(QK_K/16, 15, scales, Ls, sw); | |
| float mm = make_qp_quants(QK_K/16, 15, mins, Lm, sw); | |
| y[i].d = GGML_FP32_TO_FP16(dm); | |
| y[i].dmin = GGML_FP32_TO_FP16(mm); | |
| dm = GGML_FP16_TO_FP32(y[i].d); | |
| mm = GGML_FP16_TO_FP32(y[i].dmin); | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| y[i].scales[j] = Ls[j] | (Lm[j] << 4); | |
| } | |
| if (requantize) { | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| const float d = dm * (y[i].scales[j] & 0xF); | |
| if (!d) continue; | |
| const float m = mm * (y[i].scales[j] >> 4); | |
| for (int ii = 0; ii < 16; ++ii) { | |
| int l = nearest_int((x[16*j + ii] + m)/d); | |
| l = MAX(0, MIN(3, l)); | |
| L[16*j + ii] = l; | |
| } | |
| } | |
| } | |
| #if QK_K == 256 | |
| for (int j = 0; j < QK_K; j += 128) { | |
| for (int l = 0; l < 32; ++l) { | |
| y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); | |
| } | |
| } | |
| #else | |
| for (int l = 0; l < 16; ++l) { | |
| y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6); | |
| } | |
| #endif | |
| x += QK_K; | |
| } | |
| } | |
| size_t quantize_q2_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| size_t row_size = ggml_row_size(GGML_TYPE_Q2_K, n_per_row); | |
| if (!quant_weights) { | |
| quantize_row_q2_K_reference(src, dst, nrow*n_per_row); | |
| } | |
| else { | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_q2_K_impl(src, (block_q2_K*)qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += row_size; | |
| } | |
| } | |
| return nrow * row_size; | |
| } | |
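| // Usage sketch (illustrative only, not part of the original file): how a caller could | |
| // size the destination buffer for quantize_q2_K() with ggml_row_size(). The buffer and | |
| // the optional `imatrix` importance weights (n_per_row entries, may be NULL) are the | |
| // caller's; the same pattern applies to the other quantize_qX_K() helpers below. | |
| static inline size_t example_quantize_rows_q2_K(const float * src, void * dst, int nrow, int n_per_row, const float * imatrix) { | |
| // every quantized row occupies ggml_row_size(GGML_TYPE_Q2_K, n_per_row) bytes of dst | |
| const size_t expected = (size_t)nrow * ggml_row_size(GGML_TYPE_Q2_K, n_per_row); | |
| const size_t written = quantize_q2_K(src, dst, nrow, n_per_row, NULL, imatrix); | |
| assert(written == expected); | |
| return written; | |
| } | |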
| //========================= 3-bit (de)-quantization | |
| void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| int8_t L[QK_K]; | |
| float scales[QK_K / 16]; | |
| for (int i = 0; i < nb; i++) { | |
| float max_scale = 0; | |
| float amax = 0; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true); | |
| float scale = fabsf(scales[j]); | |
| if (scale > amax) { | |
| amax = scale; max_scale = scales[j]; | |
| } | |
| } | |
| #if QK_K == 256 | |
| memset(y[i].scales, 0, 12); | |
| if (max_scale) { | |
| float iscale = -32.f/max_scale; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| int8_t l = nearest_int(iscale*scales[j]); | |
| l = MAX(-32, MIN(31, l)) + 32; | |
| if (j < 8) { | |
| y[i].scales[j] = l & 0xF; | |
| } else { | |
| y[i].scales[j-8] |= ((l & 0xF) << 4); | |
| } | |
| l >>= 4; | |
| y[i].scales[j%4 + 8] |= (l << (2*(j/4))); | |
| } | |
| y[i].d = GGML_FP32_TO_FP16(1/iscale); | |
| } else { | |
| y[i].d = GGML_FP32_TO_FP16(0.f); | |
| } | |
| int8_t sc; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4; | |
| sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32; | |
| float d = GGML_FP16_TO_FP32(y[i].d) * sc; | |
| if (!d) { | |
| continue; | |
| } | |
| for (int ii = 0; ii < 16; ++ii) { | |
| int l = nearest_int(x[16*j + ii]/d); | |
| l = MAX(-4, MIN(3, l)); | |
| L[16*j + ii] = l + 4; | |
| } | |
| } | |
| #else | |
| if (max_scale) { | |
| float iscale = -8.f/max_scale; | |
| for (int j = 0; j < QK_K/16; j+=2) { | |
| int l1 = nearest_int(iscale*scales[j]); | |
| l1 = 8 + MAX(-8, MIN(7, l1)); | |
| int l2 = nearest_int(iscale*scales[j+1]); | |
| l2 = 8 + MAX(-8, MIN(7, l2)); | |
| y[i].scales[j/2] = l1 | (l2 << 4); | |
| } | |
| y[i].d = GGML_FP32_TO_FP16(1/iscale); | |
| } else { | |
| for (int j = 0; j < QK_K/16; j+=2) { | |
| y[i].scales[j/2] = 0; | |
| } | |
| y[i].d = GGML_FP32_TO_FP16(0.f); | |
| } | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4; | |
| float d = GGML_FP16_TO_FP32(y[i].d) * (s - 8); | |
| if (!d) { | |
| continue; | |
| } | |
| for (int ii = 0; ii < 16; ++ii) { | |
| int l = nearest_int(x[16*j + ii]/d); | |
| l = MAX(-4, MIN(3, l)); | |
| L[16*j + ii] = l + 4; | |
| } | |
| } | |
| #endif | |
| memset(y[i].hmask, 0, QK_K/8); | |
| // We put the high bit for the first QK_K/8 quants into bit 0, the next QK_K/8 into bit 1, etc. | |
| int m = 0; | |
| uint8_t hm = 1; | |
| for (int j = 0; j < QK_K; ++j) { | |
| if (L[j] > 3) { | |
| y[i].hmask[m] |= hm; | |
| L[j] -= 4; | |
| } | |
| if (++m == QK_K/8) { | |
| m = 0; hm <<= 1; | |
| } | |
| } | |
| #if QK_K == 256 | |
| for (int j = 0; j < QK_K; j += 128) { | |
| for (int l = 0; l < 32; ++l) { | |
| y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); | |
| } | |
| } | |
| #else | |
| for (int l = 0; l < 16; ++l) { | |
| y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6); | |
| } | |
| #endif | |
| x += QK_K; | |
| } | |
| } | |
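| // Illustrative helper (added, not part of the original file): the packing loop above | |
| // stores the high (3rd) bit of quant j in byte j % (QK_K/8) of hmask, at bit position | |
| // j / (QK_K/8). This sketch recovers that bit; dequantize_row_q3_K() below does the | |
| // same thing with a moving bit mask. | |
| static inline int example_q3_K_high_bit(const uint8_t * hmask, int j) { | |
| return (hmask[j % (QK_K/8)] >> (j / (QK_K/8))) & 1; | |
| } | |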
| #if QK_K == 256 | |
| void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| const uint32_t kmask1 = 0x03030303; | |
| const uint32_t kmask2 = 0x0f0f0f0f; | |
| uint32_t aux[4]; | |
| const int8_t * scales = (const int8_t*)aux; | |
| for (int i = 0; i < nb; i++) { | |
| const float d_all = GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q = x[i].qs; | |
| const uint8_t * restrict hm = x[i].hmask; | |
| uint8_t m = 1; | |
| memcpy(aux, x[i].scales, 12); | |
| uint32_t tmp = aux[2]; | |
| aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); | |
| aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); | |
| aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); | |
| aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); | |
| int is = 0; | |
| float dl; | |
| for (int n = 0; n < QK_K; n += 128) { | |
| int shift = 0; | |
| for (int j = 0; j < 4; ++j) { | |
| dl = d_all * (scales[is++] - 32); | |
| for (int l = 0; l < 16; ++l) { | |
| *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4)); | |
| } | |
| dl = d_all * (scales[is++] - 32); | |
| for (int l = 0; l < 16; ++l) { | |
| *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4)); | |
| } | |
| shift += 2; | |
| m <<= 1; | |
| } | |
| q += 32; | |
| } | |
| } | |
| } | |
| #else | |
| void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| assert(QK_K == 64); | |
| const int nb = k / QK_K; | |
| for (int i = 0; i < nb; i++) { | |
| const float d_all = GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q = x[i].qs; | |
| const uint8_t * restrict hm = x[i].hmask; | |
| const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8); | |
| const float d2 = d_all * ((x[i].scales[0] >> 4) - 8); | |
| const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8); | |
| const float d4 = d_all * ((x[i].scales[1] >> 4) - 8); | |
| for (int l=0; l<8; ++l) { | |
| uint8_t h = hm[l]; | |
| y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4)); | |
| y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4)); | |
| y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4)); | |
| y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4)); | |
| y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4)); | |
| y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4)); | |
| y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4)); | |
| y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4)); | |
| } | |
| y += QK_K; | |
| } | |
| } | |
| #endif | |
| void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) { | |
| quantize_row_q3_K_reference(x, vy, k); | |
| } | |
| size_t ggml_quantize_q3_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { | |
| (void)hist; // TODO: collect histograms | |
| for (int j = 0; j < n; j += k) { | |
| block_q3_K * restrict y = (block_q3_K *)dst + j/QK_K; | |
| quantize_row_q3_K_reference(src + j, y, k); | |
| } | |
| return (n/QK_K*sizeof(block_q3_K)); | |
| } | |
| static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restrict y, int n_per_row, const float * restrict quant_weights) { | |
| #if QK_K != 256 | |
| (void)quant_weights; | |
| quantize_row_q3_K_reference(x, y, n_per_row); | |
| #else | |
| assert(n_per_row % QK_K == 0); | |
| const int nb = n_per_row / QK_K; | |
| int8_t L[QK_K]; | |
| float scales[QK_K / 16]; | |
| float weight[16]; | |
| float sw[QK_K / 16]; | |
| int8_t Ls[QK_K / 16]; | |
| for (int i = 0; i < nb; i++) { | |
| float sumx2 = 0; | |
| for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j]; | |
| float sigma2 = 2*sumx2/QK_K; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| if (quant_weights) { | |
| const float * qw = quant_weights ? quant_weights + QK_K * i + 16*j : NULL; | |
| for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]); | |
| } else { | |
| for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l]; | |
| } | |
| float sumw = 0; | |
| for (int l = 0; l < 16; ++l) sumw += weight[l]; | |
| sw[j] = sumw; | |
| scales[j] = make_qx_quants(16, 4, x + 16*j, L + 16*j, 1, weight); | |
| } | |
| memset(y[i].scales, 0, 12); | |
| float d_block = make_qx_quants(QK_K/16, 32, scales, Ls, 1, sw); | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| int l = Ls[j]; | |
| if (j < 8) { | |
| y[i].scales[j] = l & 0xF; | |
| } else { | |
| y[i].scales[j-8] |= ((l & 0xF) << 4); | |
| } | |
| l >>= 4; | |
| y[i].scales[j%4 + 8] |= (l << (2*(j/4))); | |
| } | |
| y[i].d = GGML_FP32_TO_FP16(d_block); | |
| int8_t sc; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4; | |
| sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32; | |
| float d = GGML_FP16_TO_FP32(y[i].d) * sc; | |
| if (!d) { | |
| continue; | |
| } | |
| for (int ii = 0; ii < 16; ++ii) { | |
| int l = nearest_int(x[16*j + ii]/d); | |
| l = MAX(-4, MIN(3, l)); | |
| L[16*j + ii] = l + 4; | |
| } | |
| } | |
| memset(y[i].hmask, 0, QK_K/8); | |
| // We put the high bit for the first QK_K/8 quants into bit 0, the next QK_K/8 into bit 1, etc. | |
| int m = 0; | |
| uint8_t hm = 1; | |
| for (int j = 0; j < QK_K; ++j) { | |
| if (L[j] > 3) { | |
| y[i].hmask[m] |= hm; | |
| L[j] -= 4; | |
| } | |
| if (++m == QK_K/8) { | |
| m = 0; hm <<= 1; | |
| } | |
| } | |
| for (int j = 0; j < QK_K; j += 128) { | |
| for (int l = 0; l < 32; ++l) { | |
| y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); | |
| } | |
| } | |
| x += QK_K; | |
| } | |
| #endif | |
| } | |
| size_t quantize_q3_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| size_t row_size = ggml_row_size(GGML_TYPE_Q3_K, n_per_row); | |
| if (!quant_weights) { | |
| quantize_row_q3_K_reference(src, dst, nrow*n_per_row); | |
| } | |
| else { | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_q3_K_impl(src, (block_q3_K*)qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += row_size; | |
| } | |
| } | |
| return nrow * row_size; | |
| } | |
| // ====================== 4-bit (de)-quantization | |
| void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| uint8_t L[QK_K]; | |
| uint8_t Laux[32]; | |
| float weights[32]; | |
| float mins[QK_K/32]; | |
| float scales[QK_K/32]; | |
| for (int i = 0; i < nb; i++) { | |
| float max_scale = 0; // as we are deducting the min, scales are always positive | |
| float max_min = 0; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f); | |
| float sum_x2 = 0; | |
| for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l]; | |
| float av_x = sqrtf(sum_x2/32); | |
| for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); | |
| scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false); | |
| float scale = scales[j]; | |
| if (scale > max_scale) { | |
| max_scale = scale; | |
| } | |
| float min = mins[j]; | |
| if (min > max_min) { | |
| max_min = min; | |
| } | |
| } | |
| #if QK_K == 256 | |
| float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f; | |
| float inv_min = max_min > 0 ? 63.f/max_min : 0.f; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| uint8_t ls = nearest_int(inv_scale*scales[j]); | |
| uint8_t lm = nearest_int(inv_min*mins[j]); | |
| ls = MIN(63, ls); | |
| lm = MIN(63, lm); | |
| if (j < 4) { | |
| y[i].scales[j] = ls; | |
| y[i].scales[j+4] = lm; | |
| } else { | |
| y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); | |
| y[i].scales[j-4] |= ((ls >> 4) << 6); | |
| y[i].scales[j-0] |= ((lm >> 4) << 6); | |
| } | |
| } | |
| y[i].d = GGML_FP32_TO_FP16(max_scale/63.f); | |
| y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f); | |
| uint8_t sc, m; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| get_scale_min_k4(j, y[i].scales, &sc, &m); | |
| const float d = GGML_FP16_TO_FP32(y[i].d) * sc; | |
| if (!d) continue; | |
| const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m; | |
| for (int ii = 0; ii < 32; ++ii) { | |
| int l = nearest_int((x[32*j + ii] + dm)/d); | |
| l = MAX(0, MIN(15, l)); | |
| L[32*j + ii] = l; | |
| } | |
| } | |
| #else | |
| const float s_factor = 15.f; | |
| float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f; | |
| float inv_min = max_min > 0 ? s_factor/max_min : 0.f; | |
| int d1 = nearest_int(inv_scale*scales[0]); | |
| int m1 = nearest_int(inv_min*mins[0]); | |
| int d2 = nearest_int(inv_scale*scales[1]); | |
| int m2 = nearest_int(inv_min*mins[1]); | |
| y[i].scales[0] = d1 | (m1 << 4); | |
| y[i].scales[1] = d2 | (m2 << 4); | |
| y[i].d[0] = GGML_FP32_TO_FP16(max_scale/s_factor); | |
| y[i].d[1] = GGML_FP32_TO_FP16(max_min/s_factor); | |
| float sumlx = 0; | |
| int suml2 = 0; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| const uint8_t sd = y[i].scales[j] & 0xF; | |
| const uint8_t sm = y[i].scales[j] >> 4; | |
| const float d = GGML_FP16_TO_FP32(y[i].d[0]) * sd; | |
| if (!d) continue; | |
| const float m = GGML_FP16_TO_FP32(y[i].d[1]) * sm; | |
| for (int ii = 0; ii < 32; ++ii) { | |
| int l = nearest_int((x[32*j + ii] + m)/d); | |
| l = MAX(0, MIN(15, l)); | |
| L[32*j + ii] = l; | |
| sumlx += (x[32*j + ii] + m)*l*sd; | |
| suml2 += l*l*sd*sd; | |
| } | |
| } | |
| if (suml2) { | |
| y[i].d[0] = GGML_FP32_TO_FP16(sumlx/suml2); | |
| } | |
| #endif | |
| uint8_t * q = y[i].qs; | |
| for (int j = 0; j < QK_K; j += 64) { | |
| for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4); | |
| q += 32; | |
| } | |
| x += QK_K; | |
| } | |
| } | |
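| // Layout note added for clarity (not in the original source): with QK_K == 256 the 12 | |
| // bytes of y[i].scales hold eight 6-bit scales and eight 6-bit mins. For j < 4 the low | |
| // 6 bits of scales[j] are the scale and the low 6 bits of scales[j+4] the min; for | |
| // j >= 4 the two nibbles of scales[j+4] carry the low 4 bits of scale and min, and the | |
| // top 2 bits are stashed in bits 6..7 of scales[j-4] and scales[j]. get_scale_min_k4() | |
| // undoes exactly this packing. | |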
| void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| for (int i = 0; i < nb; i++) { | |
| const uint8_t * q = x[i].qs; | |
| #if QK_K == 256 | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| const float min = GGML_FP16_TO_FP32(x[i].dmin); | |
| int is = 0; | |
| uint8_t sc, m; | |
| for (int j = 0; j < QK_K; j += 64) { | |
| get_scale_min_k4(is + 0, x[i].scales, &sc, &m); | |
| const float d1 = d * sc; const float m1 = min * m; | |
| get_scale_min_k4(is + 1, x[i].scales, &sc, &m); | |
| const float d2 = d * sc; const float m2 = min * m; | |
| for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1; | |
| for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2; | |
| q += 32; is += 2; | |
| } | |
| #else | |
| const float dall = GGML_FP16_TO_FP32(x[i].d[0]); | |
| const float mall = GGML_FP16_TO_FP32(x[i].d[1]); | |
| const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4); | |
| const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4); | |
| for (int l = 0; l < 32; ++l) { | |
| y[l+ 0] = d1 * (q[l] & 0xF) - m1; | |
| y[l+32] = d2 * (q[l] >> 4) - m2; | |
| } | |
| y += QK_K; | |
| #endif | |
| } | |
| } | |
| void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) { | |
| assert(k % QK_K == 0); | |
| block_q4_K * restrict y = vy; | |
| quantize_row_q4_K_reference(x, y, k); | |
| } | |
| size_t ggml_quantize_q4_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { | |
| assert(k % QK_K == 0); | |
| (void)hist; // TODO: collect histograms | |
| for (int j = 0; j < n; j += k) { | |
| block_q4_K * restrict y = (block_q4_K *)dst + j/QK_K; | |
| quantize_row_q4_K_reference(src + j, y, k); | |
| } | |
| return (n/QK_K*sizeof(block_q4_K)); | |
| } | |
| static void quantize_row_q4_K_impl(const float * restrict x, block_q4_K * restrict y, int n_per_row, const float * quant_weights) { | |
| #if QK_K != 256 | |
| (void)quant_weights; | |
| quantize_row_q4_K_reference(x, y, n_per_row); | |
| #else | |
| assert(n_per_row % QK_K == 0); | |
| const int nb = n_per_row / QK_K; | |
| uint8_t L[QK_K]; | |
| uint8_t Laux[32]; | |
| uint8_t Ls[QK_K/32]; | |
| uint8_t Lm[QK_K/32]; | |
| float weights[32]; | |
| float sw[QK_K/32]; | |
| float mins[QK_K/32]; | |
| float scales[QK_K/32]; | |
| for (int i = 0; i < nb; i++) { | |
| float sum_x2 = 0; | |
| for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l]; | |
| float sigma2 = 2*sum_x2/QK_K; | |
| float av_x = sqrtf(sigma2); | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| if (quant_weights) { | |
| const float * qw = quant_weights + QK_K*i + 32*j; | |
| for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]); | |
| } else { | |
| for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); | |
| } | |
| float sumw = 0; | |
| for (int l = 0; l < 32; ++l) sumw += weights[l]; | |
| sw[j] = sumw; | |
| scales[j] = make_qkx3_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false); | |
| } | |
| float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw); | |
| float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw); | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| uint8_t ls = Ls[j]; | |
| uint8_t lm = Lm[j]; | |
| if (j < 4) { | |
| y[i].scales[j] = ls; | |
| y[i].scales[j+4] = lm; | |
| } else { | |
| y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); | |
| y[i].scales[j-4] |= ((ls >> 4) << 6); | |
| y[i].scales[j-0] |= ((lm >> 4) << 6); | |
| } | |
| } | |
| y[i].d = GGML_FP32_TO_FP16(d_block); | |
| y[i].dmin = GGML_FP32_TO_FP16(m_block); | |
| uint8_t sc, m; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| get_scale_min_k4(j, y[i].scales, &sc, &m); | |
| const float d = GGML_FP16_TO_FP32(y[i].d) * sc; | |
| if (!d) continue; | |
| const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m; | |
| for (int ii = 0; ii < 32; ++ii) { | |
| int l = nearest_int((x[32*j + ii] + dm)/d); | |
| l = MAX(0, MIN(15, l)); | |
| L[32*j + ii] = l; | |
| } | |
| } | |
| uint8_t * q = y[i].qs; | |
| for (int j = 0; j < QK_K; j += 64) { | |
| for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4); | |
| q += 32; | |
| } | |
| x += QK_K; | |
| } | |
| #endif | |
| } | |
| size_t quantize_q4_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| size_t row_size = ggml_row_size(GGML_TYPE_Q4_K, n_per_row); | |
| if (!quant_weights) { | |
| quantize_row_q4_K_reference(src, dst, nrow*n_per_row); | |
| } | |
| else { | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_q4_K_impl(src, (block_q4_K*)qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += row_size; | |
| } | |
| } | |
| return nrow * row_size; | |
| } | |
| // ====================== 5-bit (de)-quantization | |
| void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| #if QK_K == 256 | |
| uint8_t L[QK_K]; | |
| float mins[QK_K/32]; | |
| float scales[QK_K/32]; | |
| float weights[32]; | |
| uint8_t Laux[32]; | |
| #else | |
| int8_t L[QK_K]; | |
| float scales[QK_K/16]; | |
| #endif | |
| for (int i = 0; i < nb; i++) { | |
| #if QK_K == 256 | |
| float max_scale = 0; // as we are deducting the min, scales are always positive | |
| float max_min = 0; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f); | |
| float sum_x2 = 0; | |
| for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l]; | |
| float av_x = sqrtf(sum_x2/32); | |
| for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); | |
| scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false); | |
| float scale = scales[j]; | |
| if (scale > max_scale) { | |
| max_scale = scale; | |
| } | |
| float min = mins[j]; | |
| if (min > max_min) { | |
| max_min = min; | |
| } | |
| } | |
| float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f; | |
| float inv_min = max_min > 0 ? 63.f/max_min : 0.f; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| uint8_t ls = nearest_int(inv_scale*scales[j]); | |
| uint8_t lm = nearest_int(inv_min*mins[j]); | |
| ls = MIN(63, ls); | |
| lm = MIN(63, lm); | |
| if (j < 4) { | |
| y[i].scales[j] = ls; | |
| y[i].scales[j+4] = lm; | |
| } else { | |
| y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); | |
| y[i].scales[j-4] |= ((ls >> 4) << 6); | |
| y[i].scales[j-0] |= ((lm >> 4) << 6); | |
| } | |
| } | |
| y[i].d = GGML_FP32_TO_FP16(max_scale/63.f); | |
| y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f); | |
| uint8_t sc, m; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| get_scale_min_k4(j, y[i].scales, &sc, &m); | |
| const float d = GGML_FP16_TO_FP32(y[i].d) * sc; | |
| if (!d) continue; | |
| const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m; | |
| for (int ii = 0; ii < 32; ++ii) { | |
| int l = nearest_int((x[32*j + ii] + dm)/d); | |
| l = MAX(0, MIN(31, l)); | |
| L[32*j + ii] = l; | |
| } | |
| } | |
| uint8_t * restrict qh = y[i].qh; | |
| uint8_t * restrict ql = y[i].qs; | |
| memset(qh, 0, QK_K/8); | |
| uint8_t m1 = 1, m2 = 2; | |
| for (int n = 0; n < QK_K; n += 64) { | |
| for (int j = 0; j < 32; ++j) { | |
| int l1 = L[n + j]; | |
| if (l1 > 15) { | |
| l1 -= 16; qh[j] |= m1; | |
| } | |
| int l2 = L[n + j + 32]; | |
| if (l2 > 15) { | |
| l2 -= 16; qh[j] |= m2; | |
| } | |
| ql[j] = l1 | (l2 << 4); | |
| } | |
| m1 <<= 2; m2 <<= 2; | |
| ql += 32; | |
| } | |
| #else | |
| float max_scale = 0, amax = 0; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1, NULL); | |
| float abs_scale = fabsf(scales[j]); | |
| if (abs_scale > amax) { | |
| amax = abs_scale; | |
| max_scale = scales[j]; | |
| } | |
| } | |
| float iscale = -128.f/max_scale; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| int l = nearest_int(iscale*scales[j]); | |
| y[i].scales[j] = MAX(-128, MIN(127, l)); | |
| } | |
| y[i].d = GGML_FP32_TO_FP16(1/iscale); | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| const float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j]; | |
| if (!d) continue; | |
| for (int ii = 0; ii < 16; ++ii) { | |
| int l = nearest_int(x[16*j + ii]/d); | |
| l = MAX(-16, MIN(15, l)); | |
| L[16*j + ii] = l + 16; | |
| } | |
| } | |
| uint8_t * restrict qh = y[i].qh; | |
| uint8_t * restrict ql = y[i].qs; | |
| memset(qh, 0, QK_K/8); | |
| for (int j = 0; j < 32; ++j) { | |
| int jm = j%8; | |
| int is = j/8; | |
| int l1 = L[j]; | |
| if (l1 > 15) { | |
| l1 -= 16; qh[jm] |= (1 << is); | |
| } | |
| int l2 = L[j + 32]; | |
| if (l2 > 15) { | |
| l2 -= 16; qh[jm] |= (1 << (4 + is)); | |
| } | |
| ql[j] = l1 | (l2 << 4); | |
| } | |
| #endif | |
| x += QK_K; | |
| } | |
| } | |
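| // Illustrative helper (added, not part of the original file): for the QK_K == 256 path | |
| // above, the high (5th) bit of quant j ends up in byte j % 32 of qh, at bit position | |
| // 2*(j/64) + (j%64)/32, because m1/m2 advance by two bit positions every 64 quants. | |
| // dequantize_row_q5_K() below walks the same layout with the u1/u2 masks. | |
| static inline int example_q5_K_high_bit(const uint8_t * qh, int j) { | |
| const int bit = 2*(j/64) + ((j%64)/32); | |
| return (qh[j % 32] >> bit) & 1; | |
| } | |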
| void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| for (int i = 0; i < nb; i++) { | |
| const uint8_t * ql = x[i].qs; | |
| const uint8_t * qh = x[i].qh; | |
| #if QK_K == 256 | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| const float min = GGML_FP16_TO_FP32(x[i].dmin); | |
| int is = 0; | |
| uint8_t sc, m; | |
| uint8_t u1 = 1, u2 = 2; | |
| for (int j = 0; j < QK_K; j += 64) { | |
| get_scale_min_k4(is + 0, x[i].scales, &sc, &m); | |
| const float d1 = d * sc; const float m1 = min * m; | |
| get_scale_min_k4(is + 1, x[i].scales, &sc, &m); | |
| const float d2 = d * sc; const float m2 = min * m; | |
| for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1; | |
| for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2; | |
| ql += 32; is += 2; | |
| u1 <<= 2; u2 <<= 2; | |
| } | |
| #else | |
| float d = GGML_FP16_TO_FP32(x[i].d); | |
| const int8_t * restrict s = x[i].scales; | |
| for (int l = 0; l < 8; ++l) { | |
| y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16)); | |
| y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16)); | |
| y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16)); | |
| y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16)); | |
| y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16)); | |
| y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16)); | |
| y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16)); | |
| y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 0 : 16)); | |
| } | |
| y += QK_K; | |
| #endif | |
| } | |
| } | |
| void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) { | |
| assert(k % QK_K == 0); | |
| block_q5_K * restrict y = vy; | |
| quantize_row_q5_K_reference(x, y, k); | |
| } | |
| size_t ggml_quantize_q5_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { | |
| assert(k % QK_K == 0); | |
| (void)hist; // TODO: collect histograms | |
| for (int j = 0; j < n; j += k) { | |
| block_q5_K * restrict y = (block_q5_K *)dst + j/QK_K; | |
| quantize_row_q5_K_reference(src + j, y, k); | |
| } | |
| return (n/QK_K*sizeof(block_q5_K)); | |
| } | |
| static void quantize_row_q5_K_impl(const float * restrict x, block_q5_K * restrict y, int n_per_row, const float * quant_weights) { | |
| #if QK_K != 256 | |
| (void)quant_weights; | |
| quantize_row_q5_K_reference(x, y, n_per_row); | |
| #else | |
| assert(n_per_row % QK_K == 0); | |
| const int nb = n_per_row / QK_K; | |
| uint8_t L[QK_K]; | |
| uint8_t Laux[32]; | |
| uint8_t Ls[QK_K/32]; | |
| uint8_t Lm[QK_K/32]; | |
| float mins[QK_K/32]; | |
| float scales[QK_K/32]; | |
| float sw[QK_K/32]; | |
| float weights[32]; | |
| for (int i = 0; i < nb; i++) { | |
| float sum_x2 = 0; | |
| for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l]; | |
| float sigma2 = 2*sum_x2/QK_K; | |
| float av_x = sqrtf(sigma2); | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| if (quant_weights) { | |
| const float * qw = quant_weights + QK_K*i + 32*j; | |
| for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]); | |
| } else { | |
| for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); | |
| } | |
| float sumw = 0; | |
| for (int l = 0; l < 32; ++l) sumw += weights[l]; | |
| sw[j] = sumw; | |
| scales[j] = make_qkx3_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false); | |
| } | |
| float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw); | |
| float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw); | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| uint8_t ls = Ls[j]; | |
| uint8_t lm = Lm[j]; | |
| ls = MIN(63, ls); | |
| lm = MIN(63, lm); | |
| if (j < 4) { | |
| y[i].scales[j] = ls; | |
| y[i].scales[j+4] = lm; | |
| } else { | |
| y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); | |
| y[i].scales[j-4] |= ((ls >> 4) << 6); | |
| y[i].scales[j-0] |= ((lm >> 4) << 6); | |
| } | |
| } | |
| y[i].d = GGML_FP32_TO_FP16(d_block); | |
| y[i].dmin = GGML_FP32_TO_FP16(m_block); | |
| uint8_t sc, m; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| get_scale_min_k4(j, y[i].scales, &sc, &m); | |
| const float d = GGML_FP16_TO_FP32(y[i].d) * sc; | |
| if (!d) continue; | |
| const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m; | |
| for (int ii = 0; ii < 32; ++ii) { | |
| int l = nearest_int((x[32*j + ii] + dm)/d); | |
| l = MAX(0, MIN(31, l)); | |
| L[32*j + ii] = l; | |
| } | |
| } | |
| uint8_t * restrict qh = y[i].qh; | |
| uint8_t * restrict ql = y[i].qs; | |
| memset(qh, 0, QK_K/8); | |
| uint8_t m1 = 1, m2 = 2; | |
| for (int n = 0; n < QK_K; n += 64) { | |
| for (int j = 0; j < 32; ++j) { | |
| int l1 = L[n + j]; | |
| if (l1 > 15) { | |
| l1 -= 16; qh[j] |= m1; | |
| } | |
| int l2 = L[n + j + 32]; | |
| if (l2 > 15) { | |
| l2 -= 16; qh[j] |= m2; | |
| } | |
| ql[j] = l1 | (l2 << 4); | |
| } | |
| m1 <<= 2; m2 <<= 2; | |
| ql += 32; | |
| } | |
| x += QK_K; | |
| } | |
| #endif | |
| } | |
| size_t quantize_q5_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| size_t row_size = ggml_row_size(GGML_TYPE_Q5_K, n_per_row); | |
| if (!quant_weights) { | |
| quantize_row_q5_K_reference(src, dst, nrow*n_per_row); | |
| } | |
| else { | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_q5_K_impl(src, (block_q5_K*)qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += row_size; | |
| } | |
| } | |
| return nrow * row_size; | |
| } | |
| // ====================== 6-bit (de)-quantization | |
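| // Layout note added for clarity (not in the original source): a block_q6_K stores each | |
| // 6-bit quant as a low nibble in ql[] plus 2 high bits in qh[], one int8_t scale per | |
| // 16-element sub-block, and a single fp16 super-block scale d. A value is reconstructed | |
| // as d * scales[sub_block] * (q - 32), as the (de)quantization code below shows. | |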
| void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| int8_t L[QK_K]; | |
| float scales[QK_K/16]; | |
| for (int i = 0; i < nb; i++) { | |
| float max_scale = 0; | |
| float max_abs_scale = 0; | |
| for (int ib = 0; ib < QK_K/16; ++ib) { | |
| const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL); | |
| scales[ib] = scale; | |
| const float abs_scale = fabsf(scale); | |
| if (abs_scale > max_abs_scale) { | |
| max_abs_scale = abs_scale; | |
| max_scale = scale; | |
| } | |
| } | |
| if (!max_abs_scale) { | |
| memset(&y[i], 0, sizeof(block_q6_K)); | |
| y[i].d = GGML_FP32_TO_FP16(0.f); | |
| x += QK_K; | |
| continue; | |
| } | |
| float iscale = -128.f/max_scale; | |
| y[i].d = GGML_FP32_TO_FP16(1/iscale); | |
| for (int ib = 0; ib < QK_K/16; ++ib) { | |
| y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib])); | |
| } | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j]; | |
| if (!d) { | |
| continue; | |
| } | |
| for (int ii = 0; ii < 16; ++ii) { | |
| int l = nearest_int(x[16*j + ii]/d); | |
| l = MAX(-32, MIN(31, l)); | |
| L[16*j + ii] = l + 32; | |
| } | |
| } | |
| uint8_t * restrict ql = y[i].ql; | |
| uint8_t * restrict qh = y[i].qh; | |
| #if QK_K == 256 | |
| for (int j = 0; j < QK_K; j += 128) { | |
| for (int l = 0; l < 32; ++l) { | |
| const uint8_t q1 = L[j + l + 0] & 0xF; | |
| const uint8_t q2 = L[j + l + 32] & 0xF; | |
| const uint8_t q3 = L[j + l + 64] & 0xF; | |
| const uint8_t q4 = L[j + l + 96] & 0xF; | |
| ql[l+ 0] = q1 | (q3 << 4); | |
| ql[l+32] = q2 | (q4 << 4); | |
| qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6); | |
| } | |
| ql += 64; | |
| qh += 32; | |
| } | |
| #else | |
| for (int l = 0; l < 32; ++l) { | |
| const uint8_t q1 = L[l + 0] & 0xF; | |
| const uint8_t q2 = L[l + 32] & 0xF; | |
| ql[l] = q1 | (q2 << 4); | |
| } | |
| for (int l = 0; l < 16; ++l) { | |
| qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6); | |
| } | |
| #endif | |
| x += QK_K; | |
| } | |
| } | |
| void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict ql = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict sc = x[i].scales; | |
| #if QK_K == 256 | |
| for (int n = 0; n < QK_K; n += 128) { | |
| for (int l = 0; l < 32; ++l) { | |
| int is = l/16; | |
| const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; | |
| const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; | |
| const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; | |
| const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; | |
| y[l + 0] = d * sc[is + 0] * q1; | |
| y[l + 32] = d * sc[is + 2] * q2; | |
| y[l + 64] = d * sc[is + 4] * q3; | |
| y[l + 96] = d * sc[is + 6] * q4; | |
| } | |
| y += 128; | |
| ql += 64; | |
| qh += 32; | |
| sc += 8; | |
| } | |
| #else | |
| for (int l = 0; l < 16; ++l) { | |
| const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; | |
| const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; | |
| const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; | |
| const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; | |
| y[l+ 0] = d * sc[0] * q1; | |
| y[l+16] = d * sc[1] * q2; | |
| y[l+32] = d * sc[2] * q3; | |
| y[l+48] = d * sc[3] * q4; | |
| } | |
| y += 64; | |
| #endif | |
| } | |
| } | |
| void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) { | |
| assert(k % QK_K == 0); | |
| block_q6_K * restrict y = vy; | |
| quantize_row_q6_K_reference(x, y, k); | |
| } | |
| size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist) { | |
| assert(k % QK_K == 0); | |
| (void)hist; // TODO: collect histograms | |
| for (int j = 0; j < n; j += k) { | |
| block_q6_K * restrict y = (block_q6_K *)dst + j/QK_K; | |
| quantize_row_q6_K_reference(src + j, y, k); | |
| } | |
| return (n/QK_K*sizeof(block_q6_K)); | |
| } | |
| static void quantize_row_q6_K_impl(const float * restrict x, block_q6_K * restrict y, int n_per_row, const float * quant_weights) { | |
| #if QK_K != 256 | |
| (void)quant_weights; | |
| quantize_row_q6_K_reference(x, y, n_per_row); | |
| #else | |
| assert(n_per_row % QK_K == 0); | |
| const int nb = n_per_row / QK_K; | |
| int8_t L[QK_K]; | |
| float scales[QK_K/16]; | |
| //float weights[16]; | |
| for (int i = 0; i < nb; i++) { | |
| //float sum_x2 = 0; | |
| //for (int j = 0; j < QK_K; ++j) sum_x2 += x[j]*x[j]; | |
| //float sigma2 = sum_x2/QK_K; | |
| float max_scale = 0; | |
| float max_abs_scale = 0; | |
| for (int ib = 0; ib < QK_K/16; ++ib) { | |
| float scale; | |
| if (quant_weights) { | |
| const float * qw = quant_weights + QK_K*i + 16*ib; | |
| //for (int j = 0; j < 16; ++j) weights[j] = qw[j] * sqrtf(sigma2 + x[16*ib + j]*x[16*ib + j]); | |
| //scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, weights); | |
| scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, qw); | |
| } else { | |
| scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL); | |
| } | |
| scales[ib] = scale; | |
| const float abs_scale = fabsf(scale); | |
| if (abs_scale > max_abs_scale) { | |
| max_abs_scale = abs_scale; | |
| max_scale = scale; | |
| } | |
| } | |
| if (!max_abs_scale) { | |
| memset(&y[i], 0, sizeof(block_q6_K)); | |
| y[i].d = GGML_FP32_TO_FP16(0.f); | |
| x += QK_K; | |
| continue; | |
| } | |
| float iscale = -128.f/max_scale; | |
| y[i].d = GGML_FP32_TO_FP16(1/iscale); | |
| for (int ib = 0; ib < QK_K/16; ++ib) { | |
| y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib])); | |
| } | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j]; | |
| if (!d) { | |
| continue; | |
| } | |
| for (int ii = 0; ii < 16; ++ii) { | |
| int l = nearest_int(x[16*j + ii]/d); | |
| l = MAX(-32, MIN(31, l)); | |
| L[16*j + ii] = l + 32; | |
| } | |
| } | |
| uint8_t * restrict ql = y[i].ql; | |
| uint8_t * restrict qh = y[i].qh; | |
| for (int j = 0; j < QK_K; j += 128) { | |
| for (int l = 0; l < 32; ++l) { | |
| const uint8_t q1 = L[j + l + 0] & 0xF; | |
| const uint8_t q2 = L[j + l + 32] & 0xF; | |
| const uint8_t q3 = L[j + l + 64] & 0xF; | |
| const uint8_t q4 = L[j + l + 96] & 0xF; | |
| ql[l+ 0] = q1 | (q3 << 4); | |
| ql[l+32] = q2 | (q4 << 4); | |
| qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6); | |
| } | |
| ql += 64; | |
| qh += 32; | |
| } | |
| x += QK_K; | |
| } | |
| #endif | |
| } | |
| size_t quantize_q6_K(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| size_t row_size = ggml_row_size(GGML_TYPE_Q6_K, n_per_row); | |
| if (!quant_weights) { | |
| quantize_row_q6_K_reference(src, dst, nrow*n_per_row); | |
| } | |
| else { | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_q6_K_impl(src, (block_q6_K*)qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += row_size; | |
| } | |
| } | |
| return nrow * row_size; | |
| } | |
| static void quantize_row_q4_0_impl(const float * restrict x, block_q4_0 * restrict y, int n_per_row, const float * quant_weights) { | |
| static_assert(QK4_0 == 32, "QK4_0 must be 32"); | |
| if (!quant_weights) { | |
| quantize_row_q4_0_reference(x, y, n_per_row); | |
| return; | |
| } | |
| float weight[QK4_0]; | |
| int8_t L[QK4_0]; | |
| float sum_x2 = 0; | |
| for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j]; | |
| float sigma2 = sum_x2/n_per_row; | |
| const int nb = n_per_row/QK4_0; | |
| for (int ib = 0; ib < nb; ++ib) { | |
| const float * xb = x + QK4_0 * ib; | |
| const float * qw = quant_weights + QK4_0 * ib; | |
| for (int j = 0; j < QK4_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]); | |
| float d = make_qx_quants(QK4_0, 8, xb, L, 1, weight); | |
| y[ib].d = GGML_FP32_TO_FP16(d); | |
| for (int j = 0; j < 16; ++j) { | |
| y[ib].qs[j] = L[j] | (L[j+16] << 4); | |
| } | |
| } | |
| } | |
| size_t quantize_q4_0(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| if (!quant_weights) { | |
| return ggml_quantize_q4_0(src, dst, nrow*n_per_row, n_per_row, hist); | |
| } | |
| size_t row_size = ggml_row_size(GGML_TYPE_Q4_0, n_per_row); | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_q4_0_impl(src, (block_q4_0*)qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += row_size; | |
| } | |
| return nrow * row_size; | |
| } | |
| static void quantize_row_q4_1_impl(const float * restrict x, block_q4_1 * restrict y, int n_per_row, const float * quant_weights) { | |
| static_assert(QK4_1 == 32, "QK4_1 must be 32"); | |
| if (!quant_weights) { | |
| quantize_row_q4_1_reference(x, y, n_per_row); | |
| return; | |
| } | |
| float weight[QK4_1]; | |
| uint8_t L[QK4_1], Laux[QK4_1]; | |
| float sum_x2 = 0; | |
| for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j]; | |
| float sigma2 = sum_x2/n_per_row; | |
| const int nb = n_per_row/QK4_1; | |
| for (int ib = 0; ib < nb; ++ib) { | |
| const float * xb = x + QK4_1 * ib; | |
| const float * qw = quant_weights + QK4_1 * ib; | |
| for (int j = 0; j < QK4_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]); | |
| float min; | |
| float d = make_qkx3_quants(QK4_1, 15, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false); | |
| y[ib].d = GGML_FP32_TO_FP16(d); | |
| y[ib].m = GGML_FP32_TO_FP16(-min); | |
| for (int j = 0; j < 16; ++j) { | |
| y[ib].qs[j] = L[j] | (L[j+16] << 4); | |
| } | |
| } | |
| } | |
| size_t quantize_q4_1(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| if (!quant_weights) { | |
| return ggml_quantize_q4_1(src, dst, nrow*n_per_row, n_per_row, hist); | |
| } | |
| size_t row_size = ggml_row_size(GGML_TYPE_Q4_1, n_per_row); | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_q4_1_impl(src, (block_q4_1*)qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += row_size; | |
| } | |
| return nrow * row_size; | |
| } | |
| static void quantize_row_q5_0_impl(const float * restrict x, block_q5_0 * restrict y, int n_per_row, const float * quant_weights) { | |
| static_assert(QK5_0 == 32, "QK5_0 must be 32"); | |
| if (!quant_weights) { | |
| quantize_row_q5_0_reference(x, y, n_per_row); | |
| return; | |
| } | |
| float weight[QK5_0]; | |
| int8_t L[QK5_0]; | |
| float sum_x2 = 0; | |
| for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j]; | |
| float sigma2 = sum_x2/n_per_row; | |
| const int nb = n_per_row/QK5_0; | |
| for (int ib = 0; ib < nb; ++ib) { | |
| const float * xb = x + QK5_0 * ib; | |
| const float * qw = quant_weights + QK5_0 * ib; | |
| for (int j = 0; j < QK5_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]); | |
| float d = make_qx_quants(QK5_0, 16, xb, L, 1, weight); | |
| y[ib].d = GGML_FP32_TO_FP16(d); | |
| uint32_t qh = 0; | |
| for (int j = 0; j < 16; ++j) { | |
| const uint8_t xi0 = L[j]; | |
| const uint8_t xi1 = L[j+16]; | |
| y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); | |
| // get the 5-th bit and store it in qh at the right position | |
| qh |= ((xi0 & 0x10u) >> 4) << (j + 0); | |
| qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); | |
| } | |
| memcpy(&y[ib].qh, &qh, sizeof(qh)); | |
| } | |
| } | |
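| // Illustrative helper (added, not part of the original file): reassemble the 5-bit quant | |
| // with index j (0 <= j < QK5_0) of one block from the qs/qh layout produced above. The | |
| // dequantized value is then d * (q5 - 16), matching the regular q5_0 dequantization. | |
| static inline int example_q5_0_quant(const block_q5_0 * b, int j) { | |
| uint32_t qh; | |
| memcpy(&qh, b->qh, sizeof(qh)); | |
| const int nib = j < QK5_0/2 ? (b->qs[j] & 0x0F) : (b->qs[j - QK5_0/2] >> 4); | |
| return nib | (((qh >> j) & 1) << 4); | |
| } | |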
| size_t quantize_q5_0(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| if (!quant_weights) { | |
| return ggml_quantize_q5_0(src, dst, nrow*n_per_row, n_per_row, hist); | |
| } | |
| size_t row_size = ggml_row_size(GGML_TYPE_Q5_0, n_per_row); | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_q5_0_impl(src, (block_q5_0*)qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += row_size; | |
| } | |
| return nrow * row_size; | |
| } | |
| static void quantize_row_q5_1_impl(const float * restrict x, block_q5_1 * restrict y, int n_per_row, const float * quant_weights) { | |
| static_assert(QK5_1 == 32, "QK5_1 must be 32"); | |
| if (!quant_weights) { | |
| quantize_row_q5_1_reference(x, y, n_per_row); | |
| return; | |
| } | |
| float weight[QK5_1]; | |
| uint8_t L[QK5_1], Laux[QK5_1]; | |
| float sum_x2 = 0; | |
| for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j]; | |
| float sigma2 = sum_x2/n_per_row; | |
| const int nb = n_per_row/QK5_1; | |
| for (int ib = 0; ib < nb; ++ib) { | |
| const float * xb = x + QK5_1 * ib; | |
| const float * qw = quant_weights + QK5_1 * ib; | |
| for (int j = 0; j < QK5_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]); | |
| float min; | |
| float d = make_qkx3_quants(QK5_1, 31, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false); | |
| y[ib].d = GGML_FP32_TO_FP16(d); | |
| y[ib].m = GGML_FP32_TO_FP16(-min); | |
| uint32_t qh = 0; | |
| for (int j = 0; j < 16; ++j) { | |
| const uint8_t xi0 = L[j]; | |
| const uint8_t xi1 = L[j+16]; | |
| y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); | |
| // get the 5-th bit and store it in qh at the right position | |
| qh |= ((xi0 & 0x10u) >> 4) << (j + 0); | |
| qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); | |
| } | |
| memcpy(&y[ib].qh, &qh, sizeof(qh)); | |
| } | |
| } | |
| size_t quantize_q5_1(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| if (!quant_weights) { | |
| return ggml_quantize_q5_1(src, dst, nrow*n_per_row, n_per_row, hist); | |
| } | |
| size_t row_size = ggml_row_size(GGML_TYPE_Q5_1, n_per_row); | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_q5_1_impl(src, (block_q5_1*)qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += row_size; | |
| } | |
| return nrow * row_size; | |
| } | |
| // ====================== "True" 2-bit (de)-quantization | |
| static const uint64_t iq2xxs_grid[256] = { | |
| 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, | |
| 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x08080808082b0808, | |
| 0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819, | |
| 0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819, | |
| 0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b, | |
| 0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808, | |
| 0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08, | |
| 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b, | |
| 0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819, | |
| 0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08, | |
| 0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, | |
| 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08, | |
| 0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808, | |
| 0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808, | |
| 0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919, | |
| 0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819, | |
| 0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08, | |
| 0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908, | |
| 0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819, | |
| 0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808, | |
| 0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808, | |
| 0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908, | |
| 0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808, | |
| 0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08, | |
| 0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819, | |
| 0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819, | |
| 0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819, | |
| 0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908, | |
| 0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19, | |
| 0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819, | |
| 0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b, | |
| 0x082b191908080808, 0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808, | |
| 0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908, | |
| 0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08, | |
| 0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08, | |
| 0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908, | |
| 0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819, | |
| 0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808, | |
| 0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808, | |
| 0x1908190808082b08, 0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19, | |
| 0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819, | |
| 0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, | |
| 0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b, | |
| 0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08, | |
| 0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808, | |
| 0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 0x191908192b2b1908, | |
| 0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b, | |
| 0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819, | |
| 0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08, | |
| 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08, | |
| 0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808, | |
| 0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b, | |
| 0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b, | |
| 0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908, | |
| 0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819, | |
| 0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808, | |
| 0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908, | |
| 0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b, | |
| 0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808, | |
| 0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b, | |
| 0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b, | |
| 0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808, | |
| 0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19, | |
| 0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908, | |
| }; | |
| static const uint64_t iq2xs_grid[512] = { | |
| 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, | |
| 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b, | |
| 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919, | |
| 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b, | |
| 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919, | |
| 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808, | |
| 0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819, | |
| 0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819, | |
| 0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, | |
| 0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x0808081908192b2b, | |
| 0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b, | |
| 0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908, | |
| 0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908, | |
| 0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919, | |
| 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808, | |
| 0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919, | |
| 0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908, | |
| 0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b, | |
| 0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, | |
| 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08, | |
| 0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808, | |
| 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808, | |
| 0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819, | |
| 0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908, | |
| 0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819, | |
| 0x0808192b08081908, 0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808, | |
| 0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b, | |
| 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819, | |
| 0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819, | |
| 0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808, | |
| 0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908, | |
| 0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19, | |
| 0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b, | |
| 0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b, | |
| 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919, | |
| 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808, | |
| 0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819, | |
| 0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819, | |
| 0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b, | |
| 0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908, | |
| 0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808, | |
| 0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819, | |
| 0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808, | |
| 0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, | |
| 0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808, | |
| 0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808, | |
| 0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908, | |
| 0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908, | |
| 0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808, | |
| 0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b, | |
| 0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819, | |
| 0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, | |
| 0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908, | |
| 0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808, | |
| 0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908, | |
| 0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919, | |
| 0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08, | |
| 0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19, | |
| 0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b, | |
| 0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b, | |
| 0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808, | |
| 0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08, | |
| 0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b, | |
| 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908, | |
| 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b, | |
| 0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908, | |
| 0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, | |
| 0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808, | |
| 0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808, | |
| 0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08, | |
| 0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819, | |
| 0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919, | |
| 0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808, | |
| 0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808, | |
| 0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819, | |
| 0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819, | |
| 0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908, | |
| 0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908, | |
| 0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b, | |
| 0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908, | |
| 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908, | |
| 0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908, | |
| 0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808, | |
| 0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, | |
| 0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819, | |
| 0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819, | |
| 0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808, | |
| 0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b, | |
| 0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819, | |
| 0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819, | |
| 0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08, | |
| 0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808, | |
| 0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19, | |
| 0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919, | |
| 0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, | |
| 0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19, | |
| 0x192b081908080808, 0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b, | |
| 0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808, | |
| 0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b, | |
| 0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b, | |
| 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, | |
| 0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b, | |
| 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808, | |
| 0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819, | |
| 0x2b08081908081908, 0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808, | |
| 0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808, | |
| 0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08, | |
| 0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b, | |
| 0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19, | |
| 0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08, | |
| 0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919, | |
| 0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08, | |
| 0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08, | |
| 0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 0x2b19080808081908, | |
| 0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908, | |
| 0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b, | |
| 0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908, | |
| 0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808, | |
| 0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b, | |
| 0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808, | |
| 0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808, | |
| 0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19, | |
| 0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08, | |
| 0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808, | |
| 0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b, | |
| 0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808, | |
| 0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b, | |
| 0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b, | |
| }; | |
| static const uint32_t iq3xxs_grid[256] = { | |
| 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414, | |
| 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14, | |
| 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404, | |
| 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e, | |
| 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c, | |
| 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c, | |
| 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34, | |
| 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c, | |
| 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c, | |
| 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04, | |
| 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c, | |
| 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414, | |
| 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434, | |
| 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c, | |
| 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e, | |
| 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24, | |
| 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24, | |
| 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c, | |
| 0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c, | |
| 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14, | |
| 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414, | |
| 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e, | |
| 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404, | |
| 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c, | |
| 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c, | |
| 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14, | |
| 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c, | |
| 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c, | |
| 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14, | |
| 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14, | |
| 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c, | |
| 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04, | |
| }; | |
| static const uint32_t iq3xs_grid[512] = { | |
| 0x04040404, 0x0404040c, 0x04040414, 0x0404042c, 0x0404043e, 0x04040c04, 0x04040c0c, 0x04040c14, | |
| 0x04040c24, 0x04040c34, 0x04041404, 0x0404140c, 0x0404142c, 0x04041c1c, 0x04042404, 0x04042414, | |
| 0x0404242c, 0x0404243e, 0x04042c0c, 0x04042c1c, 0x04043404, 0x04043414, 0x04043e0c, 0x04043e24, | |
| 0x04043e3e, 0x040c0404, 0x040c040c, 0x040c0414, 0x040c0424, 0x040c0c04, 0x040c0c0c, 0x040c0c2c, | |
| 0x040c1404, 0x040c141c, 0x040c143e, 0x040c1c0c, 0x040c1c2c, 0x040c2424, 0x040c340c, 0x040c342c, | |
| 0x040c3e14, 0x04140404, 0x0414040c, 0x0414042c, 0x0414043e, 0x04140c04, 0x04140c1c, 0x04140c34, | |
| 0x0414140c, 0x0414142c, 0x04141c04, 0x04141c24, 0x04142414, 0x0414242c, 0x0414243e, 0x04142c0c, | |
| 0x04142c1c, 0x04143e04, 0x04143e1c, 0x041c041c, 0x041c0c0c, 0x041c0c2c, 0x041c1404, 0x041c1414, | |
| 0x041c1c0c, 0x041c1c1c, 0x041c1c34, 0x041c2424, 0x041c2c04, 0x041c2c14, 0x041c343e, 0x041c3e0c, | |
| 0x041c3e2c, 0x04240404, 0x04240c1c, 0x04240c3e, 0x0424140c, 0x04241424, 0x04241c14, 0x04242404, | |
| 0x0424241c, 0x04242c0c, 0x04243e04, 0x042c0414, 0x042c0424, 0x042c1404, 0x042c1414, 0x042c1434, | |
| 0x042c1c1c, 0x042c240c, 0x042c242c, 0x042c243e, 0x042c3434, 0x042c3e1c, 0x04340434, 0x04340c0c, | |
| 0x04340c1c, 0x04341c0c, 0x04342c14, 0x04343e0c, 0x043e0404, 0x043e0414, 0x043e0424, 0x043e1404, | |
| 0x043e1414, 0x043e1434, 0x043e1c1c, 0x043e2c04, 0x043e2c24, 0x0c040404, 0x0c04040c, 0x0c040414, | |
| 0x0c040424, 0x0c040c04, 0x0c040c0c, 0x0c040c1c, 0x0c040c2c, 0x0c040c3e, 0x0c041404, 0x0c041414, | |
| 0x0c041c0c, 0x0c041c24, 0x0c041c34, 0x0c042c24, 0x0c042c34, 0x0c04340c, 0x0c043e14, 0x0c0c0404, | |
| 0x0c0c040c, 0x0c0c041c, 0x0c0c0434, 0x0c0c0c04, 0x0c0c0c24, 0x0c0c140c, 0x0c0c1c04, 0x0c0c1c1c, | |
| 0x0c0c240c, 0x0c0c2c04, 0x0c0c2c14, 0x0c0c3e04, 0x0c0c3e34, 0x0c140404, 0x0c140c14, 0x0c140c2c, | |
| 0x0c140c3e, 0x0c141404, 0x0c141424, 0x0c141c14, 0x0c142404, 0x0c14241c, 0x0c142c2c, 0x0c143404, | |
| 0x0c143e14, 0x0c1c040c, 0x0c1c0424, 0x0c1c043e, 0x0c1c0c04, 0x0c1c0c1c, 0x0c1c140c, 0x0c1c143e, | |
| 0x0c1c1c04, 0x0c1c1c24, 0x0c1c240c, 0x0c1c3414, 0x0c1c3e04, 0x0c24041c, 0x0c24042c, 0x0c240c14, | |
| 0x0c240c24, 0x0c241c0c, 0x0c241c1c, 0x0c242414, 0x0c242434, 0x0c242c04, 0x0c242c24, 0x0c2c040c, | |
| 0x0c2c0c04, 0x0c2c0c1c, 0x0c2c140c, 0x0c2c1c04, 0x0c2c1c14, 0x0c2c2c0c, 0x0c341404, 0x0c341424, | |
| 0x0c34143e, 0x0c342424, 0x0c342434, 0x0c3e040c, 0x0c3e041c, 0x0c3e0c04, 0x0c3e0c14, 0x0c3e140c, | |
| 0x0c3e1c2c, 0x0c3e240c, 0x0c3e3414, 0x0c3e3e04, 0x14040404, 0x1404040c, 0x1404041c, 0x1404042c, | |
| 0x1404043e, 0x14040c04, 0x14040c14, 0x14040c24, 0x14040c34, 0x1404140c, 0x1404141c, 0x1404143e, | |
| 0x14041c04, 0x14041c14, 0x1404240c, 0x1404241c, 0x1404242c, 0x14042c04, 0x14042c14, 0x1404343e, | |
| 0x14043e04, 0x14043e1c, 0x14043e2c, 0x140c0404, 0x140c0414, 0x140c0c04, 0x140c0c1c, 0x140c0c3e, | |
| 0x140c1414, 0x140c142c, 0x140c1c0c, 0x140c1c24, 0x140c2414, 0x140c2c0c, 0x1414040c, 0x14140424, | |
| 0x1414043e, 0x1414140c, 0x1414141c, 0x14141c04, 0x14141c3e, 0x1414240c, 0x14142c1c, 0x14142c3e, | |
| 0x14143e0c, 0x14143e24, 0x141c0404, 0x141c0414, 0x141c042c, 0x141c0c0c, 0x141c1414, 0x141c1424, | |
| 0x141c1c0c, 0x141c1c1c, 0x141c2414, 0x141c2c04, 0x141c3434, 0x1424040c, 0x1424043e, 0x14241404, | |
| 0x1424141c, 0x14241c14, 0x14241c2c, 0x1424240c, 0x14243e14, 0x14243e2c, 0x142c0424, 0x142c0c0c, | |
| 0x142c1414, 0x142c1c3e, 0x142c2404, 0x142c2c1c, 0x142c3e04, 0x14340404, 0x14340414, 0x1434043e, | |
| 0x1434140c, 0x14342c2c, 0x1434340c, 0x143e042c, 0x143e0c0c, 0x143e1434, 0x143e1c04, 0x143e241c, | |
| 0x143e2c04, 0x1c040414, 0x1c040c0c, 0x1c040c1c, 0x1c040c2c, 0x1c040c3e, 0x1c041414, 0x1c041c0c, | |
| 0x1c041c1c, 0x1c041c2c, 0x1c042414, 0x1c042424, 0x1c04243e, 0x1c042c0c, 0x1c04341c, 0x1c043e0c, | |
| 0x1c0c040c, 0x1c0c041c, 0x1c0c042c, 0x1c0c0c24, 0x1c0c140c, 0x1c0c141c, 0x1c0c2404, 0x1c0c3404, | |
| 0x1c0c3e14, 0x1c0c3e34, 0x1c140404, 0x1c140c14, 0x1c141404, 0x1c141c14, 0x1c141c24, 0x1c142c04, | |
| 0x1c1c040c, 0x1c1c0c04, 0x1c1c0c24, 0x1c1c140c, 0x1c1c141c, 0x1c1c143e, 0x1c1c1c04, 0x1c1c240c, | |
| 0x1c1c241c, 0x1c1c243e, 0x1c1c2c2c, 0x1c1c3e1c, 0x1c24041c, 0x1c240c0c, 0x1c240c34, 0x1c241414, | |
| 0x1c241c0c, 0x1c242c14, 0x1c243404, 0x1c243424, 0x1c2c040c, 0x1c2c0c04, 0x1c2c0c14, 0x1c2c142c, | |
| 0x1c2c1c14, 0x1c2c2424, 0x1c2c2c34, 0x1c2c3e1c, 0x1c340c34, 0x1c34240c, 0x1c3e040c, 0x1c3e041c, | |
| 0x1c3e1404, 0x1c3e1414, 0x1c3e1c2c, 0x24040404, 0x24040424, 0x24040c14, 0x24041404, 0x24041424, | |
| 0x2404143e, 0x24041c14, 0x2404240c, 0x24042c04, 0x24043e04, 0x240c0414, 0x240c043e, 0x240c0c0c, | |
| 0x240c0c1c, 0x240c1414, 0x240c1c04, 0x240c1c2c, 0x240c241c, 0x240c2c0c, 0x240c2c2c, 0x2414040c, | |
| 0x2414041c, 0x24140c04, 0x24140c2c, 0x2414140c, 0x24141c1c, 0x24142404, 0x24142c3e, 0x24143414, | |
| 0x24143e04, 0x241c0424, 0x241c0c0c, 0x241c0c1c, 0x241c1404, 0x241c1414, 0x241c1c0c, 0x241c1c2c, | |
| 0x24240404, 0x24240414, 0x24241424, 0x24241c3e, 0x24242404, 0x24243e0c, 0x242c042c, 0x242c043e, | |
| 0x242c140c, 0x242c3414, 0x24340c1c, 0x24341c24, 0x24343404, 0x243e0c04, 0x243e0c2c, 0x243e1c04, | |
| 0x243e241c, 0x243e2c0c, 0x2c040414, 0x2c040c04, 0x2c040c24, 0x2c041414, 0x2c042404, 0x2c042424, | |
| 0x2c04243e, 0x2c042c14, 0x2c043434, 0x2c043e24, 0x2c0c040c, 0x2c0c041c, 0x2c0c042c, 0x2c0c0c14, | |
| 0x2c0c140c, 0x2c0c1c14, 0x2c0c3e14, 0x2c140404, 0x2c140c0c, 0x2c14141c, 0x2c141c04, 0x2c141c34, | |
| 0x2c142c1c, 0x2c1c0414, 0x2c1c043e, 0x2c1c0c04, 0x2c1c143e, 0x2c1c2424, 0x2c1c2c0c, 0x2c1c342c, | |
| 0x2c1c3e1c, 0x2c24040c, 0x2c240424, 0x2c241404, 0x2c241c14, 0x2c242434, 0x2c2c0c14, 0x2c2c1434, | |
| 0x2c2c2c0c, 0x2c2c2c1c, 0x2c342414, 0x2c3e0414, 0x2c3e0424, 0x2c3e1414, 0x34040c0c, 0x34040c1c, | |
| 0x34040c2c, 0x34041c0c, 0x34041c1c, 0x34043404, 0x340c0404, 0x340c1404, 0x340c143e, 0x340c3424, | |
| 0x34140c14, 0x34141c24, 0x34142414, 0x34142c2c, 0x34143414, 0x34143e04, 0x341c0404, 0x341c0c24, | |
| 0x341c140c, 0x341c2404, 0x3424142c, 0x3424241c, 0x34243414, 0x342c0404, 0x342c041c, 0x342c1c24, | |
| 0x342c3404, 0x3434042c, 0x34342404, 0x343e0c0c, 0x343e0c1c, 0x3e040404, 0x3e040424, 0x3e04043e, | |
| 0x3e041404, 0x3e041414, 0x3e041c34, 0x3e042404, 0x3e042c24, 0x3e043414, 0x3e0c0414, 0x3e0c0c0c, | |
| 0x3e0c1424, 0x3e0c241c, 0x3e0c242c, 0x3e14040c, 0x3e140424, 0x3e140c04, 0x3e140c34, 0x3e14140c, | |
| 0x3e141c04, 0x3e142c0c, 0x3e1c0414, 0x3e1c1c14, 0x3e1c1c2c, 0x3e1c2c1c, 0x3e24040c, 0x3e24042c, | |
| 0x3e240c1c, 0x3e241404, 0x3e242c04, 0x3e2c1414, 0x3e2c2414, 0x3e340414, 0x3e341c0c, 0x3e3e0404, | |
| }; | |
| static const uint64_t iq1s_grid[NGRID_IQ2XXS] = { | |
| 0xffffffffffff0101, 0xffffffffff01ff00, 0xffffffffff010100, 0xffffffff00000000, | |
| 0xffffffff01ff00ff, 0xffffffff01ff0001, 0xffffffff0101ffff, 0xffffffff0101ff01, | |
| 0xffffff00ff000000, 0xffffff000000ff00, 0xffffff00000000ff, 0xffffff0000000100, | |
| 0xffffff0000010000, 0xffffff0001000000, 0xffffff01ffff00ff, 0xffffff01ff01ff00, | |
| 0xffffff01ff010100, 0xffffff0100000001, 0xffffff0101ffff00, 0xffffff0101ff0101, | |
| 0xffffff0101010100, 0xffff00ffff00ff01, 0xffff00ffff0000ff, 0xffff00ff00ff0100, | |
| 0xffff00ff0100ff00, 0xffff00ff010001ff, 0xffff0000ff0101ff, 0xffff000000ffff00, | |
| 0xffff000000000000, 0xffff00000001ff01, 0xffff000001000101, 0xffff0000010100ff, | |
| 0xffff0001ffff0100, 0xffff00010000ff00, 0xffff000100010101, 0xffff000101000000, | |
| 0xffff01ffffff0000, 0xffff01ffff01ffff, 0xffff01ffff010100, 0xffff01ff00000000, | |
| 0xffff01ff01ffffff, 0xffff01ff01ff0001, 0xffff01ff0101ffff, 0xffff01ff01010001, | |
| 0xffff0100ffffff01, 0xffff01000000ffff, 0xffff010000000100, 0xffff010001ff01ff, | |
| 0xffff010001000000, 0xffff0101ff000000, 0xffff0101000101ff, 0xffff010101ffff01, | |
| 0xffff01010101ff00, 0xff00ffffff000000, 0xff00ffff00ffff00, 0xff00ffff00000001, | |
| 0xff00ffff000001ff, 0xff00ffff01010000, 0xff00ff00ffff0000, 0xff00ff00ff00ff00, | |
| 0xff00ff00ff0000ff, 0xff00ff00ff000100, 0xff00ff00ff010001, 0xff00ff0000ff0001, | |
| 0xff00ff000000ffff, 0xff00ff0000000000, 0xff00ff000001ff00, 0xff00ff0000010100, | |
| 0xff00ff0001ff0000, 0xff00ff000100ff00, 0xff00ff0001000100, 0xff00ff01ff000000, | |
| 0xff00ff0100ff0000, 0xff00ff01000001ff, 0xff00ff0101010001, 0xff0000ff00000000, | |
| 0xff0000ff0001ff00, 0xff0000ff00010100, 0xff000000ffff0101, 0xff000000ff000000, | |
| 0xff000000ff01ff00, 0xff00000000ff0000, 0xff0000000000ff00, 0xff000000000000ff, | |
| 0xff00000000000000, 0xff00000000000001, 0xff00000000000100, 0xff0000000001ffff, | |
| 0xff00000000010000, 0xff00000001000000, 0xff00000001010100, 0xff000001ff00ff01, | |
| 0xff000001ff0100ff, 0xff00000100000000, 0xff0000010001ff00, 0xff00000101ff0100, | |
| 0xff0000010100ff00, 0xff0001ff00ff00ff, 0xff0001ff00000101, 0xff0001ff000100ff, | |
| 0xff0001ff01000000, 0xff000100ff0001ff, 0xff0001000000ff01, 0xff00010000000000, | |
| 0xff00010000010001, 0xff00010000010100, 0xff00010001ffff00, 0xff00010001ff0101, | |
| 0xff00010001010000, 0xff000101ffffffff, 0xff000101ff000101, 0xff00010101ff00ff, | |
| 0xff00010101000001, 0xff000101010100ff, 0xff01ffffff000101, 0xff01ffffff01ffff, | |
| 0xff01ffffff01ff01, 0xff01ffffff0101ff, 0xff01ffff00000000, 0xff01ffff01ff0001, | |
| 0xff01ffff0101ff01, 0xff01ff00ff000000, 0xff01ff0000ff0100, 0xff01ff000000ff01, | |
| 0xff01ff0000010000, 0xff01ff00010000ff, 0xff01ff01ff01ff00, 0xff01ff0100000101, | |
| 0xff0100ffffff0000, 0xff0100ffff010000, 0xff0100ff01ff00ff, 0xff0100ff01000100, | |
| 0xff0100ff010100ff, 0xff010000ffffff01, 0xff01000000000000, 0xff0100000101ff00, | |
| 0xff010001ffff00ff, 0xff010001ff000100, 0xff01000100ffff00, 0xff01000100010001, | |
| 0xff01000101ff0001, 0xff010001010001ff, 0xff0101ffffffffff, 0xff0101ffff01ffff, | |
| 0xff0101ffff010101, 0xff0101ff0000ff00, 0xff0101ff01010001, 0xff010100ff000000, | |
| 0xff010100ff01ff01, 0xff01010000ff0001, 0xff01010000000100, 0xff01010001000000, | |
| 0xff0101010100ffff, 0x00ffffff0000ff01, 0x00ffffff000000ff, 0x00ffffff00000100, | |
| 0x00ffffff00010000, 0x00ffff00ffff0001, 0x00ffff00ff0000ff, 0x00ffff00ff000100, | |
| 0x00ffff0000000000, 0x00ffff0001000100, 0x00ffff0001010001, 0x00ffff01ff00ff01, | |
| 0x00ffff0100ff0100, 0x00ffff010000ff00, 0x00ffff01000100ff, 0x00ffff0101ff00ff, | |
| 0x00ffff010101ff00, 0x00ff00ffffffffff, 0x00ff00ffffff01ff, 0x00ff00ffff000101, | |
| 0x00ff00ff00000000, 0x00ff00ff000101ff, 0x00ff00ff01010101, 0x00ff0000ff000000, | |
| 0x00ff0000ff01ffff, 0x00ff000000ff0000, 0x00ff00000000ff00, 0x00ff0000000000ff, | |
| 0x00ff000000000000, 0x00ff000000000001, 0x00ff000000000100, 0x00ff000000010000, | |
| 0x00ff000001ffff01, 0x00ff000001000000, 0x00ff0001ff000101, 0x00ff000100ffffff, | |
| 0x00ff000100000000, 0x00ff0001010001ff, 0x00ff01ffff000000, 0x00ff01ff0001ff00, | |
| 0x00ff01ff01ff0100, 0x00ff0100ff01ff01, 0x00ff010000ff00ff, 0x00ff010000ff0101, | |
| 0x00ff010000000000, 0x00ff010000010101, 0x00ff01000100ff00, 0x00ff010001010000, | |
| 0x00ff0101ffffff00, 0x00ff01010000ff01, 0x00ff010100000100, 0x00ff010101ff0000, | |
| 0x0000ffffffff0100, 0x0000ffffff00ff00, 0x0000ffffff0000ff, 0x0000ffffff010000, | |
| 0x0000ffff00000000, 0x0000ffff00010101, 0x0000ffff01ffff01, 0x0000ffff01000100, | |
| 0x0000ff00ff000000, 0x0000ff00ff01ff00, 0x0000ff00ff0101ff, 0x0000ff0000ff0000, | |
| 0x0000ff000000ff00, 0x0000ff00000000ff, 0x0000ff0000000000, 0x0000ff0000000001, | |
| 0x0000ff0000000100, 0x0000ff0000010000, 0x0000ff0001ffffff, 0x0000ff0001ff01ff, | |
| 0x0000ff0001000000, 0x0000ff000101ffff, 0x0000ff01ffff0101, 0x0000ff01ff010000, | |
| 0x0000ff0100000000, 0x0000ff0101000101, 0x000000ffffff0001, 0x000000ffff000000, | |
| 0x000000ff00ff0000, 0x000000ff0000ff00, 0x000000ff000000ff, 0x000000ff00000000, | |
| 0x000000ff00000001, 0x000000ff00000100, 0x000000ff00010000, 0x000000ff01000000, | |
| 0x000000ff0101ff00, 0x00000000ffff0000, 0x00000000ff00ff00, 0x00000000ff0000ff, | |
| 0x00000000ff000000, 0x00000000ff000001, 0x00000000ff000100, 0x00000000ff010000, | |
| 0x0000000000ffff00, 0x0000000000ff00ff, 0x0000000000ff0000, 0x0000000000ff0001, | |
| 0x0000000000ff0100, 0x000000000000ffff, 0x000000000000ff00, 0x000000000000ff01, | |
| 0x00000000000000ff, 0x0000000000000001, 0x00000000000001ff, 0x0000000000000100, | |
| 0x0000000000000101, 0x000000000001ff00, 0x00000000000100ff, 0x0000000000010000, | |
| 0x0000000000010001, 0x0000000000010100, 0x0000000001ff0000, 0x000000000100ff00, | |
| 0x00000000010000ff, 0x0000000001000000, 0x0000000001000001, 0x0000000001000100, | |
| 0x0000000001010000, 0x00000001ffff01ff, 0x00000001ff000000, 0x0000000100ff0000, | |
| 0x000000010000ff00, 0x00000001000000ff, 0x0000000100000000, 0x0000000100000001, | |
| 0x0000000100000100, 0x0000000100010000, 0x0000000101000000, 0x000001ffff00ff00, | |
| 0x000001ffff010001, 0x000001ffff0101ff, 0x000001ff00ffff01, 0x000001ff0000ffff, | |
| 0x000001ff00000000, 0x000001ff010000ff, 0x000001ff01010100, 0x00000100ffff0100, | |
| 0x00000100ff000000, 0x0000010000ff0000, 0x000001000000ff00, 0x00000100000000ff, | |
| 0x0000010000000000, 0x0000010000000001, 0x0000010000000100, 0x0000010000010000, | |
| 0x0000010001000000, 0x000001000101ff01, 0x00000101ffff0001, 0x00000101ff01ffff, | |
| 0x0000010100000000, 0x0000010101010100, 0x0001ffffff000000, 0x0001ffff00ffffff, | |
| 0x0001ffff00000100, 0x0001ffff0001ff00, 0x0001ffff01000000, 0x0001ff00ffffff00, | |
| 0x0001ff00ffff01ff, 0x0001ff00ff010000, 0x0001ff0000000000, 0x0001ff0000010001, | |
| 0x0001ff0001ff0000, 0x0001ff0001010100, 0x0001ff01ff0000ff, 0x0001ff01ff000001, | |
| 0x0001ff0100ffffff, 0x0001ff010001ffff, 0x0001ff01000101ff, 0x0001ff010100ff01, | |
| 0x000100ffff00ffff, 0x000100ffff00ff01, 0x000100ffff000100, 0x000100ff00000000, | |
| 0x000100ff000101ff, 0x000100ff01ff0101, 0x000100ff0100ffff, 0x000100ff01010101, | |
| 0x00010000ff000000, 0x00010000ff010100, 0x0001000000ff0000, 0x000100000000ff00, | |
| 0x00010000000000ff, 0x0001000000000000, 0x0001000000000001, 0x0001000000000100, | |
| 0x0001000000010000, 0x0001000001ffff01, 0x0001000001000000, 0x0001000100ff0101, | |
| 0x0001000100000000, 0x00010001010100ff, 0x000101ffffff01ff, 0x000101ffffff0101, | |
| 0x000101ff00010000, 0x000101ff01ff0000, 0x000101ff0100ff01, 0x00010100ffff0000, | |
| 0x0001010000000000, 0x000101000001ffff, 0x0001010000010101, 0x00010100010001ff, | |
| 0x00010101ff00ff00, 0x00010101ff010001, 0x0001010100ffffff, 0x0001010100ff01ff, | |
| 0x00010101000101ff, 0x0001010101ff0000, 0x000101010100ff01, 0x0001010101000101, | |
| 0x01ffffffffff0101, 0x01ffffffff01ffff, 0x01ffffffff01ff01, 0x01ffffffff0101ff, | |
| 0x01ffffffff010101, 0x01ffffff00000000, 0x01ffffff01ff01ff, 0x01ffffff01000101, | |
| 0x01ffffff0101ff01, 0x01ffffff010100ff, 0x01ffff000000ff00, 0x01ffff0000000001, | |
| 0x01ffff00000001ff, 0x01ffff0000010000, 0x01ffff0001ff0000, 0x01ffff01ffffffff, | |
| 0x01ffff01ffff01ff, 0x01ffff01ff000000, 0x01ffff01ff01ffff, 0x01ffff01ff0101ff, | |
| 0x01ffff010100ffff, 0x01ff00ffffff0000, 0x01ff00ffff010000, 0x01ff00ff00ffff01, | |
| 0x01ff0000ff0000ff, 0x01ff000000000000, 0x01ff00000001ff01, 0x01ff000001ffffff, | |
| 0x01ff000001010100, 0x01ff0001ffffff01, 0x01ff0001ff010001, 0x01ff000101ff0100, | |
| 0x01ff000101000001, 0x01ff0001010100ff, 0x01ff01ffff00ffff, 0x01ff01ff00010001, | |
| 0x01ff01ff01000000, 0x01ff01ff010101ff, 0x01ff0100ff000001, 0x01ff010000ffff00, | |
| 0x01ff010000000100, 0x01ff010001ff01ff, 0x01ff01000101ffff, 0x01ff0101ffff00ff, | |
| 0x01ff0101ffff0101, 0x01ff0101ff0101ff, 0x01ff010100010000, 0x0100ffff00ff00ff, | |
| 0x0100ffff00ff0001, 0x0100ffff00000100, 0x0100ffff0100ff00, 0x0100ff00ffff0000, | |
| 0x0100ff00ff00ffff, 0x0100ff00ff00ff01, 0x0100ff00ff000100, 0x0100ff00ff010000, | |
| 0x0100ff0000000000, 0x0100ff00000100ff, 0x0100ff0001ff0101, 0x0100ff0001010101, | |
| 0x0100ff0100ff00ff, 0x0100ff0100ff0001, 0x0100ff0100000100, 0x0100ff0100010001, | |
| 0x0100ff0101000000, 0x010000ffff00ff00, 0x010000ff0000ffff, 0x010000ff00000000, | |
| 0x010000ff010001ff, 0x010000ff01010001, 0x01000000ffffff00, 0x01000000ffff0101, | |
| 0x01000000ff000000, 0x01000000ff0100ff, 0x01000000ff010101, 0x0100000000ff0000, | |
| 0x010000000000ff00, 0x01000000000000ff, 0x0100000000000000, 0x0100000000000001, | |
| 0x0100000000000100, 0x0100000000010000, 0x0100000001000000, 0x0100000100000000, | |
| 0x01000001000101ff, 0x0100000101ffff01, 0x010001ffff000101, 0x010001ff00ff0100, | |
| 0x010001ff0000ff00, 0x010001ff000100ff, 0x010001ff01ffffff, 0x01000100ffff0000, | |
| 0x01000100ff0001ff, 0x0100010000000000, 0x010001000001ff00, 0x0100010001ff0000, | |
| 0x01000100010000ff, 0x0100010001000101, 0x01000101ff00ff01, 0x0100010100ff0100, | |
| 0x010001010000ffff, 0x0100010101010001, 0x0101ffffffff0101, 0x0101ffffff0001ff, | |
| 0x0101ffffff01ffff, 0x0101ffffff010101, 0x0101ffff00000000, 0x0101ffff0101ffff, | |
| 0x0101ffff010101ff, 0x0101ff00ff000000, 0x0101ff0000ff0100, 0x0101ff000000ff00, | |
| 0x0101ff0000010000, 0x0101ff00010000ff, 0x0101ff0001000001, 0x0101ff01ff010101, | |
| 0x0101ff0100000000, 0x0101ff010101ff00, 0x010100ffffff0000, 0x010100ffff010000, | |
| 0x010100ff00ff01ff, 0x010100ff000000ff, 0x010100ff00000101, 0x010100ff01ffff00, | |
| 0x01010000ffffff01, 0x01010000ff000100, 0x01010000ff01ff01, 0x0101000000000000, | |
| 0x01010000000100ff, 0x010100000101ff01, 0x01010001ffff0000, 0x01010001ff00ffff, | |
| 0x01010001ff010000, 0x0101000101ffffff, 0x0101000101ff01ff, 0x0101000101010101, | |
| 0x010101ffff01ffff, 0x010101ff00000000, 0x010101ff0001ff01, 0x010101ff0101ffff, | |
| 0x010101ff010101ff, 0x01010100ffffffff, 0x01010100ff000001, 0x010101000000ff00, | |
| 0x0101010001010000, 0x0101010100ff0001, 0x010101010001ff01, 0x010101010101ffff, | |
| }; | |
| static const uint8_t ksigns_iq2xs[128] = { | |
| 0, 129, 130, 3, 132, 5, 6, 135, 136, 9, 10, 139, 12, 141, 142, 15, | |
| 144, 17, 18, 147, 20, 149, 150, 23, 24, 153, 154, 27, 156, 29, 30, 159, | |
| 160, 33, 34, 163, 36, 165, 166, 39, 40, 169, 170, 43, 172, 45, 46, 175, | |
| 48, 177, 178, 51, 180, 53, 54, 183, 184, 57, 58, 187, 60, 189, 190, 63, | |
| 192, 65, 66, 195, 68, 197, 198, 71, 72, 201, 202, 75, 204, 77, 78, 207, | |
| 80, 209, 210, 83, 212, 85, 86, 215, 216, 89, 90, 219, 92, 221, 222, 95, | |
| 96, 225, 226, 99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111, | |
| 240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255, | |
| }; | |
| static const uint8_t kmask_iq2xs[8] = {1, 2, 4, 8, 16, 32, 64, 128}; | |
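| // The two tables above drive sign decoding for the IQ2/IQ3 formats: kmask_iq2xs[j] selects the | |
| // j-th sign bit, and ksigns_iq2xs maps a 7-bit sign index to an 8-bit sign pattern. As far as | |
| // can be inferred from the table, bit 7 of each entry is the parity of the low 7 bits, i.e. the | |
| // 8th sign is chosen so that every pattern has an even number of negative signs, for example: | |
| //   ksigns_iq2xs[1] = 129 = 0b10000001  ->  lanes 0 and 7 negative | |
| //   ksigns_iq2xs[3] =   3 = 0b00000011  ->  lanes 0 and 1 negative | |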
| void dequantize_row_iq2_xxs(const block_iq2_xxs * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| uint32_t aux32[2]; | |
| const uint8_t * aux8 = (const uint8_t *)aux32; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { | |
| memcpy(aux32, x[i].qs + 4*ib32, 2*sizeof(uint32_t)); | |
| const float db = d * (0.5f + (aux32[1] >> 28)) * 0.25f; | |
| for (int l = 0; l < 4; ++l) { | |
| const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); | |
| const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; | |
| for (int j = 0; j < 8; ++j) { | |
| y[j] = db * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); | |
| } | |
| y += 8; | |
| } | |
| } | |
| } | |
| } | |
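| // Layout recap for iq2_xxs, as used by the loop above: each 32-value sub-block reads two 32-bit | |
| // words from qs. aux32[0] holds four 8-bit indices into iq2xxs_grid; aux32[1] packs four 7-bit | |
| // sign indices in its low 28 bits and a 4-bit scale s in the top nibble, so | |
| // db = d * (0.5 + s) * 0.25 = d * (2*s + 1) / 8. | |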
| // ====================== 2.3125 bpw (de)-quantization | |
| void dequantize_row_iq2_xs(const block_iq2_xs * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| float db[2]; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { | |
| db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f; | |
| db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f; | |
| for (int l = 0; l < 4; ++l) { | |
| const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (x[i].qs[4*ib32 + l] & 511)); | |
| const uint8_t signs = ksigns_iq2xs[x[i].qs[4*ib32 + l] >> 9]; | |
| for (int j = 0; j < 8; ++j) { | |
| y[j] = db[l/2] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); | |
| } | |
| y += 8; | |
| } | |
| } | |
| } | |
| } | |
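| // Worked example for the iq2_xs scales above: each 16-value half-block uses a 4-bit scale s with | |
| // db = d * (0.5 + s) * 0.25 = d * (2*s + 1) / 8, so s = 3 gives db = 7*d/8. Each 16-bit qs entry | |
| // packs a 9-bit grid index in its low bits and a 7-bit sign index in its high bits. | |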
| // ====================== 3.0625 bpw (de)-quantization | |
| void dequantize_row_iq3_xxs(const block_iq3_xxs * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| uint32_t aux32; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * qs = x[i].qs; | |
| const uint8_t * scales_and_signs = qs + QK_K/4; | |
| for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { | |
| memcpy(&aux32, scales_and_signs + 4*ib32, sizeof(uint32_t)); | |
| const float db = d * (0.5f + (aux32 >> 28)) * 0.5f; | |
| for (int l = 0; l < 4; ++l) { | |
| const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; | |
| const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + qs[2*l+0]); | |
| const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + qs[2*l+1]); | |
| for (int j = 0; j < 4; ++j) { | |
| y[j+0] = db * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f); | |
| y[j+4] = db * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f); | |
| } | |
| y += 8; | |
| } | |
| qs += 8; | |
| } | |
| } | |
| } | |
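| // For iq3_xxs the per-sub-block scale is coarser: db = d * (0.5 + (aux32 >> 28)) * 0.5 | |
| // = d * (2*s + 1) / 4, and each byte of qs indexes a 4-value row of iq3xxs_grid (two rows | |
| // per 8 outputs). | |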
| // ====================== 3.3125 bpw (de)-quantization | |
| void dequantize_row_iq3_s(const block_iq3_s * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * qs = x[i].qs; | |
| const uint8_t * qh = x[i].qh; | |
| const uint8_t * signs = x[i].signs; | |
| for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { | |
| const float db1 = d * (0.5f + (x[i].scales[ib32/2] & 0xf)) * 0.5f; | |
| const float db2 = d * (0.5f + (x[i].scales[ib32/2] >> 4)) * 0.5f; | |
| for (int l = 0; l < 4; ++l) { | |
| const uint8_t * grid1 = (const uint8_t *)(iq3xs_grid + (qs[2*l+0] | ((qh[0] << (8-2*l)) & 256))); | |
| const uint8_t * grid2 = (const uint8_t *)(iq3xs_grid + (qs[2*l+1] | ((qh[0] << (7-2*l)) & 256))); | |
| for (int j = 0; j < 4; ++j) { | |
| y[j+0] = db1 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f); | |
| y[j+4] = db1 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f); | |
| } | |
| y += 8; | |
| } | |
| qs += 8; | |
| signs += 4; | |
| for (int l = 0; l < 4; ++l) { | |
| const uint8_t * grid1 = (const uint8_t *)(iq3xs_grid + (qs[2*l+0] | ((qh[1] << (8-2*l)) & 256))); | |
| const uint8_t * grid2 = (const uint8_t *)(iq3xs_grid + (qs[2*l+1] | ((qh[1] << (7-2*l)) & 256))); | |
| for (int j = 0; j < 4; ++j) { | |
| y[j+0] = db2 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f); | |
| y[j+4] = db2 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f); | |
| } | |
| y += 8; | |
| } | |
| qh += 2; | |
| qs += 8; | |
| signs += 4; | |
| } | |
| } | |
| } | |
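| // In iq3_s the grid has 512 rows, so the 9th index bit comes from the qh bytes: | |
| // grid index = qs[..] | ((qh[..] << shift) & 256), and the sign bytes are stored explicitly in | |
| // x[i].signs instead of being looked up through ksigns_iq2xs. | |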
| // ====================== 1.5625 bpw (de)-quantization | |
| void dequantize_row_iq1_s(const block_iq1_s * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| float db[4]; | |
| uint16_t idx[4]; | |
| //const int8_t * grid[4]; | |
| for (int i = 0; i < nb; i++) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * sc = x[i].scales; | |
| const uint8_t * qs = x[i].qs; | |
| for (int i8 = 0; i8 < QK_K/8; i8 += 4) { | |
| idx[0] = qs[0] | ((sc[0] & 0x08) << 5); | |
| idx[1] = qs[1] | ((sc[0] & 0x80) << 1); | |
| idx[2] = qs[2] | ((sc[1] & 0x08) << 5); | |
| idx[3] = qs[3] | ((sc[1] & 0x80) << 1); | |
| //grid[0] = (const int8_t *)(iq1s_grid + (qs[0] | ((sc[0] & 0x08) << 5))); | |
| //grid[1] = (const int8_t *)(iq1s_grid + (qs[1] | ((sc[0] & 0x80) << 1))); | |
| //grid[2] = (const int8_t *)(iq1s_grid + (qs[2] | ((sc[1] & 0x08) << 5))); | |
| //grid[3] = (const int8_t *)(iq1s_grid + (qs[3] | ((sc[1] & 0x80) << 1))); | |
| db[0] = d * (2*(sc[0] & 7) + 1); | |
| db[1] = d * (2*((sc[0] >> 4) & 7) + 1); | |
| db[2] = d * (2*(sc[1] & 7) + 1); | |
| db[3] = d * (2*((sc[1] >> 4) & 7) + 1); | |
| for (int l = 0; l < 4; ++l) { | |
| const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]); | |
| for (int j = 0; j < 8; ++j) { | |
| //y[j] = db[l] * grid[l][j]; | |
| y[j] = db[l] * grid[j]; | |
| } | |
| y += 8; | |
| } | |
| qs += 4; | |
| sc += 2; | |
| } | |
| } | |
| } | |
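| // iq1_s recap, from the loop above: each group of 8 values takes an 8-bit index from qs plus one | |
| // extra bit from the scale byte (bit 3 for even groups, bit 7 for odd groups), giving a 9-bit | |
| // index into iq1s_grid, and a 3-bit scale s with db = d * (2*s + 1). | |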
| static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; | |
| void dequantize_row_iq4_nl(const block_iq4_nl * restrict x, float * restrict y, int k) { | |
| assert(k % QK4_NL == 0); | |
| const int nb = k / QK4_NL; | |
| for (int i = 0; i < nb; i++) { | |
| const uint8_t * qs = x[i].qs; | |
| const float d = GGML_FP16_TO_FP32(x[i].d); | |
| for (int j = 0; j < QK4_NL/2; ++j) { | |
| y[j+ 0] = d * kvalues_iq4nl[qs[j] & 0xf]; | |
| y[j+QK4_NL/2] = d * kvalues_iq4nl[qs[j] >> 4]; | |
| } | |
| y += QK4_NL; | |
| qs += QK4_NL/2; | |
| } | |
| } | |
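| // iq4_nl is a non-linear 4-bit format: each nibble is a direct index into kvalues_iq4nl, so a | |
| // block decodes as y = d * kvalues_iq4nl[q]; e.g. q = 0 -> -127*d, q = 8 -> 1*d, q = 15 -> 113*d. | |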
| //===================================== Q8_K ============================================== | |
| void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| for (int i = 0; i < nb; i++) { | |
| float max = 0; | |
| float amax = 0; | |
| for (int j = 0; j < QK_K; ++j) { | |
| float ax = fabsf(x[j]); | |
| if (ax > amax) { | |
| amax = ax; max = x[j]; | |
| } | |
| } | |
| if (!amax) { | |
| y[i].d = 0; | |
| memset(y[i].qs, 0, QK_K); | |
| x += QK_K; | |
| continue; | |
| } | |
| //const float iscale = -128.f/max; | |
| // We need this change for IQ2_XXS, else the AVX implementation becomes very awkward | |
| const float iscale = -127.f/max; | |
| for (int j = 0; j < QK_K; ++j) { | |
| int v = nearest_int(iscale*x[j]); | |
| y[i].qs[j] = MIN(127, v); | |
| } | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| int sum = 0; | |
| for (int ii = 0; ii < 16; ++ii) { | |
| sum += y[i].qs[j*16 + ii]; | |
| } | |
| y[i].bsums[j] = sum; | |
| } | |
| y[i].d = 1/iscale; | |
| x += QK_K; | |
| } | |
| } | |
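| // Scale recovery for q8_K: with iscale = -127/max the stored scale is y[i].d = 1/iscale = -max/127, | |
| // so x[j] is reconstructed as approximately y[i].d * y[i].qs[j]. The bsums array caches the sums of | |
| // each group of 16 quants, which the k-quant dot products use later to fold in block offsets. | |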
| void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| const int nb = k / QK_K; | |
| for (int i = 0; i < nb; i++) { | |
| for (int j = 0; j < QK_K; ++j) { | |
| *y++ = x[i].d * x[i].qs[j]; | |
| } | |
| } | |
| } | |
| void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) { | |
| quantize_row_q8_K_reference(x, y, k); | |
| } | |
| //===================================== Dot products ================================= | |
| // | |
| // Helper functions | |
| // | |
| // shuffles to pick the required scales in dot products | |
| static inline __m256i get_scale_shuffle_q3k(int i) { | |
| static const uint8_t k_shuffle[128] = { | |
| 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, | |
| 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, | |
| 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, | |
| 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, | |
| }; | |
| return _mm256_loadu_si256((const __m256i*)k_shuffle + i); | |
| } | |
| static inline __m256i get_scale_shuffle_k4(int i) { | |
| static const uint8_t k_shuffle[256] = { | |
| 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, | |
| 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, | |
| 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, | |
| 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, | |
| 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, | |
| 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, | |
| 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, | |
| 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 | |
| }; | |
| return _mm256_loadu_si256((const __m256i*)k_shuffle + i); | |
| } | |
| static inline __m128i get_scale_shuffle(int i) { | |
| static const uint8_t k_shuffle[128] = { | |
| 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, | |
| 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, | |
| 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, | |
| 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, | |
| 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, | |
| 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, | |
| 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, | |
| 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 | |
| }; | |
| return _mm_loadu_si128((const __m128i*)k_shuffle + i); | |
| } | |
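| // Note on the shuffle tables above (inferred from their layout): row i of the k4 table repeats the | |
| // byte pair (2*i, 2*i+1) sixteen times, so using it with a byte shuffle such as _mm256_shuffle_epi8 | |
| // broadcasts the i-th 16-bit scale into every 16-bit lane of a 256-bit register; the q3k and 128-bit | |
| // variants follow the same idea with different granularity. | |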
| void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| const int qk = QK8_0; | |
| const int nb = n / qk; | |
| assert(n % qk == 0); | |
| assert((nrc == 2) || (nrc == 1)); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q4_0 * restrict x = vx; | |
| const block_q8_0 * restrict y = vy; | |
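| // NOTE: the blocks that follow are alternative per-architecture implementations (ARM NEON with | |
| // int8 matmul, plain ARM NEON, AVX2, AVX, SSSE3, RISC-V vector, and scalar). In the upstream ggml | |
| // sources each one sits in its own #if defined(...) block and only a single variant is compiled; | |
| // read them as independent variants rather than one sequential body. The pair of asserts on nrc | |
| // above are likewise the i8mm / non-i8mm variants, and the same pattern applies to the other | |
| // ggml_vec_dot_* routines in this file. | |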
| if (nrc == 2) { | |
| const block_q4_0 * restrict vx0 = vx; | |
| const block_q4_0 * restrict vx1 = vx + bx; | |
| const block_q8_0 * restrict vy0 = vy; | |
| const block_q8_0 * restrict vy1 = vy + by; | |
| float32x4_t sumv0 = vdupq_n_f32(0.0f); | |
| for (int i = 0; i < nb; i++) { | |
| const block_q4_0 * restrict b_x0 = &vx0[i]; | |
| const block_q4_0 * restrict b_x1 = &vx1[i]; | |
| const block_q8_0 * restrict b_y0 = &vy0[i]; | |
| const block_q8_0 * restrict b_y1 = &vy1[i]; | |
| const uint8x16_t m4b = vdupq_n_u8(0x0F); | |
| const int8x16_t s8b = vdupq_n_s8(0x8); | |
| const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); | |
| const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); | |
| // 4-bit -> 8-bit | |
| const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); | |
| const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); | |
| const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); | |
| const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); | |
| // sub 8 | |
| const int8x16_t x0_l = vsubq_s8(v0_0l, s8b); | |
| const int8x16_t x0_h = vsubq_s8(v0_0h, s8b); | |
| const int8x16_t x1_l = vsubq_s8(v0_1l, s8b); | |
| const int8x16_t x1_h = vsubq_s8(v0_1h, s8b); | |
| // load y | |
| const int8x16_t y0_l = vld1q_s8(b_y0->qs); | |
| const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); | |
| const int8x16_t y1_l = vld1q_s8(b_y1->qs); | |
| const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); | |
| float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), | |
| GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), | |
| GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), | |
| GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; | |
| int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); | |
| int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); | |
| int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); | |
| int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); | |
| int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); | |
| int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); | |
| int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); | |
| int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); | |
| sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), | |
| l1, r1)), l2, r2)), l3, r3))), scale); | |
| } | |
| float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); | |
| float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); | |
| vst1_f32(s, vget_low_f32(sumv2)); | |
| vst1_f32(s + bs, vget_high_f32(sumv2)); | |
| return; | |
| } | |
| float32x4_t sumv0 = vdupq_n_f32(0.0f); | |
| float32x4_t sumv1 = vdupq_n_f32(0.0f); | |
| assert(nb % 2 == 0); // TODO: handle odd nb | |
| for (int i = 0; i < nb; i += 2) { | |
| const block_q4_0 * restrict x0 = &x[i + 0]; | |
| const block_q4_0 * restrict x1 = &x[i + 1]; | |
| const block_q8_0 * restrict y0 = &y[i + 0]; | |
| const block_q8_0 * restrict y1 = &y[i + 1]; | |
| const uint8x16_t m4b = vdupq_n_u8(0x0F); | |
| const int8x16_t s8b = vdupq_n_s8(0x8); | |
| const uint8x16_t v0_0 = vld1q_u8(x0->qs); | |
| const uint8x16_t v0_1 = vld1q_u8(x1->qs); | |
| // 4-bit -> 8-bit | |
| const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); | |
| const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); | |
| const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); | |
| const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); | |
| // sub 8 | |
| const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); | |
| const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); | |
| const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); | |
| const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); | |
| // load y | |
| const int8x16_t v1_0l = vld1q_s8(y0->qs); | |
| const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); | |
| const int8x16_t v1_1l = vld1q_s8(y1->qs); | |
| const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); | |
| // dot product into int32x4_t | |
| const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); | |
| const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); | |
| sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); | |
| sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); | |
| } | |
| *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); | |
| // Initialize accumulator with zeros | |
| __m256 acc = _mm256_setzero_ps(); | |
| // Main loop | |
| for (int i = 0; i < nb; ++i) { | |
| /* Compute combined scale for the block */ | |
| const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); | |
| __m256i qx = bytes_from_nibbles_32(x[i].qs); | |
| // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. | |
| const __m256i off = _mm256_set1_epi8( 8 ); | |
| qx = _mm256_sub_epi8( qx, off ); | |
| __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs); | |
| const __m256 q = mul_sum_i8_pairs_float(qx, qy); | |
| /* Multiply q with scale and accumulate */ | |
| acc = _mm256_fmadd_ps( d, q, acc ); | |
| } | |
| *s = hsum_float_8(acc); | |
| // Initialize accumulator with zeros | |
| __m256 acc = _mm256_setzero_ps(); | |
| // Main loop | |
| for (int i = 0; i < nb; ++i) { | |
| // Compute combined scale for the block | |
| const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); | |
| const __m128i lowMask = _mm_set1_epi8(0xF); | |
| const __m128i off = _mm_set1_epi8(8); | |
| const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs); | |
| __m128i bx_0 = _mm_and_si128(lowMask, tmp); | |
| __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs); | |
| bx_0 = _mm_sub_epi8(bx_0, off); | |
| const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); | |
| bx_0 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4)); | |
| by_0 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16)); | |
| bx_0 = _mm_sub_epi8(bx_0, off); | |
| const __m128i i32_1 = mul_sum_i8_pairs(bx_0, by_0); | |
| // Convert int32_t to float | |
| __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1)); | |
| // Apply the scale, and accumulate | |
| acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
| // set constants | |
| const __m128i lowMask = _mm_set1_epi8(0xF); | |
| const __m128i off = _mm_set1_epi8(8); | |
| // Initialize accumulator with zeros | |
| __m128 acc_0 = _mm_setzero_ps(); | |
| __m128 acc_1 = _mm_setzero_ps(); | |
| __m128 acc_2 = _mm_setzero_ps(); | |
| __m128 acc_3 = _mm_setzero_ps(); | |
| // First round without accumulation | |
| { | |
| _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0); | |
| _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0); | |
| // Compute combined scale for blocks 0 and 1 | |
| const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) ); | |
| const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs); | |
| __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1); | |
| __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs); | |
| bx_0 = _mm_sub_epi8(bx_0, off); | |
| const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); | |
| __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); | |
| __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16)); | |
| bx_1 = _mm_sub_epi8(bx_1, off); | |
| const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); | |
| _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0); | |
| _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0); | |
| // Compute combined scale for blocks 2 and 3 | |
| const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) ); | |
| const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs); | |
| __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3); | |
| __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs); | |
| bx_2 = _mm_sub_epi8(bx_2, off); | |
| const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); | |
| __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4)); | |
| __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16)); | |
| bx_3 = _mm_sub_epi8(bx_3, off); | |
| const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); | |
| // Convert int32_t to float | |
| __m128 p0 = _mm_cvtepi32_ps(i32_0); | |
| __m128 p1 = _mm_cvtepi32_ps(i32_1); | |
| __m128 p2 = _mm_cvtepi32_ps(i32_2); | |
| __m128 p3 = _mm_cvtepi32_ps(i32_3); | |
| // Apply the scale | |
| acc_0 = _mm_mul_ps( d_0_1, p0 ); | |
| acc_1 = _mm_mul_ps( d_0_1, p1 ); | |
| acc_2 = _mm_mul_ps( d_2_3, p2 ); | |
| acc_3 = _mm_mul_ps( d_2_3, p3 ); | |
| } | |
| assert(nb % 2 == 0); // TODO: handle odd nb | |
| // Main loop | |
| for (int i = 2; i < nb; i+=2) { | |
| _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0); | |
| _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0); | |
| // Compute combined scale for blocks 0 and 1 | |
| const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); | |
| const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs); | |
| __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1); | |
| __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs); | |
| bx_0 = _mm_sub_epi8(bx_0, off); | |
| const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); | |
| __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); | |
| __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16)); | |
| bx_1 = _mm_sub_epi8(bx_1, off); | |
| const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); | |
| _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0); | |
| _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0); | |
| // Compute combined scale for blocks 2 and 3 | |
| const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) ); | |
| const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs); | |
| __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3); | |
| __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs); | |
| bx_2 = _mm_sub_epi8(bx_2, off); | |
| const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); | |
| __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4)); | |
| __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16)); | |
| bx_3 = _mm_sub_epi8(bx_3, off); | |
| const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); | |
| // Convert int32_t to float | |
| __m128 p0 = _mm_cvtepi32_ps(i32_0); | |
| __m128 p1 = _mm_cvtepi32_ps(i32_1); | |
| __m128 p2 = _mm_cvtepi32_ps(i32_2); | |
| __m128 p3 = _mm_cvtepi32_ps(i32_3); | |
| // Apply the scale | |
| __m128 p0_d = _mm_mul_ps( d_0_1, p0 ); | |
| __m128 p1_d = _mm_mul_ps( d_0_1, p1 ); | |
| __m128 p2_d = _mm_mul_ps( d_2_3, p2 ); | |
| __m128 p3_d = _mm_mul_ps( d_2_3, p3 ); | |
| // Accumulate | |
| acc_0 = _mm_add_ps(p0_d, acc_0); | |
| acc_1 = _mm_add_ps(p1_d, acc_1); | |
| acc_2 = _mm_add_ps(p2_d, acc_2); | |
| acc_3 = _mm_add_ps(p3_d, acc_3); | |
| } | |
| *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3); | |
| float sumf = 0.0; | |
| size_t vl = __riscv_vsetvl_e8m1(qk/2); | |
| for (int i = 0; i < nb; i++) { | |
| // load elements | |
| vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); | |
| vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); | |
| vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); | |
| // mask and store lower part of x, and then upper part | |
| vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); | |
| vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); | |
| vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); | |
| vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); | |
| // subtract offset | |
| vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl); | |
| vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl); | |
| vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); | |
| vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); | |
| vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); | |
| vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); | |
| vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); | |
| int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); | |
| sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d); | |
| } | |
| *s = sumf; | |
| // scalar | |
| float sumf = 0.0; | |
| for (int i = 0; i < nb; i++) { | |
| int sumi = 0; | |
| for (int j = 0; j < qk/2; ++j) { | |
| const int v0 = (x[i].qs[j] & 0x0F) - 8; | |
| const int v1 = (x[i].qs[j] >> 4) - 8; | |
| sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]); | |
| } | |
| sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d); | |
| } | |
| *s = sumf; | |
| } | |
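| // All implementations of ggml_vec_dot_q4_0_q8_0 above compute the same quantity as the scalar | |
| // reference path: | |
| //   s = sum_i d_x[i]*d_y[i] * sum_j [ ((qs[j] & 0xF) - 8) * y[j] + ((qs[j] >> 4) - 8) * y[j + qk/2] ] | |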
| void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| const int qk = QK8_1; | |
| const int nb = n / qk; | |
| assert(n % qk == 0); | |
| assert((nrc == 2) || (nrc == 1)); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q4_1 * restrict x = vx; | |
| const block_q8_1 * restrict y = vy; | |
| if (nrc == 2) { | |
| const block_q4_1 * restrict vx0 = vx; | |
| const block_q4_1 * restrict vx1 = vx + bx; | |
| const block_q8_1 * restrict vy0 = vy; | |
| const block_q8_1 * restrict vy1 = vy + by; | |
| float32x4_t sumv0 = vdupq_n_f32(0.0f); | |
| float32x4_t summs0 = vdupq_n_f32(0.0f); | |
| for (int i = 0; i < nb; i++) { | |
| const block_q4_1 * restrict b_x0 = &vx0[i]; | |
| const block_q4_1 * restrict b_x1 = &vx1[i]; | |
| const block_q8_1 * restrict b_y0 = &vy0[i]; | |
| const block_q8_1 * restrict b_y1 = &vy1[i]; | |
| float32x4_t summs_t = {GGML_FP16_TO_FP32(b_x0->m) * b_y0->s, | |
| GGML_FP16_TO_FP32(b_x1->m) * b_y0->s, | |
| GGML_FP16_TO_FP32(b_x0->m) * b_y1->s, | |
| GGML_FP16_TO_FP32(b_x1->m) * b_y1->s}; | |
| summs0 += summs_t; | |
| const uint8x16_t m4b = vdupq_n_u8(0x0F); | |
| const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); | |
| const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); | |
| // 4-bit -> 8-bit | |
| const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); | |
| const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); | |
| const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); | |
| const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); | |
| // load y | |
| const int8x16_t y0_l = vld1q_s8(b_y0->qs); | |
| const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); | |
| const int8x16_t y1_l = vld1q_s8(b_y1->qs); | |
| const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); | |
| // mmla into int32x4_t | |
| float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), | |
| GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), | |
| GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), | |
| GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; | |
| int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); | |
| int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); | |
| int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); | |
| int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); | |
| int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); | |
| int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); | |
| int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); | |
| int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); | |
| sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), | |
| l1, r1)), l2, r2)), l3, r3))), scale); | |
| } | |
| float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); | |
| float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); | |
| sumv2 = sumv2 + summs0; | |
| vst1_f32(s, vget_low_f32(sumv2)); | |
| vst1_f32(s + bs, vget_high_f32(sumv2)); | |
| return; | |
| } | |
| // TODO: add WASM SIMD | |
| float32x4_t sumv0 = vdupq_n_f32(0.0f); | |
| float32x4_t sumv1 = vdupq_n_f32(0.0f); | |
| float summs = 0; | |
| assert(nb % 2 == 0); // TODO: handle odd nb | |
| for (int i = 0; i < nb; i += 2) { | |
| const block_q4_1 * restrict x0 = &x[i + 0]; | |
| const block_q4_1 * restrict x1 = &x[i + 1]; | |
| const block_q8_1 * restrict y0 = &y[i + 0]; | |
| const block_q8_1 * restrict y1 = &y[i + 1]; | |
| summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s; | |
| const uint8x16_t m4b = vdupq_n_u8(0x0F); | |
| const uint8x16_t v0_0 = vld1q_u8(x0->qs); | |
| const uint8x16_t v0_1 = vld1q_u8(x1->qs); | |
| // 4-bit -> 8-bit | |
| const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); | |
| const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); | |
| const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); | |
| const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); | |
| // load y | |
| const int8x16_t v1_0l = vld1q_s8(y0->qs); | |
| const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); | |
| const int8x16_t v1_1l = vld1q_s8(y1->qs); | |
| const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); | |
| // dot product into int32x4_t | |
| const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); | |
| const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); | |
| sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d); | |
| sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d); | |
| } | |
| *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs; | |
| // Initialize accumulator with zeros | |
| __m256 acc = _mm256_setzero_ps(); | |
| float summs = 0; | |
| // Main loop | |
| for (int i = 0; i < nb; ++i) { | |
| const float d0 = GGML_FP16_TO_FP32(x[i].d); | |
| const float d1 = y[i].d; | |
| summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; | |
| const __m256 d0v = _mm256_set1_ps( d0 ); | |
| const __m256 d1v = _mm256_set1_ps( d1 ); | |
| // Compute combined scales | |
| const __m256 d0d1 = _mm256_mul_ps( d0v, d1v ); | |
| // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes | |
| const __m256i qx = bytes_from_nibbles_32(x[i].qs); | |
| const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[i].qs ); | |
| const __m256 xy = mul_sum_us8_pairs_float(qx, qy); | |
| // Accumulate d0*d1*x*y | |
| acc = _mm256_fmadd_ps( d0d1, xy, acc ); | |
| // (builds without FMA instead use: acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc ); | |
| //  keeping both statements would accumulate the product twice) | |
| } | |
| *s = hsum_float_8(acc) + summs; | |
| float sumf = 0.0; | |
| size_t vl = __riscv_vsetvl_e8m1(qk/2); | |
| for (int i = 0; i < nb; i++) { | |
| // load elements | |
| vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); | |
| vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); | |
| vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); | |
| // mask and store lower part of x, and then upper part | |
| vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); | |
| vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); | |
| vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); | |
| vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); | |
| vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); | |
| vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); | |
| vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); | |
| vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); | |
| vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); | |
| int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); | |
| sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; | |
| } | |
| *s = sumf; | |
| // scalar | |
| float sumf = 0.0; | |
| for (int i = 0; i < nb; i++) { | |
| int sumi = 0; | |
| for (int j = 0; j < qk/2; ++j) { | |
| const int v0 = (x[i].qs[j] & 0x0F); | |
| const int v1 = (x[i].qs[j] >> 4); | |
| sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]); | |
| } | |
| sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; | |
| } | |
| *s = sumf; | |
| } | |
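| // Likewise, every ggml_vec_dot_q4_1_q8_1 path reduces to the scalar formula: | |
| //   s = sum_i [ d_x[i]*d_y[i] * sum_j ( (qs[j] & 0xF)*y[j] + (qs[j] >> 4)*y[j + qk/2] ) + m_x[i]*s_y[i] ] | |
| // where s_y[i] is the per-block sum term stored in the q8_1 block. | |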
| void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| const int qk = QK8_0; | |
| const int nb = n / qk; | |
| assert(n % qk == 0); | |
| assert(qk == QK5_0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q5_0 * restrict x = vx; | |
| const block_q8_0 * restrict y = vy; | |
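| // ARM NEON path: processes two blocks per iteration (nb must be even); the fifth bits are expanded to bytes via table_b2b_1. | |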
| float32x4_t sumv0 = vdupq_n_f32(0.0f); | |
| float32x4_t sumv1 = vdupq_n_f32(0.0f); | |
| uint32_t qh0; | |
| uint32_t qh1; | |
| uint64_t tmp0[4]; | |
| uint64_t tmp1[4]; | |
| assert(nb % 2 == 0); // TODO: handle odd nb | |
| for (int i = 0; i < nb; i += 2) { | |
| const block_q5_0 * restrict x0 = &x[i]; | |
| const block_q5_0 * restrict x1 = &x[i + 1]; | |
| const block_q8_0 * restrict y0 = &y[i]; | |
| const block_q8_0 * restrict y1 = &y[i + 1]; | |
| const uint8x16_t m4b = vdupq_n_u8(0x0F); | |
| // extract the 5th bit via lookup table ((!b) << 4) | |
| memcpy(&qh0, x0->qh, sizeof(qh0)); | |
| memcpy(&qh1, x1->qh, sizeof(qh1)); | |
| tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF]; | |
| tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF]; | |
| tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF]; | |
| tmp0[3] = table_b2b_1[(qh0 >> 24) ]; | |
| tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF]; | |
| tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF]; | |
| tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF]; | |
| tmp1[3] = table_b2b_1[(qh1 >> 24) ]; | |
| const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); | |
| const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); | |
| const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); | |
| const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); | |
| const uint8x16_t v0_0 = vld1q_u8(x0->qs); | |
| const uint8x16_t v0_1 = vld1q_u8(x1->qs); | |
| // 4-bit -> 8-bit | |
| int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); | |
| int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); | |
| int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); | |
| int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); | |
| // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) | |
| const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0); | |
| const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0); | |
| const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1); | |
| const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1); | |
| // load y | |
| const int8x16_t v1_0l = vld1q_s8(y0->qs); | |
| const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); | |
| const int8x16_t v1_1l = vld1q_s8(y1->qs); | |
| const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); | |
| sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( | |
| ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), | |
| ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); | |
| sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( | |
| ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), | |
| ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); | |
| } | |
| *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); | |
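| // WebAssembly SIMD128 path: one block per iteration, same table_b2b_1 expansion of the fifth bits. | |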
| v128_t sumv = wasm_f32x4_splat(0.0f); | |
| uint32_t qh; | |
| uint64_t tmp[4]; | |
| // TODO: check if unrolling this is better | |
| for (int i = 0; i < nb; ++i) { | |
| const block_q5_0 * restrict x0 = &x[i]; | |
| const block_q8_0 * restrict y0 = &y[i]; | |
| const v128_t m4b = wasm_i8x16_splat(0x0F); | |
| // extract the 5th bit | |
| memcpy(&qh, x0->qh, sizeof(qh)); | |
| tmp[0] = table_b2b_1[(qh >> 0) & 0xFF]; | |
| tmp[1] = table_b2b_1[(qh >> 8) & 0xFF]; | |
| tmp[2] = table_b2b_1[(qh >> 16) & 0xFF]; | |
| tmp[3] = table_b2b_1[(qh >> 24) ]; | |
| const v128_t qhl = wasm_v128_load(tmp + 0); | |
| const v128_t qhh = wasm_v128_load(tmp + 2); | |
| const v128_t v0 = wasm_v128_load(x0->qs); | |
| // 4-bit -> 8-bit | |
| const v128_t v0l = wasm_v128_and (v0, m4b); | |
| const v128_t v0h = wasm_u8x16_shr(v0, 4); | |
| // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) | |
| const v128_t v0lf = wasm_i8x16_sub(v0l, qhl); | |
| const v128_t v0hf = wasm_i8x16_sub(v0h, qhh); | |
| // load y | |
| const v128_t v1l = wasm_v128_load(y0->qs); | |
| const v128_t v1h = wasm_v128_load(y0->qs + 16); | |
| // int8x16 -> int16x8 | |
| const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); | |
| const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); | |
| const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); | |
| const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); | |
| const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); | |
| const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); | |
| const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); | |
| const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); | |
| // dot product | |
| sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4( | |
| wasm_i32x4_add( | |
| wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), | |
| wasm_i32x4_dot_i16x8(v0lfh, v1lh)), | |
| wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), | |
| wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), | |
| wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); | |
| } | |
| *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + | |
| wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); | |
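| // AVX2 path: bytes_from_bits_32() spreads the 32 fifth bits to 0x00/0xFF bytes; OR-ing the complemented bits (masked to 0xF0) into the nibbles produces the final signed values (nibble - 16 where the fifth bit is clear) in one step. | |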
| // Initialize accumulator with zeros | |
| __m256 acc = _mm256_setzero_ps(); | |
| // Main loop | |
| for (int i = 0; i < nb; i++) { | |
| /* Compute combined scale for the block */ | |
| const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); | |
| __m256i qx = bytes_from_nibbles_32(x[i].qs); | |
| __m256i bxhi = bytes_from_bits_32(x[i].qh); | |
| bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0)); | |
| qx = _mm256_or_si256(qx, bxhi); | |
| __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs); | |
| const __m256 q = mul_sum_i8_pairs_float(qx, qy); | |
| /* Multiply q with scale and accumulate */ | |
| acc = _mm256_fmadd_ps(d, q, acc); | |
| } | |
| *s = hsum_float_8(acc); | |
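| // AVX (no AVX2) path: the same high-bit fixup done on 128-bit halves, with mul + add instead of FMA. | |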
| // Initialize accumulator with zeros | |
| __m256 acc = _mm256_setzero_ps(); | |
| __m128i mask = _mm_set1_epi8((char)0xF0); | |
| // Main loop | |
| for (int i = 0; i < nb; i++) { | |
| /* Compute combined scale for the block */ | |
| const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); | |
| __m256i bx_0 = bytes_from_nibbles_32(x[i].qs); | |
| const __m256i bxhi = bytes_from_bits_32(x[i].qh); | |
| __m128i bxhil = _mm256_castsi256_si128(bxhi); | |
| __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); | |
| bxhil = _mm_andnot_si128(bxhil, mask); | |
| bxhih = _mm_andnot_si128(bxhih, mask); | |
| __m128i bxl = _mm256_castsi256_si128(bx_0); | |
| __m128i bxh = _mm256_extractf128_si256(bx_0, 1); | |
| bxl = _mm_or_si128(bxl, bxhil); | |
| bxh = _mm_or_si128(bxh, bxhih); | |
| bx_0 = MM256_SET_M128I(bxh, bxl); | |
| const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[i].qs); | |
| const __m256 q = mul_sum_i8_pairs_float(bx_0, by_0); | |
| /* Multiply q with scale and accumulate */ | |
| acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
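| // RISC-V vector path: the fifth bits are shifted into position (bit 4), narrowed to bytes, OR-ed with the nibbles, and 16 is subtracted before the widening dot product. | |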
| float sumf = 0.0; | |
| uint32_t qh; | |
| size_t vl = __riscv_vsetvl_e8m1(qk/2); | |
| // These temporary registers are for masking and shift operations | |
| vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl); | |
| vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl); | |
| vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl); | |
| vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl); | |
| for (int i = 0; i < nb; i++) { | |
| memcpy(&qh, x[i].qh, sizeof(uint32_t)); | |
| // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; | |
| vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl); | |
| vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl); | |
| vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl); | |
| // ((qh & (1u << (j + 16))) >> (j + 12)); | |
| vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl); | |
| vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl); | |
| // narrowing | |
| vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl); | |
| vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl); | |
| vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl); | |
| vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl); | |
| // load | |
| vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); | |
| vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); | |
| vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); | |
| vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); | |
| vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); | |
| vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl); | |
| vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl); | |
| vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); | |
| vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); | |
| vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl); | |
| vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl); | |
| vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); | |
| vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); | |
| vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); | |
| vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); | |
| vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); | |
| int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); | |
| sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi; | |
| } | |
| *s = sumf; | |
| // scalar | |
| float sumf = 0.0; | |
| for (int i = 0; i < nb; i++) { | |
| uint32_t qh; | |
| memcpy(&qh, x[i].qh, sizeof(qh)); | |
| int sumi = 0; | |
| for (int j = 0; j < qk/2; ++j) { | |
| const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; | |
| const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); | |
| const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16; | |
| const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16; | |
| sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]); | |
| } | |
| sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi; | |
| } | |
| *s = sumf; | |
| } | |
| void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| const int qk = QK8_1; | |
| const int nb = n / qk; | |
| assert(n % qk == 0); | |
| assert(qk == QK5_1); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q5_1 * restrict x = vx; | |
| const block_q8_1 * restrict y = vy; | |
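| // ARM NEON path: two blocks per iteration; the m*s offsets are accumulated separately in summs0/summs1. | |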
| float32x4_t sumv0 = vdupq_n_f32(0.0f); | |
| float32x4_t sumv1 = vdupq_n_f32(0.0f); | |
| float summs0 = 0.0f; | |
| float summs1 = 0.0f; | |
| uint32_t qh0; | |
| uint32_t qh1; | |
| uint64_t tmp0[4]; | |
| uint64_t tmp1[4]; | |
| assert(nb % 2 == 0); // TODO: handle odd nb | |
| for (int i = 0; i < nb; i += 2) { | |
| const block_q5_1 * restrict x0 = &x[i]; | |
| const block_q5_1 * restrict x1 = &x[i + 1]; | |
| const block_q8_1 * restrict y0 = &y[i]; | |
| const block_q8_1 * restrict y1 = &y[i + 1]; | |
| const uint8x16_t m4b = vdupq_n_u8(0x0F); | |
| summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s; | |
| summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s; | |
| // extract the 5th bit via lookup table ((b) << 4) | |
| memcpy(&qh0, x0->qh, sizeof(qh0)); | |
| memcpy(&qh1, x1->qh, sizeof(qh1)); | |
| tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF]; | |
| tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF]; | |
| tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF]; | |
| tmp0[3] = table_b2b_0[(qh0 >> 24) ]; | |
| tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF]; | |
| tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF]; | |
| tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF]; | |
| tmp1[3] = table_b2b_0[(qh1 >> 24) ]; | |
| const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); | |
| const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); | |
| const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); | |
| const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); | |
| const uint8x16_t v0_0 = vld1q_u8(x0->qs); | |
| const uint8x16_t v0_1 = vld1q_u8(x1->qs); | |
| // 4-bit -> 8-bit | |
| const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); | |
| const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); | |
| const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); | |
| const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); | |
| // add high bit | |
| const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0); | |
| const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0); | |
| const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1); | |
| const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1); | |
| // load y | |
| const int8x16_t v1_0l = vld1q_s8(y0->qs); | |
| const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); | |
| const int8x16_t v1_1l = vld1q_s8(y1->qs); | |
| const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); | |
| sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( | |
| ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), | |
| ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d); | |
| sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( | |
| ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), | |
| ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d); | |
| } | |
| *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1; | |
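| // WebAssembly SIMD128 path: one block per iteration, fifth bits expanded via table_b2b_0. | |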
| v128_t sumv = wasm_f32x4_splat(0.0f); | |
| float summs = 0.0f; | |
| uint32_t qh; | |
| uint64_t tmp[4]; | |
| // TODO: check if unrolling this is better | |
| for (int i = 0; i < nb; ++i) { | |
| const block_q5_1 * restrict x0 = &x[i]; | |
| const block_q8_1 * restrict y0 = &y[i]; | |
| summs += GGML_FP16_TO_FP32(x0->m) * y0->s; | |
| const v128_t m4b = wasm_i8x16_splat(0x0F); | |
| // extract the 5th bit | |
| memcpy(&qh, x0->qh, sizeof(qh)); | |
| tmp[0] = table_b2b_0[(qh >> 0) & 0xFF]; | |
| tmp[1] = table_b2b_0[(qh >> 8) & 0xFF]; | |
| tmp[2] = table_b2b_0[(qh >> 16) & 0xFF]; | |
| tmp[3] = table_b2b_0[(qh >> 24) ]; | |
| const v128_t qhl = wasm_v128_load(tmp + 0); | |
| const v128_t qhh = wasm_v128_load(tmp + 2); | |
| const v128_t v0 = wasm_v128_load(x0->qs); | |
| // 4-bit -> 8-bit | |
| const v128_t v0l = wasm_v128_and (v0, m4b); | |
| const v128_t v0h = wasm_u8x16_shr(v0, 4); | |
| // add high bit | |
| const v128_t v0lf = wasm_v128_or(v0l, qhl); | |
| const v128_t v0hf = wasm_v128_or(v0h, qhh); | |
| // load y | |
| const v128_t v1l = wasm_v128_load(y0->qs); | |
| const v128_t v1h = wasm_v128_load(y0->qs + 16); | |
| // int8x16 -> int16x8 | |
| const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); | |
| const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); | |
| const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); | |
| const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); | |
| const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); | |
| const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); | |
| const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); | |
| const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); | |
| // dot product | |
| sumv = wasm_f32x4_add(sumv, | |
| wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add( | |
| wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), | |
| wasm_i32x4_dot_i16x8(v0lfh, v1lh)), | |
| wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), | |
| wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), | |
| wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d))); | |
| } | |
| *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + | |
| wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs; | |
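| // AVX2 path: the fifth bits are masked to 0x10 and OR-ed into the unpacked nibbles (q5_1 values are unsigned, so no -16 correction is needed). | |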
| // Initialize accumulator with zeros | |
| __m256 acc = _mm256_setzero_ps(); | |
| float summs = 0.0f; | |
| // Main loop | |
| for (int i = 0; i < nb; i++) { | |
| const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d)); | |
| summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; | |
| __m256i qx = bytes_from_nibbles_32(x[i].qs); | |
| __m256i bxhi = bytes_from_bits_32(x[i].qh); | |
| bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10)); | |
| qx = _mm256_or_si256(qx, bxhi); | |
| const __m256 dy = _mm256_set1_ps(y[i].d); | |
| const __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs); | |
| const __m256 q = mul_sum_us8_pairs_float(qx, qy); | |
| acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc); | |
| } | |
| *s = hsum_float_8(acc) + summs; | |
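| // AVX (no AVX2) path: the same 0x10 fixup performed on 128-bit halves. | |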
| // Initialize accumulator with zeros | |
| __m256 acc = _mm256_setzero_ps(); | |
| __m128i mask = _mm_set1_epi8(0x10); | |
| float summs = 0.0f; | |
| // Main loop | |
| for (int i = 0; i < nb; i++) { | |
| const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d)); | |
| summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; | |
| __m256i bx_0 = bytes_from_nibbles_32(x[i].qs); | |
| const __m256i bxhi = bytes_from_bits_32(x[i].qh); | |
| __m128i bxhil = _mm256_castsi256_si128(bxhi); | |
| __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); | |
| bxhil = _mm_and_si128(bxhil, mask); | |
| bxhih = _mm_and_si128(bxhih, mask); | |
| __m128i bxl = _mm256_castsi256_si128(bx_0); | |
| __m128i bxh = _mm256_extractf128_si256(bx_0, 1); | |
| bxl = _mm_or_si128(bxl, bxhil); | |
| bxh = _mm_or_si128(bxh, bxhih); | |
| bx_0 = MM256_SET_M128I(bxh, bxl); | |
| const __m256 dy = _mm256_set1_ps(y[i].d); | |
| const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[i].qs); | |
| const __m256 q = mul_sum_us8_pairs_float(bx_0, by_0); | |
| acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc); | |
| } | |
| *s = hsum_float_8(acc) + summs; | |
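| // RISC-V vector path: fifth bits shifted to the 0x10 position, narrowed, and OR-ed with the nibbles before the widening dot product. | |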
| float sumf = 0.0; | |
| uint32_t qh; | |
| size_t vl = __riscv_vsetvl_e8m1(qk/2); | |
| // temporary registers for shift operations | |
| vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl); | |
| vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl); | |
| for (int i = 0; i < nb; i++) { | |
| memcpy(&qh, x[i].qh, sizeof(uint32_t)); | |
| // load qh | |
| vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl); | |
| // ((qh >> (j + 0)) << 4) & 0x10; | |
| vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl); | |
| vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl); | |
| vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl); | |
| // ((qh >> (j + 12)) ) & 0x10; | |
| vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl); | |
| vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl); | |
| // narrowing | |
| vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl); | |
| vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl); | |
| vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl); | |
| vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl); | |
| // load | |
| vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); | |
| vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); | |
| vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); | |
| vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); | |
| vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); | |
| vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl); | |
| vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl); | |
| vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); | |
| vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); | |
| vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); | |
| vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); | |
| vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); | |
| vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); | |
| vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); | |
| int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); | |
| sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; | |
| } | |
| *s = sumf; | |
| // scalar | |
| float sumf = 0.0; | |
| for (int i = 0; i < nb; i++) { | |
| uint32_t qh; | |
| memcpy(&qh, x[i].qh, sizeof(qh)); | |
| int sumi = 0; | |
| for (int j = 0; j < qk/2; ++j) { | |
| const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; | |
| const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; | |
| const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0; | |
| const int32_t x1 = (x[i].qs[j] >> 4) | xh_1; | |
| sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]); | |
| } | |
| sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; | |
| } | |
| *s = sumf; | |
| } | |
| void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| const int qk = QK8_0; | |
| const int nb = n / qk; | |
| assert(n % qk == 0); | |
| #if defined(__ARM_FEATURE_MATMUL_INT8) | |
| assert((nrc == 2) || (nrc == 1)); | |
| #else | |
| assert(nrc == 1); | |
| #endif | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q8_0 * restrict x = vx; | |
| const block_q8_0 * restrict y = vy; | |
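| // With the ARM int8 matrix-multiply extension (i8mm), nrc == 2 computes a 2x2 tile of dot products at once via vmmlaq_s32; the results are written to s[0..1] and s[bs..bs+1]. | |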
| if (nrc == 2) { | |
| const block_q8_0 * restrict vx0 = vx; | |
| const block_q8_0 * restrict vx1 = vx + bx; | |
| const block_q8_0 * restrict vy0 = vy; | |
| const block_q8_0 * restrict vy1 = vy + by; | |
| float32x4_t sumv0 = vdupq_n_f32(0.0f); | |
| for (int i = 0; i < nb; i++) { | |
| const block_q8_0 * restrict b_x0 = &vx0[i]; | |
| const block_q8_0 * restrict b_y0 = &vy0[i]; | |
| const block_q8_0 * restrict b_x1 = &vx1[i]; | |
| const block_q8_0 * restrict b_y1 = &vy1[i]; | |
| const int8x16_t x0_l = vld1q_s8(b_x0->qs); | |
| const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16); | |
| const int8x16_t x1_l = vld1q_s8(b_x1->qs); | |
| const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16); | |
| // load y | |
| const int8x16_t y0_l = vld1q_s8(b_y0->qs); | |
| const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); | |
| const int8x16_t y1_l = vld1q_s8(b_y1->qs); | |
| const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); | |
| float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), | |
| GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), | |
| GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), | |
| GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; | |
| int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); | |
| int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); | |
| int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); | |
| int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); | |
| int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); | |
| int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); | |
| int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); | |
| int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); | |
| sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), | |
| l1, r1)), l2, r2)), l3, r3))), scale); | |
| } | |
| float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); | |
| float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); | |
| vst1_f32(s, vget_low_f32(sumv2)); | |
| vst1_f32(s + bs, vget_high_f32(sumv2)); | |
| return; | |
| } | |
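| // Single-row ARM NEON path: two blocks per iteration. | |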
| float32x4_t sumv0 = vdupq_n_f32(0.0f); | |
| float32x4_t sumv1 = vdupq_n_f32(0.0f); | |
| assert(nb % 2 == 0); // TODO: handle odd nb | |
| for (int i = 0; i < nb; i += 2) { | |
| const block_q8_0 * restrict x0 = &x[i + 0]; | |
| const block_q8_0 * restrict x1 = &x[i + 1]; | |
| const block_q8_0 * restrict y0 = &y[i + 0]; | |
| const block_q8_0 * restrict y1 = &y[i + 1]; | |
| const int8x16_t x0_0 = vld1q_s8(x0->qs); | |
| const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); | |
| const int8x16_t x1_0 = vld1q_s8(x1->qs); | |
| const int8x16_t x1_1 = vld1q_s8(x1->qs + 16); | |
| // load y | |
| const int8x16_t y0_0 = vld1q_s8(y0->qs); | |
| const int8x16_t y0_1 = vld1q_s8(y0->qs + 16); | |
| const int8x16_t y1_0 = vld1q_s8(y1->qs); | |
| const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); | |
| sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( | |
| ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), | |
| ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); | |
| sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( | |
| ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), | |
| ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); | |
| } | |
| *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); | |
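| // AVX2 / AVX path: plain int8 dot products over each 32-byte block. | |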
| // Initialize accumulator with zeros | |
| __m256 acc = _mm256_setzero_ps(); | |
| // Main loop | |
| for (int i = 0; i < nb; ++i) { | |
| // Compute combined scale for the block | |
| const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); | |
| __m256i qx = _mm256_loadu_si256((const __m256i *)x[i].qs); | |
| __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs); | |
| const __m256 q = mul_sum_i8_pairs_float(qx, qy); | |
| // Multiply q with scale and accumulate | |
| #if defined(__AVX2__) | |
| acc = _mm256_fmadd_ps( d, q, acc ); | |
| #else | |
| acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc ); | |
| #endif | |
| } | |
| *s = hsum_float_8(acc); | |
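| // RISC-V vector path: a single widening multiply and reduction per block. | |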
| float sumf = 0.0; | |
| size_t vl = __riscv_vsetvl_e8m1(qk); | |
| for (int i = 0; i < nb; i++) { | |
| // load elements | |
| vint8m1_t bx_0 = __riscv_vle8_v_i8m1(x[i].qs, vl); | |
| vint8m1_t by_0 = __riscv_vle8_v_i8m1(y[i].qs, vl); | |
| vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx_0, by_0, vl); | |
| vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl); | |
| vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl); | |
| int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum); | |
| sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)); | |
| } | |
| *s = sumf; | |
| // scalar | |
| float sumf = 0.0; | |
| for (int i = 0; i < nb; i++) { | |
| int sumi = 0; | |
| for (int j = 0; j < qk; j++) { | |
| sumi += x[i].qs[j]*y[i].qs[j]; | |
| } | |
| sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)); | |
| } | |
| *s = sumf; | |
| } | |
| void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q2_K * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
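| // ARM NEON path: each scale byte packs a 4-bit scale (low nibble) and a 4-bit min (high nibble); the mins are folded in through y's block sums (bsums). | |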
| const uint8x16_t m3 = vdupq_n_u8(0x3); | |
| const uint8x16_t m4 = vdupq_n_u8(0xF); | |
| const int32x4_t vzero = vdupq_n_s32(0); | |
| ggml_int8x16x2_t q2bytes; | |
| uint8_t aux[16]; | |
| float sum = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const uint8_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint8_t * restrict sc = x[i].scales; | |
| const uint8x16_t mins_and_scales = vld1q_u8(sc); | |
| const uint8x16_t scales = vandq_u8(mins_and_scales, m4); | |
| vst1q_u8(aux, scales); | |
| const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4); | |
| const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); | |
| const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}}; | |
| const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])), | |
| vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0]))); | |
| const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])), | |
| vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1]))); | |
| sum += dmin * vaddvq_s32(vaddq_s32(s0, s1)); | |
| int isum = 0; | |
| int is = 0; | |
| // We use these macros instead of a function call because, for some reason, | |
| // the code runs 2-3% slower with a function call, even if the function is declared inline | |
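| // NOTE: MULTIPLY_ACCUM_WITH_SCALE / SHIFT_MULTIPLY_ACCUM_WITH_SCALE are used below but their | |
| // definitions do not appear above; the following is a sketch reconstructed from the call sites | |
| // (accumulate the vdot of each 2-bit plane against q8, weighted by the low-nibble scales in aux[]) | |
| // and may differ in detail from the original. | |
| #define MULTIPLY_ACCUM_WITH_SCALE(index)\ | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\ | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)]; | |
| #define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\ | |
| q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\ | |
| q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\ | |
| q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\ | |
| MULTIPLY_ACCUM_WITH_SCALE((index)); | |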
| for (int j = 0; j < QK_K/128; ++j) { | |
| const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32; | |
| ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; | |
| q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3)); | |
| q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3)); | |
| MULTIPLY_ACCUM_WITH_SCALE(0); | |
| SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2); | |
| SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4); | |
| SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6); | |
| is += 8; | |
| } | |
| sum += d * isum; | |
| } | |
| *s = sum; | |
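| // AVX2 path: mins are applied with a single madd against bsums; scales are broadcast per 32-value group with _mm256_shuffle_epi8. | |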
| const __m256i m3 = _mm256_set1_epi8(3); | |
| const __m128i m4 = _mm_set1_epi8(0xF); | |
| __m256 acc = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const uint8_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); | |
| const __m128i scales8 = _mm_and_si128(mins_and_scales, m4); | |
| const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); | |
| const __m256i mins = _mm256_cvtepi8_epi16(mins8); | |
| const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums)); | |
| acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc); | |
| const __m256i all_scales = _mm256_cvtepi8_epi16(scales8); | |
| const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); | |
| const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); | |
| const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; | |
| __m256i sumi = _mm256_setzero_si256(); | |
| for (int j = 0; j < QK_K/128; ++j) { | |
| const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32; | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q2_0 = _mm256_and_si256(q2bits, m3); | |
| const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3); | |
| const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3); | |
| const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3); | |
| __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0); | |
| __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1); | |
| __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2); | |
| __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3); | |
| p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0); | |
| p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1); | |
| p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2); | |
| p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3); | |
| p0 = _mm256_add_epi32(p0, p1); | |
| p2 = _mm256_add_epi32(p2, p3); | |
| sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2)); | |
| } | |
| acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
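| // AVX (SSE-level) path: the same computation carried out in 128-bit registers. | |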
| const __m128i m3 = _mm_set1_epi8(0x3); | |
| const __m128i m4 = _mm_set1_epi8(0xF); | |
| const __m128i m2 = _mm_set1_epi8(0x2); | |
| __m256 acc = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const uint8_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| // load mins and scales from block_q2_K.scales[QK_K/16] | |
| const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); | |
| const __m128i scales16 = _mm_and_si128(mins_and_scales, m4); | |
| const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); | |
| const __m128i mins_0 = _mm_cvtepi8_epi16(mins16); | |
| const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16)); | |
| // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2 | |
| const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0])); | |
| const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8])); | |
| // sumf += -dmin * summs in 32bits*8 | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc); | |
| const __m128i scales_0 = _mm_cvtepi8_epi16(scales16); | |
| const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16)); | |
| const __m128i scales[2] = { scales_0, scales_1 }; | |
| __m128i sumi_0 = _mm_setzero_si128(); | |
| __m128i sumi_1 = _mm_setzero_si128(); | |
| for (int j = 0; j < QK_K/128; ++j) { | |
| // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K] | |
| const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| // load 2bits*16*8 from block_q2_K.qs[QK_K/4] | |
| __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16; | |
| const __m128i q2_0 = _mm_and_si128(q2bits, m3); | |
| const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); | |
| const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); | |
| const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); | |
| q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16; | |
| const __m128i q2_1 = _mm_and_si128(q2bits, m3); | |
| const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); | |
| const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); | |
| const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); | |
| // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8 | |
| __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0); | |
| __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1); | |
| __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2); | |
| __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3); | |
| __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4); | |
| __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5); | |
| __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6); | |
| __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7); | |
| // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8 | |
| __m128i shuffle = _mm_set1_epi16(0x0100); | |
| p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7); | |
| p0 = _mm_add_epi32(p0, p1); | |
| p2 = _mm_add_epi32(p2, p3); | |
| p4 = _mm_add_epi32(p4, p5); | |
| p6 = _mm_add_epi32(p6, p7); | |
| // isum in 32bits*4*2 | |
| sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2)); | |
| sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6)); | |
| } | |
| // sumf += dall * isum - dmin * summs in 32bits | |
| __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
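| // RISC-V vector path: mins folded in via bsums, scales gathered per group with vrgather before the widening products. | |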
| float sumf = 0; | |
| uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | |
| 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * q2 = x[i].qs; | |
| const int8_t * q8 = y[i].qs; | |
| const uint8_t * sc = x[i].scales; | |
| const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| size_t vl = 16; | |
| vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl); | |
| vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl); | |
| vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl); | |
| vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl); | |
| vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl); | |
| vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl)); | |
| vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl); | |
| vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); | |
| sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums); | |
| vl = 32; | |
| vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); | |
| vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl); | |
| uint8_t is=0; | |
| int isum=0; | |
| for (int j = 0; j < QK_K/128; ++j) { | |
| // load Q2 | |
| vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl); | |
| vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl); | |
| vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03 , vl); | |
| vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03 , vl); | |
| vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03 , vl); | |
| // duplicate scale elements for product | |
| vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0+is, vl), vl); | |
| vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2+is, vl), vl); | |
| vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4+is, vl), vl); | |
| vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6+is, vl), vl); | |
| vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl)); | |
| vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl)); | |
| vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl)); | |
| vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl)); | |
| // load Q8 | |
| vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); | |
| vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl); | |
| vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8+64, vl); | |
| vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8+96, vl); | |
| vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl); | |
| vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl); | |
| vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl); | |
| vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl); | |
| vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl); | |
| vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl); | |
| isum += __riscv_vmv_x_s_i32m1_i32(isum1); | |
| q2+=32; q8+=128; is=8; | |
| } | |
| sumf += dall * isum; | |
| } | |
| *s = sumf; | |
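| // scalar | |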
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * q2 = x[i].qs; | |
| const int8_t * q8 = y[i].qs; | |
| const uint8_t * sc = x[i].scales; | |
| int summs = 0; | |
| for (int j = 0; j < 16; ++j) { | |
| summs += y[i].bsums[j] * (sc[j] >> 4); | |
| } | |
| const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| int isum = 0; | |
| int is = 0; | |
| int d; | |
| for (int k = 0; k < QK_K/128; ++k) { | |
| int shift = 0; | |
| for (int j = 0; j < 4; ++j) { | |
| d = sc[is++] & 0xF; | |
| int isuml = 0; | |
| for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); | |
| isum += d * isuml; | |
| d = sc[is++] & 0xF; | |
| isuml = 0; | |
| for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); | |
| isum += d * isuml; | |
| shift += 2; | |
| q8 += 32; | |
| } | |
| q2 += 32; | |
| } | |
| sumf += dall * isum - dmin * summs; | |
| } | |
| *s = sumf; | |
| } | |
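| // A second definition of ggml_vec_dot_q2_K_q8_K follows: in the original source the two definitions are selected at compile time by the super-block size, and this variant handles the small QK_K == 64 layout. | |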
| void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q2_K * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
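| // ARM NEON path: one 16-byte q2 load per 64-value super-block, with its four 2-bit planes dotted against q8. | |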
| const uint8x16_t m3 = vdupq_n_u8(0x3); | |
| const int32x4_t vzero = vdupq_n_s32(0); | |
| ggml_int8x16x4_t q2bytes; | |
| uint32_t aux32[2]; | |
| const uint8_t * scales = (const uint8_t *)aux32; | |
| float sum = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const uint8_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint32_t * restrict sc = (const uint32_t *)x[i].scales; | |
| aux32[0] = sc[0] & 0x0f0f0f0f; | |
| aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f; | |
| sum += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]); | |
| int isum1 = 0, isum2 = 0; | |
| const uint8x16_t q2bits = vld1q_u8(q2); | |
| const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); | |
| q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3)); | |
| q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3)); | |
| q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3)); | |
| q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3)); | |
| isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0]; | |
| isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1]; | |
| isum1 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2]; | |
| isum2 += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3]; | |
| sum += d * (isum1 + isum2); | |
| } | |
| *s = sum; | |
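| // AVX2 path: with 64-value super-blocks a single 16-byte q2 load supplies all four 2-bit planes. | |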
| const __m256i m3 = _mm256_set1_epi8(3); | |
| __m256 acc = _mm256_setzero_ps(); | |
| uint32_t ud, um; | |
| const uint8_t * restrict db = (const uint8_t *)&ud; | |
| const uint8_t * restrict mb = (const uint8_t *)&um; | |
| float summs = 0; | |
| // TODO: optimize this | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const uint8_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint32_t * restrict sc = (const uint32_t *)x[i].scales; | |
| ud = (sc[0] >> 0) & 0x0f0f0f0f; | |
| um = (sc[0] >> 4) & 0x0f0f0f0f; | |
| int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3]; | |
| summs += dmin * smin; | |
| const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); | |
| const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3); | |
| const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3); | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0)); | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32)); | |
| const __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0); | |
| const __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1); | |
| const __m256i p_0 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 0)); | |
| const __m256i p_1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 1)); | |
| const __m256i p_2 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 0)); | |
| const __m256i p_3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 1)); | |
| acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0), acc); | |
| acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1), acc); | |
| acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2), acc); | |
| acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3), acc); | |
| } | |
| *s = hsum_float_8(acc) + summs; | |
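| // AVX (no AVX2) path: the same computation using 128-bit multiplies. | |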
| const __m128i m3 = _mm_set1_epi8(3); | |
| __m256 acc = _mm256_setzero_ps(); | |
| uint32_t ud, um; | |
| const uint8_t * restrict db = (const uint8_t *)&ud; | |
| const uint8_t * restrict mb = (const uint8_t *)&um; | |
| float summs = 0; | |
| // TODO: optimize this | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const uint8_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint32_t * restrict sc = (const uint32_t *)x[i].scales; | |
| ud = (sc[0] >> 0) & 0x0f0f0f0f; | |
| um = (sc[0] >> 4) & 0x0f0f0f0f; | |
| int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3]; | |
| summs += dmin * smin; | |
| const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); | |
| const __m128i q2_0 = _mm_and_si128(q2bits, m3); | |
| const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); | |
| const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); | |
| const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0)); | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32)); | |
| const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0)); | |
| const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1)); | |
| const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0)); | |
| const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1)); | |
| const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0)); | |
| const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1)); | |
| const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2)); | |
| const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3)); | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc); | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc); | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc); | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc); | |
| } | |
| *s = hsum_float_8(acc) + summs; | |
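| // RISC-V vector path: the same four 2-bit planes, widened and reduced per plane. | |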
| uint32_t aux32[2]; | |
| const uint8_t * scales = (const uint8_t *)aux32; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const uint8_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint32_t * restrict sc = (const uint32_t *)x[i].scales; | |
| aux32[0] = sc[0] & 0x0f0f0f0f; | |
| aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f; | |
| sumf += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]); | |
| int isum1 = 0; | |
| int isum2 = 0; | |
| size_t vl = 16; | |
| vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1); | |
| // load Q2 | |
| vuint8mf2_t q2_x = __riscv_vle8_v_u8mf2(q2, vl); | |
| vint8mf2_t q2_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q2_x, 0x03, vl)); | |
| vint8mf2_t q2_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x2, vl), 0x03 , vl)); | |
| vint8mf2_t q2_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x4, vl), 0x03 , vl)); | |
| vint8mf2_t q2_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x6, vl), 0x03 , vl)); | |
| // load Q8, and take product with Q2 | |
| vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q2_0, __riscv_vle8_v_i8mf2(q8, vl), vl); | |
| vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q2_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl); | |
| vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q2_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl); | |
| vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q2_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl); | |
| vint16m1_t vs_0 = __riscv_vredsum_vs_i16m1_i16m1(p0, vzero, vl); | |
| vint16m1_t vs_1 = __riscv_vredsum_vs_i16m1_i16m1(p1, vzero, vl); | |
| vint16m1_t vs_2 = __riscv_vredsum_vs_i16m1_i16m1(p2, vzero, vl); | |
| vint16m1_t vs_3 = __riscv_vredsum_vs_i16m1_i16m1(p3, vzero, vl); | |
| isum1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[0]; | |
| isum2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[1]; | |
| isum1 += __riscv_vmv_x_s_i16m1_i16(vs_2) * scales[2]; | |
| isum2 += __riscv_vmv_x_s_i16m1_i16(vs_3) * scales[3]; | |
| sumf += d * (isum1 + isum2); | |
| } | |
| *s = sumf; | |
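| // scalar | |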
| float sumf = 0; | |
| int isum[4]; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * q2 = x[i].qs; | |
| const int8_t * q8 = y[i].qs; | |
| const uint8_t * sc = x[i].scales; | |
| int summs = 0; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| summs += y[i].bsums[j] * (sc[j] >> 4); | |
| } | |
| const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| isum[0] = isum[1] = isum[2] = isum[3] = 0; | |
| for (int l = 0; l < 16; ++l) { | |
| isum[0] += q8[l+ 0] * ((q2[l] >> 0) & 3); | |
| isum[1] += q8[l+16] * ((q2[l] >> 2) & 3); | |
| isum[2] += q8[l+32] * ((q2[l] >> 4) & 3); | |
| isum[3] += q8[l+48] * ((q2[l] >> 6) & 3); | |
| } | |
| for (int l = 0; l < 4; ++l) { | |
| isum[l] *= (sc[l] & 0xF); | |
| } | |
| sumf += dall * (isum[0] + isum[1] + isum[2] + isum[3]) - dmin * summs; | |
| } | |
| *s = sumf; | |
| } | |
| void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const uint32_t kmask1 = 0x03030303; | |
| const uint32_t kmask2 = 0x0f0f0f0f; | |
| const block_q3_K * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
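| // ARM NEON path: 3-bit values are rebuilt from the 2-bit planes plus the hmask bits, and 32 is subtracted from each 6-bit scale. | |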
| uint32_t aux[3]; | |
| uint32_t utmp[4]; | |
| const uint8x16_t m3b = vdupq_n_u8(0x3); | |
| const int32x4_t vzero = vdupq_n_s32(0); | |
| const uint8x16_t m0 = vdupq_n_u8(1); | |
| const uint8x16_t m1 = vshlq_n_u8(m0, 1); | |
| const uint8x16_t m2 = vshlq_n_u8(m0, 2); | |
| const uint8x16_t m3 = vshlq_n_u8(m0, 3); | |
| const int8_t m32 = 32; | |
| ggml_int8x16x4_t q3bytes; | |
| float sum = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const uint8_t * restrict qh = x[i].hmask; | |
| const int8_t * restrict q8 = y[i].qs; | |
| ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); | |
| ggml_uint8x16x4_t q3h; | |
| int32_t isum = 0; | |
| // Set up scales | |
| memcpy(aux, x[i].scales, 12); | |
| utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); | |
| utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); | |
| utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); | |
| utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); | |
| int8_t * scale = (int8_t *)utmp; | |
| for (int j = 0; j < 16; ++j) scale[j] -= m32; | |
| for (int j = 0; j < QK_K/128; ++j) { | |
| const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32; | |
| const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64; | |
| const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64; | |
| q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2); | |
| q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2); | |
| q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1); | |
| q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1); | |
| q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0])); | |
| q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1])); | |
| q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2])); | |
| q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3])); | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0]; | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1]; | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2]; | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3]; | |
| scale += 4; | |
| q3h.val[0] = vbicq_u8(m2, qhbits.val[0]); | |
| q3h.val[1] = vbicq_u8(m2, qhbits.val[1]); | |
| q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1); | |
| q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1); | |
| q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0])); | |
| q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1])); | |
| q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2])); | |
| q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3])); | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0]; | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1]; | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2]; | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3]; | |
| scale += 4; | |
| if (j == 0) { | |
| qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4); | |
| qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4); | |
| } | |
| } | |
| sum += d * isum; | |
| } | |
| *s = sum; | |
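| // AVX2 path: the 2-bit planes and the hmask high-bit terms are dotted with q8 separately (maddubs needs unsigned inputs) and the high-bit term is subtracted afterwards. | |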
| const __m256i m3 = _mm256_set1_epi8(3); | |
| const __m256i mone = _mm256_set1_epi8(1); | |
| const __m128i m32 = _mm_set1_epi8(32); | |
| __m256 acc = _mm256_setzero_ps(); | |
| uint32_t aux[3]; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| // Set up scales | |
| memcpy(aux, x[i].scales, 12); | |
| __m128i scales128 = _mm_set_epi32( | |
| ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), | |
| ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), | |
| (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), | |
| (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); | |
| scales128 = _mm_sub_epi8(scales128, m32); | |
| const __m256i all_scales = _mm256_cvtepi8_epi16(scales128); | |
| const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); | |
| const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); | |
| const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; | |
| // high bit | |
| const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask); | |
| // integer accumulator | |
| __m256i sumi = _mm256_setzero_si256(); | |
| int bit = 0; | |
| int is = 0; | |
| for (int j = 0; j < QK_K/128; ++j) { | |
| // load low 2 bits | |
| const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32; | |
| // prepare low and high bits | |
| const __m256i q3l_0 = _mm256_and_si256(q3bits, m3); | |
| const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); | |
| ++bit; | |
| const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); | |
| const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); | |
| ++bit; | |
| const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); | |
| const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); | |
| ++bit; | |
| const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); | |
| const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); | |
| ++bit; | |
| // load Q8 quants | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| // Dot product: we multiply the 2 low bits and the 1 high bit separately, so we can use _mm256_maddubs_epi16, | |
| // and then subtract. The high-bit part already carries the offset of 4: it is 4 where the high bit is not set | |
| // and 0 where it is set, so subtracting it leaves the signed quant (q3l - 4, or q3l) times the Q8 value. | |
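| // Illustrative example for one byte pair: q3l = {1, 2} with both high bits clear gives q3h = {4, 4}; with | |
| // q8 = {10, -3} the maddubs results are p16 = 1*10 + 2*(-3) = 4 and q8s = 4*10 + 4*(-3) = 28, and | |
| // p16 - q8s = -24 = (1-4)*10 + (2-4)*(-3), i.e. the signed quants times the Q8 values. | |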
| __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); | |
| __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); | |
| __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); | |
| __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); | |
| __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); | |
| __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); | |
| __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); | |
| __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); | |
| p16_0 = _mm256_sub_epi16(p16_0, q8s_0); | |
| p16_1 = _mm256_sub_epi16(p16_1, q8s_1); | |
| p16_2 = _mm256_sub_epi16(p16_2, q8s_2); | |
| p16_3 = _mm256_sub_epi16(p16_3, q8s_3); | |
| // multiply with scales | |
| p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0); | |
| p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1); | |
| p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2); | |
| p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3); | |
| // accumulate | |
| p16_0 = _mm256_add_epi32(p16_0, p16_1); | |
| p16_2 = _mm256_add_epi32(p16_2, p16_3); | |
| sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); | |
| } | |
| // multiply with block scale and accumulate | |
| acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
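| // 128-bit SSE variant of the q3_K dot product: same math as the AVX2 block above, with each 256-bit | |
| // operation split into two 128-bit halves and the scale shuffle advanced by 2 after every sub-block. | |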
| const __m128i m3 = _mm_set1_epi8(3); | |
| const __m128i mone = _mm_set1_epi8(1); | |
| const __m128i m32 = _mm_set1_epi8(32); | |
| const __m128i m2 = _mm_set1_epi8(2); | |
| __m256 acc = _mm256_setzero_ps(); | |
| const uint32_t *aux; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| // Set up scales | |
| aux = (const uint32_t *)x[i].scales; | |
| __m128i scales128 = _mm_set_epi32( | |
| ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), | |
| ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), | |
| (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), | |
| (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); | |
| scales128 = _mm_sub_epi8(scales128, m32); | |
| const __m128i scales_0 = _mm_cvtepi8_epi16(scales128); | |
| const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128)); | |
| const __m128i scales[2] = { scales_0, scales_1 }; | |
| // high bit *128*2 from block_q3_K.hmask[QK_K/8] | |
| const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]); | |
| const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]); | |
| // integer accumulator | |
| __m128i sumi_0 = _mm_setzero_si128(); | |
| __m128i sumi_1 = _mm_setzero_si128(); | |
| for (int j = 0; j < QK_K/128; ++j) { | |
| // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4] | |
| const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; | |
| const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; | |
| // prepare low and high bits | |
| const int bit = j << 2; | |
| const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3); | |
| const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3); | |
| const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2); | |
| const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2); | |
| const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3); | |
| const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3); | |
| const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2); | |
| const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2); | |
| const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3); | |
| const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3); | |
| const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2); | |
| const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2); | |
| const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3); | |
| const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3); | |
| const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2); | |
| const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2); | |
| // load Q8 quants from block_q8_K.qs[QK_K] | |
| const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| // Dot product: we multiply the 2 low bits and the 1 high bit separately, so we can use _mm_maddubs_epi16, | |
| // and then subtract. The high-bit part already carries the offset of 4: it is 4 where the high bit is not set | |
| // and 0 where it is set, so subtracting it leaves the signed quant (q3l - 4, or q3l) times the Q8 value. | |
| __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0); | |
| __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1); | |
| __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2); | |
| __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3); | |
| __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4); | |
| __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5); | |
| __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6); | |
| __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7); | |
| __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0); | |
| __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1); | |
| __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2); | |
| __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3); | |
| __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4); | |
| __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5); | |
| __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6); | |
| __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7); | |
| p16_0 = _mm_sub_epi16(p16_0, q8s_0); | |
| p16_1 = _mm_sub_epi16(p16_1, q8s_1); | |
| p16_2 = _mm_sub_epi16(p16_2, q8s_2); | |
| p16_3 = _mm_sub_epi16(p16_3, q8s_3); | |
| p16_4 = _mm_sub_epi16(p16_4, q8s_4); | |
| p16_5 = _mm_sub_epi16(p16_5, q8s_5); | |
| p16_6 = _mm_sub_epi16(p16_6, q8s_6); | |
| p16_7 = _mm_sub_epi16(p16_7, q8s_7); | |
| // multiply with scales | |
| __m128i shuffle = _mm_set1_epi16(0x0100); | |
| p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7); | |
| // accumulate | |
| p16_0 = _mm_add_epi32(p16_0, p16_1); | |
| p16_2 = _mm_add_epi32(p16_2, p16_3); | |
| p16_4 = _mm_add_epi32(p16_4, p16_5); | |
| p16_6 = _mm_add_epi32(p16_6, p16_7); | |
| sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); | |
| sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6)); | |
| } | |
| // multiply with block scale and accumulate | |
| __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
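| // RISC-V vector (RVV) variant of the q3_K dot product: the 16 6-bit scales are unpacked to bytes once per | |
| // super-block, then each 128-quant chunk is decoded and reduced with widening multiplies and vredsum. | |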
| uint32_t aux[3]; | |
| uint32_t utmp[4]; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const uint8_t * restrict qh = x[i].hmask; | |
| const int8_t * restrict q8 = y[i].qs; | |
| memcpy(aux, x[i].scales, 12); | |
| utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); | |
| utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); | |
| utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); | |
| utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); | |
| int8_t * scale = (int8_t *)utmp; | |
| for (int j = 0; j < 16; ++j) scale[j] -= 32; | |
| size_t vl = 32; | |
| uint8_t m = 1; | |
| vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); | |
| vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl); | |
| int sum_t = 0; | |
| for (int j = 0; j < QK_K; j += 128) { | |
| vl = 32; | |
| // load Q3 | |
| vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl); | |
| vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl)); | |
| vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl)); | |
| vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl)); | |
| vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl)); | |
| // compute mask for subtraction | |
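| // q3_K quants decode to [-4, 3]; instead of OR-ing the high bit in, vmseq builds a mask of lanes whose | |
| // high bit is clear and the masked vsub subtracts 4 only in those lanes. | |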
| vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl); | |
| vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl); | |
| vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_m(vmask_0, q3_0, 0x4, vl); | |
| m <<= 1; | |
| vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl); | |
| vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl); | |
| vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_m(vmask_1, q3_1, 0x4, vl); | |
| m <<= 1; | |
| vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl); | |
| vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl); | |
| vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_m(vmask_2, q3_2, 0x4, vl); | |
| m <<= 1; | |
| vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl); | |
| vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl); | |
| vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_m(vmask_3, q3_3, 0x4, vl); | |
| m <<= 1; | |
| // load Q8 and take product with Q3 | |
| vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl); | |
| vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl); | |
| vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl); | |
| vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl); | |
| vl = 16; | |
| // retrieve lane to multiply with scale | |
| vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl); | |
| vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl); | |
| vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl); | |
| vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl); | |
| vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl); | |
| vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl); | |
| vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl); | |
| vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl); | |
| vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl); | |
| vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl); | |
| vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl); | |
| vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl); | |
| sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); | |
| q3 += 32; q8 += 128; scale += 8; | |
| } | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| sumf += d*sum_t; | |
| } | |
| *s = sumf; | |
| // scalar version | |
| // This function is written like this so the compiler can manage to vectorize most of it | |
| // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the | |
| // manually vectorized version above. Every other version I tried would run at least 4 times slower. | |
| // The ideal situation would be if we could just write the code once, and the compiler would | |
| // automatically produce the best possible set of machine instructions, instead of us having to manually | |
| // write vectorized versions for AVX, ARM_NEON, etc. | |
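| // The loop below first expands all QK_K quants of a super-block into aux8 with the -4 offset already | |
| // applied, then accumulates 16-quant chunks into eight int32 lanes weighted by (scale - 32), and only | |
| // multiplies by the float super-block scale once per block. | |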
| int8_t aux8[QK_K]; | |
| int16_t aux16[8]; | |
| float sums [8]; | |
| int32_t aux32[8]; | |
| memset(sums, 0, 8*sizeof(float)); | |
| uint32_t auxs[4]; | |
| const int8_t * scales = (const int8_t*)auxs; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const uint8_t * restrict hm = x[i].hmask; | |
| const int8_t * restrict q8 = y[i].qs; | |
| memset(aux32, 0, 8*sizeof(int32_t)); | |
| int8_t * restrict a = aux8; | |
| uint8_t m = 1; | |
| for (int j = 0; j < QK_K; j += 128) { | |
| for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; | |
| for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); | |
| a += 32; m <<= 1; | |
| for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; | |
| for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); | |
| a += 32; m <<= 1; | |
| for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; | |
| for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); | |
| a += 32; m <<= 1; | |
| for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; | |
| for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); | |
| a += 32; m <<= 1; | |
| q3 += 32; | |
| } | |
| a = aux8; | |
| memcpy(auxs, x[i].scales, 12); | |
| uint32_t tmp = auxs[2]; | |
| auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); | |
| auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); | |
| auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); | |
| auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; | |
| q8 += 8; a += 8; | |
| } | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; | |
| } | |
| for (int l = 0; l < 8; ++l) sumf += sums[l]; | |
| *s = sumf; | |
| } | |
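| // Below is the QK_K == 64 variant of ggml_vec_dot_q3_K_q8_K: 64-quant super-blocks with just four 4-bit | |
| // scales packed into two bytes; upstream, this definition and the QK_K == 256 one above are selected | |
| // with an #if QK_K == 256 / #else pair. | |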
| void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q3_K * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
| const int32x4_t vzero = vdupq_n_s32(0); | |
| const uint8x16_t m3b = vdupq_n_u8(0x3); | |
| const uint8x16_t mh = vdupq_n_u8(4); | |
| ggml_int8x16x4_t q3bytes; | |
| uint16_t aux16[2]; | |
| int8_t * scales = (int8_t *)aux16; | |
| float sum = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| ggml_uint8x16x4_t q3h; | |
| const uint8x8_t hbits = vld1_u8(x[i].hmask); | |
| const uint8x16_t q3bits = vld1q_u8(x[i].qs); | |
| const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(y[i].qs); | |
| const uint16_t a = *(const uint16_t *)x[i].scales; | |
| aux16[0] = a & 0x0f0f; | |
| aux16[1] = (a >> 4) & 0x0f0f; | |
| for (int j = 0; j < 4; ++j) scales[j] -= 8; | |
| int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]); | |
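| // The -4 offset of the quants is folded in up front: the vector loop below uses the raw 3-bit values in | |
| // [0, 7], so isum starts at -4 * sum_j(scale_j * bsum_j). The 0,2,1,3 ordering matches how the nibble | |
| // scales were split between aux16[0] (low nibbles) and aux16[1] (high nibbles) above. | |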
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const uint8x16_t htmp = vcombine_u8(hbits, vshr_n_u8(hbits, 1)); | |
| q3h.val[0] = vandq_u8(mh, vshlq_n_u8(htmp, 2)); | |
| q3h.val[1] = vandq_u8(mh, htmp); | |
| q3h.val[2] = vandq_u8(mh, vshrq_n_u8(htmp, 2)); | |
| q3h.val[3] = vandq_u8(mh, vshrq_n_u8(htmp, 4)); | |
| q3bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q3bits, m3b), q3h.val[0])); | |
| q3bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 2), m3b), q3h.val[1])); | |
| q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2])); | |
| q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6), q3h.val[3])); | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0]; | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2]; | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1]; | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3]; | |
| sum += d * isum; | |
| } | |
| *s = sum; | |
| const __m256i m3 = _mm256_set1_epi8(3); | |
| const __m256i m1 = _mm256_set1_epi8(1); | |
| __m256 acc = _mm256_setzero_ps(); | |
| uint64_t aux64; | |
| uint16_t aux16[2]; | |
| const int8_t * aux8 = (const int8_t *)aux16; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint16_t a = *(const uint16_t *)x[i].scales; | |
| aux16[0] = a & 0x0f0f; | |
| aux16[1] = (a >> 4) & 0x0f0f; | |
| const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8)); | |
| const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8)); | |
| memcpy(&aux64, x[i].hmask, 8); | |
| const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0); | |
| __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux); | |
| __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4); | |
| q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2); | |
| q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2); | |
| // load low 2 bits | |
| const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3); | |
| // prepare low and high bits | |
| const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits); | |
| const __m256i q3l_0 = _mm256_and_si256(q3aux, m3); | |
| const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3); | |
| // load Q8 quants | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0)); | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32)); | |
| // Dot product: we multiply the 2 low bits and the 1 high bit separately, so we can use _mm256_maddubs_epi16, | |
| // and then subtract. The high-bit part already carries the offset of 4: it is 4 where the high bit is not set | |
| // and 0 where it is set, so subtracting it leaves the signed quant (q3l - 4, or q3l) times the Q8 value. | |
| const __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); | |
| const __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); | |
| __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); | |
| __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); | |
| p16_0 = _mm256_sub_epi16(p16_0, q8s_0); | |
| p16_1 = _mm256_sub_epi16(p16_1, q8s_1); | |
| // multiply with scales | |
| p16_0 = _mm256_madd_epi16(scale_0, p16_0); | |
| p16_1 = _mm256_madd_epi16(scale_1, p16_1); | |
| p16_0 = _mm256_add_epi32(p16_0, p16_1); | |
| // multiply with block scale and accumulate | |
| acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16_0), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
| const __m128i m3 = _mm_set1_epi8(3); | |
| const __m128i m1 = _mm_set1_epi8(1); | |
| __m256 acc = _mm256_setzero_ps(); | |
| uint64_t aux64; | |
| uint16_t aux16[2]; | |
| const int8_t * aux8 = (const int8_t *)aux16; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint16_t a = *(const uint16_t *)x[i].scales; | |
| aux16[0] = a & 0x0f0f; | |
| aux16[1] = (a >> 4) & 0x0f0f; | |
| const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8); | |
| const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8); | |
| const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8); | |
| const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8); | |
| memcpy(&aux64, x[i].hmask, 8); | |
| __m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0); | |
| __m128i q3h_1 = _mm_srli_epi16(q3h_0, 2); | |
| __m128i q3h_2 = _mm_srli_epi16(q3h_0, 4); | |
| __m128i q3h_3 = _mm_srli_epi16(q3h_0, 6); | |
| q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2); | |
| q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2); | |
| q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2); | |
| q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2); | |
| // load low 2 bits | |
| const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3); | |
| // prepare low and high bits | |
| const __m128i q3l_0 = _mm_and_si128(q3bits, m3); | |
| const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3); | |
| const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3); | |
| const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3); | |
| // load Q8 quants | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0)); | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32)); | |
| // Dot product: we multiply the 2 low bits and the 1 high bit separately, so we can use _mm_maddubs_epi16, | |
| // and then subtract. The high-bit part already carries the offset of 4: it is 4 where the high bit is not set | |
| // and 0 where it is set, so subtracting it leaves the signed quant (q3l - 4, or q3l) times the Q8 value. | |
| const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0)); | |
| const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1)); | |
| const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0)); | |
| const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1)); | |
| __m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0)); | |
| __m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1)); | |
| __m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0)); | |
| __m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1)); | |
| p16_0 = _mm_sub_epi16(p16_0, q8s_0); | |
| p16_1 = _mm_sub_epi16(p16_1, q8s_1); | |
| p16_2 = _mm_sub_epi16(p16_2, q8s_2); | |
| p16_3 = _mm_sub_epi16(p16_3, q8s_3); | |
| // multiply with scales | |
| p16_0 = _mm_madd_epi16(scale_0, p16_0); | |
| p16_1 = _mm_madd_epi16(scale_1, p16_1); | |
| p16_2 = _mm_madd_epi16(scale_2, p16_2); | |
| p16_3 = _mm_madd_epi16(scale_3, p16_3); | |
| p16_0 = _mm_add_epi32(p16_0, p16_2); | |
| p16_1 = _mm_add_epi32(p16_1, p16_3); | |
| __m256i p16 = MM256_SET_M128I(p16_1, p16_0); | |
| // multiply with block scale and accumulate | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
| uint16_t aux16[2]; | |
| int8_t * scales = (int8_t *)aux16; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint16_t a = *(const uint16_t *)x[i].scales; | |
| aux16[0] = a & 0x0f0f; | |
| aux16[1] = (a >> 4) & 0x0f0f; | |
| for (int j = 0; j < 4; ++j) scales[j] -= 8; | |
| int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]); | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); | |
| // load qh | |
| vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(x[i].hmask, 8); | |
| vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8)); | |
| size_t vl = 16; | |
| // extend and combine both qh_x1 and qh_x2 | |
| vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl); | |
| vuint8mf2_t qh_0 = __riscv_vand_vx_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl); | |
| vuint8mf2_t qh_1 = __riscv_vand_vx_u8mf2(qh_x, 0x4, vl); | |
| vuint8mf2_t qh_2 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl); | |
| vuint8mf2_t qh_3 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), 0x4, vl); | |
| // load Q3 | |
| vuint8mf2_t q3_x = __riscv_vle8_v_u8mf2(q3, vl); | |
| vuint8mf2_t q3h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q3_x, 0x3, vl), qh_0, vl); | |
| vuint8mf2_t q3h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 2, vl), 0x3, vl), qh_1, vl); | |
| vuint8mf2_t q3h_2 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 4, vl), 0x3, vl), qh_2, vl); | |
| vuint8mf2_t q3h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 0x6, vl), qh_3, vl); | |
| vint8mf2_t q3_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_0); | |
| vint8mf2_t q3_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_1); | |
| vint8mf2_t q3_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_2); | |
| vint8mf2_t q3_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_3); | |
| // load Q8 and take product with Q3 | |
| vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q3_0, __riscv_vle8_v_i8mf2(q8, vl), vl); | |
| vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q3_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl); | |
| vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q3_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl); | |
| vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q3_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl); | |
| vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl); | |
| vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl); | |
| vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl); | |
| vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl); | |
| isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scales[0]; | |
| isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scales[2]; | |
| isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scales[1]; | |
| isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scales[3]; | |
| sumf += d * isum; | |
| } | |
| *s = sumf; | |
| int8_t aux8[QK_K]; | |
| int16_t aux16[8]; | |
| float sums [8]; | |
| int32_t aux32[8]; | |
| int32_t scales[4]; | |
| memset(sums, 0, 8*sizeof(float)); | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const uint8_t * restrict hm = x[i].hmask; | |
| const int8_t * restrict q8 = y[i].qs; | |
| int8_t * restrict a = aux8; | |
| for (int l = 0; l < 8; ++l) { | |
| a[l+ 0] = (int8_t)((q3[l+0] >> 0) & 3) - (hm[l] & 0x01 ? 0 : 4); | |
| a[l+ 8] = (int8_t)((q3[l+8] >> 0) & 3) - (hm[l] & 0x02 ? 0 : 4); | |
| a[l+16] = (int8_t)((q3[l+0] >> 2) & 3) - (hm[l] & 0x04 ? 0 : 4); | |
| a[l+24] = (int8_t)((q3[l+8] >> 2) & 3) - (hm[l] & 0x08 ? 0 : 4); | |
| a[l+32] = (int8_t)((q3[l+0] >> 4) & 3) - (hm[l] & 0x10 ? 0 : 4); | |
| a[l+40] = (int8_t)((q3[l+8] >> 4) & 3) - (hm[l] & 0x20 ? 0 : 4); | |
| a[l+48] = (int8_t)((q3[l+0] >> 6) & 3) - (hm[l] & 0x40 ? 0 : 4); | |
| a[l+56] = (int8_t)((q3[l+8] >> 6) & 3) - (hm[l] & 0x80 ? 0 : 4); | |
| } | |
| scales[0] = (x[i].scales[0] & 0xF) - 8; | |
| scales[1] = (x[i].scales[0] >> 4) - 8; | |
| scales[2] = (x[i].scales[1] & 0xF) - 8; | |
| scales[3] = (x[i].scales[1] >> 4) - 8; | |
| memset(aux32, 0, 8*sizeof(int32_t)); | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux16[l] += q8[l] * a[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l]; | |
| } | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; | |
| } | |
| for (int l = 0; l < 8; ++l) sumf += sums[l]; | |
| *s = sumf; | |
| } | |
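| // ggml_vec_dot_q4_K_q8_K (QK_K == 256): q4_K super-blocks carry an fp16 scale d and min dmin plus eight | |
| // 6-bit (scale, min) pairs for their 32-quant sub-blocks, so the dot product is computed as | |
| // sum_j d*scale_j*sum(q4*q8) minus dmin*sum_j(min_j*bsum_j), with bsum_j taken from the Q8 block sums. | |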
| void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q4_K * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
| static const uint32_t kmask1 = 0x3f3f3f3f; | |
| static const uint32_t kmask2 = 0x0f0f0f0f; | |
| static const uint32_t kmask3 = 0x03030303; | |
| uint32_t utmp[4]; | |
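| // After the kmask shuffles below, utmp[0..1] hold the eight 6-bit sub-block scales and utmp[2..3] the | |
| // eight 6-bit mins as plain bytes. | |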
| const uint8x16_t m4b = vdupq_n_u8(0xf); | |
| const int32x4_t mzero = vdupq_n_s32(0); | |
| ggml_int8x16x2_t q4bytes; | |
| ggml_int8x16x2_t q8bytes; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); | |
| memcpy(utmp, x[i].scales, 12); | |
| uint32x2_t mins8 = { 0 }; | |
| mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); | |
| mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); | |
| utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); | |
| utmp[0] &= kmask1; | |
| const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); | |
| const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), | |
| vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); | |
| sumf -= dmin * vaddvq_s32(prod); | |
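| // Each min covers 32 quants, i.e. two of the 16-element bsums, so the bsums were added pairwise (vpaddq | |
| // above) before being dotted with the mins; the result is subtracted, scaled by dmin. | |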
| const uint8_t * scales = (const uint8_t *)utmp; | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| int32_t sumi1 = 0; | |
| int32_t sumi2 = 0; | |
| for (int j = 0; j < QK_K/64; ++j) { | |
| const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; | |
| q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; | |
| q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); | |
| q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); | |
| const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); | |
| sumi1 += vaddvq_s32(p1) * scales[2*j+0]; | |
| q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; | |
| q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); | |
| q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); | |
| const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); | |
| sumi2 += vaddvq_s32(p2) * scales[2*j+1]; | |
| } | |
| sumf += d * (sumi1 + sumi2); | |
| } | |
| *s = sumf; | |
| const __m256i m4 = _mm256_set1_epi8(0xF); | |
| __m256 acc = _mm256_setzero_ps(); | |
| __m128 acc_m = _mm_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| memcpy(utmp, x[i].scales, 12); | |
| utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); | |
| const uint32_t uaux = utmp[1] & kmask1; | |
| utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); | |
| utmp[2] = uaux; | |
| utmp[0] &= kmask1; | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); | |
| const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); | |
| const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); | |
| const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); | |
| acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m); | |
| const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); | |
| const __m256i scales = MM256_SET_M128I(sc128, sc128); | |
| __m256i sumi = _mm256_setzero_si256(); | |
| for (int j = 0; j < QK_K/64; ++j) { | |
| const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0)); | |
| const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); | |
| const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; | |
| const __m256i q4l = _mm256_and_si256(q4bits, m4); | |
| const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); | |
| const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); | |
| p16l = _mm256_madd_epi16(scale_l, p16l); | |
| const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); | |
| p16h = _mm256_madd_epi16(scale_h, p16h); | |
| const __m256i sumj = _mm256_add_epi32(p16l, p16h); | |
| sumi = _mm256_add_epi32(sumi, sumj); | |
| } | |
| __m256 vd = _mm256_set1_ps(d); | |
| acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); | |
| } | |
| acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); | |
| acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); | |
| *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); | |
| const __m128i m4 = _mm_set1_epi8(0xF); | |
| const __m128i m2 = _mm_set1_epi8(0x2); | |
| __m256 acc = _mm256_setzero_ps(); | |
| __m128 acc_m = _mm_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| memcpy(utmp, x[i].scales, 12); | |
| utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); | |
| const uint32_t uaux = utmp[1] & kmask1; | |
| utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); | |
| utmp[2] = uaux; | |
| utmp[0] &= kmask1; | |
| const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); | |
| const __m128i scales = _mm_cvtepu8_epi16(utmps); | |
| const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); | |
| const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); | |
| const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); | |
| const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); | |
| const __m128i prod = _mm_madd_epi16(mins, q8s); | |
| acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m); | |
| __m128i sumi_0 = _mm_setzero_si128(); | |
| __m128i sumi_1 = _mm_setzero_si128(); | |
| __m128i shuffle = _mm_set1_epi16(0x0100); | |
| for (int j = 0; j < QK_K/64; ++j) { | |
| const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; | |
| const __m128i q4l_0 = _mm_and_si128(q4bits, m4); | |
| const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); | |
| q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; | |
| const __m128i q4l_1 = _mm_and_si128(q4bits, m4); | |
| const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); | |
| const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0); | |
| p16l = _mm_madd_epi16(scale_l, p16l); | |
| sumi_0 = _mm_add_epi32(sumi_0, p16l); | |
| const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| p16l = _mm_maddubs_epi16(q4l_1, q8l_1); | |
| p16l = _mm_madd_epi16(scale_l, p16l); | |
| sumi_1 = _mm_add_epi32(sumi_1, p16l); | |
| const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0); | |
| p16h = _mm_madd_epi16(scale_h, p16h); | |
| sumi_0 = _mm_add_epi32(sumi_0, p16h); | |
| const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| p16h = _mm_maddubs_epi16(q4h_1, q8h_1); | |
| p16h = _mm_madd_epi16(scale_h, p16h); | |
| sumi_1 = _mm_add_epi32(sumi_1, p16h); | |
| } | |
| __m256 vd = _mm256_set1_ps(d); | |
| __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); | |
| acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); | |
| } | |
| acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); | |
| acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); | |
| *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); | |
| const uint8_t * scales = (const uint8_t*)&utmp[0]; | |
| const uint8_t * mins = (const uint8_t*)&utmp[2]; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| size_t vl = 8; | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl); | |
| vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl); | |
| vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl); | |
| memcpy(utmp, x[i].scales, 12); | |
| utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); | |
| const uint32_t uaux = utmp[1] & kmask1; | |
| utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); | |
| utmp[2] = uaux; | |
| utmp[0] &= kmask1; | |
| vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl); | |
| vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl)); | |
| vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl); | |
| vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); | |
| sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi); | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| vl = 32; | |
| int32_t sum_1 = 0; | |
| int32_t sum_2 = 0; | |
| vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1); | |
| for (int j = 0; j < QK_K/64; ++j) { | |
| // load Q4 | |
| vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl); | |
| // load Q8 and multiply it with lower Q4 nibble | |
| vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); | |
| vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl)); | |
| vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl); | |
| vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl); | |
| sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0]; | |
| // load Q8 and multiply it with upper Q4 nibble | |
| vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl); | |
| vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl)); | |
| vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl); | |
| vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl); | |
| sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1]; | |
| q4 += 32; q8 += 64; | |
| } | |
| sumf += d*(sum_1 + sum_2); | |
| } | |
| *s = sumf; | |
| const uint8_t * scales = (const uint8_t*)&utmp[0]; | |
| const uint8_t * mins = (const uint8_t*)&utmp[2]; | |
| int8_t aux8[QK_K]; | |
| int16_t aux16[8]; | |
| float sums [8]; | |
| int32_t aux32[8]; | |
| memset(sums, 0, 8*sizeof(float)); | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| memset(aux32, 0, 8*sizeof(int32_t)); | |
| int8_t * restrict a = aux8; | |
| for (int j = 0; j < QK_K/64; ++j) { | |
| for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); | |
| a += 32; | |
| for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); | |
| a += 32; q4 += 32; | |
| } | |
| memcpy(utmp, x[i].scales, 12); | |
| utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); | |
| const uint32_t uaux = utmp[1] & kmask1; | |
| utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); | |
| utmp[2] = uaux; | |
| utmp[0] &= kmask1; | |
| int sumi = 0; | |
| for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; | |
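| // mins[j/2]: each 6-bit min spans 32 quants, i.e. two consecutive 16-element bsums. | |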
| a = aux8; | |
| int is = 0; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| int32_t scale = scales[is++]; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| } | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; | |
| const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; | |
| sumf -= dmin * sumi; | |
| } | |
| for (int l = 0; l < 8; ++l) sumf += sums[l]; | |
| *s = sumf; | |
| } | |
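| // QK_K == 64 variant of ggml_vec_dot_q4_K_q8_K: here x[i].d is a pair of fp16 values, d[0] acting as the | |
| // scale and d[1] as the min, and only four 4-bit sub-block scales are stored. | |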
| void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q4_K * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
| const uint8x16_t m4b = vdupq_n_u8(0xf); | |
| const int32x4_t mzero = vdupq_n_s32(0); | |
| float sumf = 0; | |
| ggml_int8x16x2_t q4bytes; | |
| ggml_int8x16x4_t q8bytes; | |
| float sum_mins = 0.f; | |
| uint16_t aux16[2]; | |
| const uint8_t * restrict scales = (const uint8_t *)aux16; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint16_t * restrict a = (const uint16_t *)x[i].scales; | |
| aux16[0] = a[0] & 0x0f0f; | |
| aux16[1] = (a[0] >> 4) & 0x0f0f; | |
| const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]); | |
| sum_mins += y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * summi; | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]); | |
| const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); | |
| q8bytes = ggml_vld1q_s8_x4(q8); | |
| q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); | |
| q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); | |
| const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); | |
| const int32_t sumi1 = vaddvq_s32(p1) * scales[0]; | |
| q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); | |
| q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); | |
| const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]); | |
| const int32_t sumi2 = vaddvq_s32(p2) * scales[1]; | |
| sumf += d * (sumi1 + sumi2); | |
| } | |
| *s = sumf - sum_mins; | |
| const __m256i m4 = _mm256_set1_epi8(0xF); | |
| __m256 acc = _mm256_setzero_ps(); | |
| float summs = 0; | |
| uint16_t aux16[2]; | |
| const uint8_t * scales = (const uint8_t *)aux16; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d; | |
| const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d; | |
| const __m256 vd = _mm256_set1_ps(d); | |
| const uint16_t * a = (const uint16_t *)x[i].scales; | |
| aux16[0] = a[0] & 0x0f0f; | |
| aux16[1] = (a[0] >> 4) & 0x0f0f; | |
| summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); | |
| const __m256i q4l = _mm256_and_si256(q4bits, m4); | |
| const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); | |
| const __m256i q8l = _mm256_loadu_si256((const __m256i*)(q8+ 0)); | |
| const __m256i q8h = _mm256_loadu_si256((const __m256i*)(q8+32)); | |
| const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); | |
| const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); | |
| const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l); | |
| acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc); | |
| const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h); | |
| acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc); | |
| } | |
| *s = hsum_float_8(acc) - summs; | |
| const __m128i m4 = _mm_set1_epi8(0xF); | |
| __m256 acc = _mm256_setzero_ps(); | |
| float summs = 0; | |
| uint16_t aux16[2]; | |
| const uint8_t * scales = (const uint8_t *)aux16; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d; | |
| const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d; | |
| const __m256 vd = _mm256_set1_ps(d); | |
| const uint16_t * a = (const uint16_t *)x[i].scales; | |
| aux16[0] = a[0] & 0x0f0f; | |
| aux16[1] = (a[0] >> 4) & 0x0f0f; | |
| summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); | |
| const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0); | |
| const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1); | |
| const __m128i q4_0 = _mm_and_si128(q4bits_0, m4); | |
| const __m128i q4_1 = _mm_and_si128(q4bits_1, m4); | |
| const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4); | |
| const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4); | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0)); | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32)); | |
| const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0)); | |
| const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1)); | |
| const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0)); | |
| const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1)); | |
| const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0); | |
| const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1); | |
| acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc); | |
| const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2); | |
| const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3); | |
| acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc); | |
| } | |
| *s = hsum_float_8(acc) - summs; | |
| uint16_t s16[2]; | |
| const uint8_t * restrict scales = (const uint8_t *)s16; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint16_t * restrict b = (const uint16_t *)x[i].scales; | |
| s16[0] = b[0] & 0x0f0f; | |
| s16[1] = (b[0] >> 4) & 0x0f0f; | |
| sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]); | |
| size_t vl = 32; | |
| vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1); | |
| // load Q4 | |
| vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl); | |
| // load Q8 and multiply it with lower Q4 nibble | |
| vint8m1_t q4_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl)); | |
| vint16m2_t va_0 = __riscv_vwmul_vv_i16m2(q4_a, __riscv_vle8_v_i8m1(q8, vl), vl); | |
| vint16m1_t aux1 = __riscv_vredsum_vs_i16m2_i16m1(va_0, vzero, vl); | |
| sumf += d*scales[0]*__riscv_vmv_x_s_i16m1_i16(aux1); | |
| // load Q8 and multiply it with upper Q4 nibble | |
| vint8m1_t q4_s = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl)); | |
| vint16m2_t va_1 = __riscv_vwmul_vv_i16m2(q4_s, __riscv_vle8_v_i8m1(q8+32, vl), vl); | |
| vint16m1_t aux2 = __riscv_vredsum_vs_i16m2_i16m1(va_1, vzero, vl); | |
| sumf += d*scales[1]*__riscv_vmv_x_s_i16m1_i16(aux2); | |
| } | |
| *s = sumf; | |
| uint8_t aux8[QK_K]; | |
| int16_t aux16[16]; | |
| float sums [8]; | |
| memset(sums, 0, 8*sizeof(float)); | |
| uint16_t s16[2]; | |
| const uint8_t * restrict scales = (const uint8_t *)s16; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| uint8_t * restrict a = aux8; | |
| for (int l = 0; l < 32; ++l) a[l+ 0] = q4[l] & 0xF; | |
| for (int l = 0; l < 32; ++l) a[l+32] = q4[l] >> 4; | |
| const uint16_t * restrict b = (const uint16_t *)x[i].scales; | |
| s16[0] = b[0] & 0x0f0f; | |
| s16[1] = (b[0] >> 4) & 0x0f0f; | |
| sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]); | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l]; | |
| q8 += 16; a += 16; | |
| for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l]; | |
| q8 += 16; a += 16; | |
| const float dl = d * scales[j]; | |
| for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l+8]); | |
| } | |
| } | |
| for (int l = 0; l < 8; ++l) sumf += sums[l]; | |
| *s = sumf; | |
| } | |
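| // ggml_vec_dot_q5_K_q8_K (QK_K == 256): like q4_K, but each quant gets a fifth bit from the qh array; the | |
| // low nibble and the high bit (moved to bit position 4) are combined before the Q8 dot products, and the | |
| // same d / dmin scale-and-min correction as in q4_K is applied. | |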
| void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q5_K * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
| static const uint32_t kmask1 = 0x3f3f3f3f; | |
| static const uint32_t kmask2 = 0x0f0f0f0f; | |
| static const uint32_t kmask3 = 0x03030303; | |
| uint32_t utmp[4]; | |
| const uint8x16_t m4b = vdupq_n_u8(0xf); | |
| const uint8x16_t mone = vdupq_n_u8(1); | |
| const uint8x16_t mtwo = vdupq_n_u8(2); | |
| const int32x4_t mzero = vdupq_n_s32(0); | |
| ggml_int8x16x4_t q5bytes; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); | |
| memcpy(utmp, x[i].scales, 12); | |
| utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); | |
| const uint32_t uaux = utmp[1] & kmask1; | |
| utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); | |
| utmp[2] = uaux; | |
| utmp[0] &= kmask1; | |
| const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8); | |
| const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8)); | |
| const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), | |
| vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); | |
| int32_t sumi_mins = vaddvq_s32(prod); | |
| const uint8_t * scales = (const uint8_t *)utmp; | |
| const uint8_t * restrict q5 = x[i].qs; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); | |
| ggml_uint8x16x4_t q5h; | |
| int32_t sumi = 0; | |
| for (int j = 0; j < QK_K/64; ++j) { | |
| const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32; | |
| const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; | |
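| // Extract the fifth bit: bits 0 and 1 of each qh byte are masked (mone, mtwo), shifted up to bit 4 and | |
| // OR-ed onto the low and high nibbles respectively; qhbits is then shifted right by 2 so the next | |
| // iteration of j sees the following pair of bits. | |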
| q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); | |
| q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); | |
| q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3); | |
| q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3); | |
| qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2); | |
| qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2); | |
| q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0])); | |
| q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1])); | |
| q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2])); | |
| q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3])); | |
| sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++; | |
| sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++; | |
| } | |
| sumf += d * sumi - dmin * sumi_mins; | |
| } | |
| *s = sumf; | |
| const __m256i m4 = _mm256_set1_epi8(0xF); | |
| const __m128i mzero = _mm_setzero_si128(); | |
| const __m256i mone = _mm256_set1_epi8(1); | |
| __m256 acc = _mm256_setzero_ps(); | |
| float summs = 0.f; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q5 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| memcpy(utmp, x[i].scales, 12); | |
| utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); | |
| const uint32_t uaux = utmp[1] & kmask1; | |
| utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); | |
| utmp[2] = uaux; | |
| utmp[0] &= kmask1; | |
| // TODO: fallback for QK_K != 256 (in the upstream source this is the #else branch of the scale | |
| // unpacking above, so d and dmin are simply zeroed out here) | |
| const float d = 0, dmin = 0; | |
| const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); | |
| const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); | |
| const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); | |
| const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); | |
| const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); | |
| summs += dmin * _mm_extract_epi32(hsum, 0); | |
| const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); | |
| const __m256i scales = MM256_SET_M128I(sc128, sc128); | |
| const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh); | |
| __m256i hmask = mone; | |
| __m256i sumi = _mm256_setzero_si256(); | |
| int bit = 0; | |
| for (int j = 0; j < QK_K/64; ++j) { | |
| const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0)); | |
| const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); | |
| const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32; | |
| const __m256i q5l_0 = _mm256_and_si256(q5bits, m4); | |
| const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4); | |
| const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0); | |
| hmask = _mm256_slli_epi16(hmask, 1); | |
| const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4); | |
| const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4); | |
| const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1); | |
| hmask = _mm256_slli_epi16(hmask, 1); | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0); | |
| __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1); | |
| p16_0 = _mm256_madd_epi16(scale_0, p16_0); | |
| p16_1 = _mm256_madd_epi16(scale_1, p16_1); | |
| sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); | |
| } | |
| __m256 vd = _mm256_set1_ps(d); | |
| acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); | |
| } | |
| *s = hsum_float_8(acc) + summs; | |
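| // AVX implementation: the integer work is done in 128-bit halves | |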
| const __m128i m4 = _mm_set1_epi8(0xF); | |
| const __m128i mzero = _mm_setzero_si128(); | |
| const __m128i mone = _mm_set1_epi8(1); | |
| const __m128i m2 = _mm_set1_epi8(2); | |
| __m256 acc = _mm256_setzero_ps(); | |
| float summs = 0.f; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); | |
| const uint8_t * restrict q5 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| memcpy(utmp, x[i].scales, 12); | |
| utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); | |
| const uint32_t uaux = utmp[1] & kmask1; | |
| utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); | |
| utmp[2] = uaux; | |
| utmp[0] &= kmask1; | |
| const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); | |
| const __m128i scales = _mm_cvtepu8_epi16(utmps); | |
| const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); | |
| const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); | |
| const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); | |
| const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); | |
| const __m128i prod = _mm_madd_epi16(mins, q8s); | |
| const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); | |
| summs += dmin * _mm_extract_epi32(hsum, 0); | |
| const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]); | |
| const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]); | |
| __m128i hmask = mone; | |
| __m128i sumi_0 = _mm_setzero_si128(); | |
| __m128i sumi_1 = _mm_setzero_si128(); | |
| int bit = 0; | |
| __m128i shuffle = _mm_set1_epi16(0x0100); | |
| for (int j = 0; j < QK_K/64; ++j) { | |
| const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle); | |
| shuffle = _mm_add_epi16(shuffle, m2); | |
| const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16; | |
| const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16; | |
| __m128i q5l_0 = _mm_and_si128(q5bits_0, m4); | |
| __m128i q5l_1 = _mm_and_si128(q5bits_1, m4); | |
| __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4); | |
| __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4); | |
| __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0); | |
| __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1); | |
| hmask = _mm_slli_epi16(hmask, 1); | |
| __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0); | |
| __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1); | |
| p16_0 = _mm_madd_epi16(scale_0, p16_0); | |
| p16_1 = _mm_madd_epi16(scale_0, p16_1); | |
| q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4); | |
| q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4); | |
| q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4); | |
| q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4); | |
| q5_0 = _mm_add_epi8(q5l_0, q5h_0); | |
| q5_1 = _mm_add_epi8(q5l_1, q5h_1); | |
| hmask = _mm_slli_epi16(hmask, 1); | |
| q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0); | |
| __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1); | |
| p16_2 = _mm_madd_epi16(scale_1, p16_2); | |
| p16_3 = _mm_madd_epi16(scale_1, p16_3); | |
| sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); | |
| sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); | |
| } | |
| __m256 vd = _mm256_set1_ps(d); | |
| __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); | |
| acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); | |
| } | |
| *s = hsum_float_8(acc) + summs; | |
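| // RISC-V vector (RVV) implementation | |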
| const uint8_t * scales = (const uint8_t*)&utmp[0]; | |
| const uint8_t * mins = (const uint8_t*)&utmp[2]; | |
| float sumf = 0; | |
| float sums = 0.0; | |
| size_t vl; | |
| for (int i = 0; i < nb; ++i) { | |
| vl = 8; | |
| const uint8_t * restrict q5 = x[i].qs; | |
| const uint8_t * restrict hm = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; | |
| vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl); | |
| vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl); | |
| vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl); | |
| memcpy(utmp, x[i].scales, 12); | |
| utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); | |
| const uint32_t uaux = utmp[1] & kmask1; | |
| utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); | |
| utmp[2] = uaux; | |
| utmp[0] &= kmask1; | |
| vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl); | |
| vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl)); | |
| vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl); | |
| vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); | |
| sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi); | |
| vl = 32; | |
| int32_t aux32 = 0; | |
| int is = 0; | |
| uint8_t m = 1; | |
| vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); | |
| vuint8m1_t vqh = __riscv_vle8_v_u8m1(hm, vl); | |
| for (int j = 0; j < QK_K/64; ++j) { | |
| // load Q5 and Q8 | |
| vuint8m1_t q5_x = __riscv_vle8_v_u8m1(q5, vl); | |
| vint8m1_t q8_y1 = __riscv_vle8_v_i8m1(q8, vl); | |
| vint8m1_t q8_y2 = __riscv_vle8_v_i8m1(q8+32, vl); | |
| // compute mask for addition | |
| vint8m1_t q5_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q5_x, 0x0F, vl)); | |
| vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl); | |
| vbool8_t vmask_1 = __riscv_vmsne_vx_u8m1_b8(qh_m1, 0, vl); | |
| vint8m1_t q5_m1 = __riscv_vadd_vx_i8m1_m(vmask_1, q5_a, 16, vl); | |
| m <<= 1; | |
| vint8m1_t q5_l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q5_x, 0x04, vl)); | |
| vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl); | |
| vbool8_t vmask_2 = __riscv_vmsne_vx_u8m1_b8(qh_m2, 0, vl); | |
| vint8m1_t q5_m2 = __riscv_vadd_vx_i8m1_m(vmask_2, q5_l, 16, vl); | |
| m <<= 1; | |
| vint16m2_t v0 = __riscv_vwmul_vv_i16m2(q5_m1, q8_y1, vl); | |
| vint16m2_t v1 = __riscv_vwmul_vv_i16m2(q5_m2, q8_y2, vl); | |
| vint32m4_t vs1 = __riscv_vwmul_vx_i32m4(v0, scales[is++], vl); | |
| vint32m4_t vs2 = __riscv_vwmul_vx_i32m4(v1, scales[is++], vl); | |
| vint32m1_t vacc1 = __riscv_vredsum_vs_i32m4_i32m1(vs1, vzero, vl); | |
| vint32m1_t vacc2 = __riscv_vredsum_vs_i32m4_i32m1(vs2, vzero, vl); | |
| aux32 += __riscv_vmv_x_s_i32m1_i32(vacc1) + __riscv_vmv_x_s_i32m1_i32(vacc2); | |
| q5 += 32; q8 += 64; | |
| } | |
| vfloat32m1_t vaux = __riscv_vfmul_vf_f32m1(__riscv_vfmv_v_f_f32m1(aux32, 1), d, 1); | |
| sums += __riscv_vfmv_f_s_f32m1_f32(vaux); | |
| } | |
| *s = sumf+sums; | |
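| // portable scalar implementation | |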
| const uint8_t * scales = (const uint8_t*)&utmp[0]; | |
| const uint8_t * mins = (const uint8_t*)&utmp[2]; | |
| int8_t aux8[QK_K]; | |
| int16_t aux16[8]; | |
| float sums [8]; | |
| int32_t aux32[8]; | |
| memset(sums, 0, 8*sizeof(float)); | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const uint8_t * restrict hm = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| memset(aux32, 0, 8*sizeof(int32_t)); | |
| int8_t * restrict a = aux8; | |
| uint8_t m = 1; | |
| for (int j = 0; j < QK_K/64; ++j) { | |
| for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); | |
| for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); | |
| a += 32; m <<= 1; | |
| for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); | |
| for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); | |
| a += 32; m <<= 1; | |
| q4 += 32; | |
| } | |
| memcpy(utmp, x[i].scales, 12); | |
| utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); | |
| const uint32_t uaux = utmp[1] & kmask1; | |
| utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); | |
| utmp[2] = uaux; | |
| utmp[0] &= kmask1; | |
| int sumi = 0; | |
| for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; | |
| a = aux8; | |
| int is = 0; | |
| for (int j = 0; j < QK_K/32; ++j) { | |
| int32_t scale = scales[is++]; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| } | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; | |
| const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; | |
| sumf -= dmin * sumi; | |
| } | |
| for (int l = 0; l < 8; ++l) sumf += sums[l]; | |
| *s = sumf; | |
| } | |
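| // Variant of ggml_vec_dot_q5_K_q8_K for the 64-weight super-block layout (4 scales, 8 high-bit bytes per block) | |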
| void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q5_K * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
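| // ARM NEON implementation | |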
| const uint8x16_t m4b = vdupq_n_u8(0xf); | |
| const uint8x16_t mh = vdupq_n_u8(16); | |
| const int32x4_t mzero = vdupq_n_s32(0); | |
| ggml_int8x16x4_t q5bytes; | |
| ggml_uint8x16x4_t q5h; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const int8_t * sc = x[i].scales; | |
| const uint8_t * restrict q5 = x[i].qs; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint8x8_t qhbits = vld1_u8(qh); | |
| const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); | |
| const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); | |
| const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1)); | |
| q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4)); | |
| q5h.val[1] = vbicq_u8(mh, vshlq_n_u8(htmp, 2)); | |
| q5h.val[2] = vbicq_u8(mh, htmp); | |
| q5h.val[3] = vbicq_u8(mh, vshrq_n_u8(htmp, 2)); | |
| q5bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[0], m4b)), vreinterpretq_s8_u8(q5h.val[0])); | |
| q5bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[1], m4b)), vreinterpretq_s8_u8(q5h.val[1])); | |
| q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2])); | |
| q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3])); | |
| int32_t sumi1 = sc[0] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0])); | |
| int32_t sumi2 = sc[1] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1])); | |
| int32_t sumi3 = sc[2] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2])); | |
| int32_t sumi4 = sc[3] * vaddvq_s32(ggml_vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3])); | |
| sumf += d * (sumi1 + sumi2 + sumi3 + sumi4); | |
| } | |
| *s = sumf; | |
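| // AVX2 implementation | |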
| const __m256i m4 = _mm256_set1_epi8(0xF); | |
| const __m256i mone = _mm256_set1_epi8(1); | |
| __m256 acc = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q5 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); | |
| const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0])); | |
| const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2])); | |
| int64_t aux64; | |
| memcpy(&aux64, x[i].qh, 8); | |
| const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64); | |
| const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128); | |
| const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4); | |
| const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4); | |
| const __m256i q5l_0 = _mm256_and_si256(q5bits, m4); | |
| const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4); | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0)); | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32)); | |
| const __m256i p16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5l_0, q8_0)); | |
| const __m256i p16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5l_1, q8_1)); | |
| const __m256i s16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5h_0, q8_0)); | |
| const __m256i s16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5h_1, q8_1)); | |
| const __m256i dot = _mm256_sub_epi32(_mm256_add_epi32(p16_0, p16_1), _mm256_add_epi32(s16_0, s16_1)); | |
| acc = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(dot), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
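| // AVX implementation (128-bit integer halves) | |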
| const __m128i m4 = _mm_set1_epi8(0xF); | |
| const __m128i mone = _mm_set1_epi8(1); | |
| __m256 acc = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q5 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); | |
| const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]); | |
| const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]); | |
| const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]); | |
| const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]); | |
| int64_t aux64; | |
| memcpy(&aux64, x[i].qh, 8); | |
| const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64); | |
| const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2); | |
| const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4); | |
| const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4); | |
| const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4); | |
| const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4); | |
| const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4); | |
| const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4); | |
| const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4); | |
| const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4); | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0)); | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32)); | |
| const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0))); | |
| const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1))); | |
| const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0))); | |
| const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1))); | |
| const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0))); | |
| const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1))); | |
| const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0))); | |
| const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1))); | |
| const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2)); | |
| const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3)); | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
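| // RISC-V vector implementation | |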
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const int8_t * sc = x[i].scales; | |
| const uint8_t * restrict q5 = x[i].qs; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); | |
| // load qh | |
| vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(qh, 8); | |
| vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8)); | |
| size_t vl = 16; | |
| // combine both qh_1 and qh_2 | |
| vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl); | |
| vuint8mf2_t qh_h0 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl); | |
| vuint8mf2_t qh_h1 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), vl), 16, vl); | |
| vuint8mf2_t qh_h2 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(qh_x, vl), 16, vl); | |
| vuint8mf2_t qh_h3 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl); | |
| vint8mf2_t qh_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h0); | |
| vint8mf2_t qh_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h1); | |
| vint8mf2_t qh_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h2); | |
| vint8mf2_t qh_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h3); | |
| // load q5 | |
| vuint8mf2_t q5_x1 = __riscv_vle8_v_u8mf2(q5, vl); | |
| vuint8mf2_t q5_x2 = __riscv_vle8_v_u8mf2(q5+16, vl); | |
| vint8mf2_t q5s_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x1, 0xF, vl)); | |
| vint8mf2_t q5s_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x2, 0xF, vl)); | |
| vint8mf2_t q5s_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x1, 0x4, vl)); | |
| vint8mf2_t q5s_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x2, 0x4, vl)); | |
| vint8mf2_t q5_0 = __riscv_vsub_vv_i8mf2(q5s_0, qh_0, vl); | |
| vint8mf2_t q5_1 = __riscv_vsub_vv_i8mf2(q5s_1, qh_1, vl); | |
| vint8mf2_t q5_2 = __riscv_vsub_vv_i8mf2(q5s_2, qh_2, vl); | |
| vint8mf2_t q5_3 = __riscv_vsub_vv_i8mf2(q5s_3, qh_3, vl); | |
| // load Q8 and multiply it with Q5 | |
| vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q5_0, __riscv_vle8_v_i8mf2(q8, vl), vl); | |
| vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q5_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl); | |
| vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q5_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl); | |
| vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q5_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl); | |
| vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl); | |
| vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl); | |
| vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl); | |
| vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl); | |
| int32_t sumi1 = sc[0] * __riscv_vmv_x_s_i32m1_i32(vs_0); | |
| int32_t sumi2 = sc[1] * __riscv_vmv_x_s_i32m1_i32(vs_1); | |
| int32_t sumi3 = sc[2] * __riscv_vmv_x_s_i32m1_i32(vs_2); | |
| int32_t sumi4 = sc[3] * __riscv_vmv_x_s_i32m1_i32(vs_3); | |
| sumf += d * (sumi1 + sumi2 + sumi3 + sumi4); | |
| } | |
| *s = sumf; | |
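| // portable scalar implementation | |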
| int8_t aux8[QK_K]; | |
| int16_t aux16[16]; | |
| float sums [8]; | |
| memset(sums, 0, 8*sizeof(float)); | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q4 = x[i].qs; | |
| const uint8_t * restrict hm = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| int8_t * restrict a = aux8; | |
| for (int l = 0; l < 32; ++l) { | |
| a[l+ 0] = q4[l] & 0xF; | |
| a[l+32] = q4[l] >> 4; | |
| } | |
| for (int is = 0; is < 8; ++is) { | |
| uint8_t m = 1 << is; | |
| for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16); | |
| } | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const int8_t * restrict sc = x[i].scales; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| const float dl = d * sc[j]; | |
| for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[8+l]); | |
| q8 += 16; a += 16; | |
| } | |
| } | |
| for (int l = 0; l < 8; ++l) sumf += sums[l]; | |
| *s = sumf; | |
| } | |
| void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q6_K * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
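| // ARM NEON implementation | |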
| float sum = 0; | |
| const uint8x16_t m4b = vdupq_n_u8(0xF); | |
| const int32x4_t vzero = vdupq_n_s32(0); | |
| //const int8x16_t m32s = vdupq_n_s8(32); | |
| const uint8x16_t mone = vdupq_n_u8(3); | |
| ggml_int8x16x4_t q6bytes; | |
| ggml_uint8x16x4_t q6h; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d_all = GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q6 = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const int8_t * restrict scale = x[i].scales; | |
| const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); | |
| const int8x16_t scales = vld1q_s8(scale); | |
| const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}}; | |
| const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])), | |
| vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))), | |
| vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])), | |
| vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1])))); | |
| int32_t isum_mins = vaddvq_s32(prod); | |
| int32_t isum = 0; | |
| for (int j = 0; j < QK_K/128; ++j) { | |
| ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32; | |
| ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64; | |
| ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; | |
| q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); | |
| q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); | |
| uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2); | |
| q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); | |
| shifted = vshrq_n_u8(qhbits.val[1], 2); | |
| q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); | |
| //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s); | |
| //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s); | |
| //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s); | |
| //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s); | |
| q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])); | |
| q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])); | |
| q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])); | |
| q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])); | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + | |
| vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + | |
| vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + | |
| vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; | |
| scale += 4; | |
| q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; | |
| shifted = vshrq_n_u8(qhbits.val[0], 4); | |
| q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4); | |
| shifted = vshrq_n_u8(qhbits.val[1], 4); | |
| q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4); | |
| shifted = vshrq_n_u8(qhbits.val[0], 6); | |
| q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); | |
| shifted = vshrq_n_u8(qhbits.val[1], 6); | |
| q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); | |
| //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s); | |
| //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s); | |
| //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s); | |
| //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s); | |
| q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])); | |
| q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])); | |
| q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])); | |
| q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])); | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + | |
| vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + | |
| vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + | |
| vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; | |
| scale += 4; | |
| } | |
| //sum += isum * d_all * y[i].d; | |
| sum += d_all * y[i].d * (isum - 32 * isum_mins); | |
| } | |
| *s = sum; | |
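| // AVX2 implementation | |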
| const __m256i m4 = _mm256_set1_epi8(0xF); | |
| const __m256i m2 = _mm256_set1_epi8(3); | |
| const __m256i m32s = _mm256_set1_epi8(32); | |
| __m256 acc = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q4 = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales); | |
| __m256i sumi = _mm256_setzero_si256(); | |
| int is = 0; | |
| for (int j = 0; j < QK_K/128; ++j) { | |
| const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); | |
| const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); | |
| const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); | |
| const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); | |
| is += 4; | |
| const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; | |
| const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; | |
| const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32; | |
| const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4); | |
| const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4); | |
| const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4); | |
| const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4); | |
| const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); | |
| const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1); | |
| const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2); | |
| const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3); | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); | |
| __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); | |
| __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2); | |
| __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3); | |
| __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); | |
| __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); | |
| __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2); | |
| __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3); | |
| p16_0 = _mm256_sub_epi16(p16_0, q8s_0); | |
| p16_1 = _mm256_sub_epi16(p16_1, q8s_1); | |
| p16_2 = _mm256_sub_epi16(p16_2, q8s_2); | |
| p16_3 = _mm256_sub_epi16(p16_3, q8s_3); | |
| p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); | |
| p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); | |
| p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2); | |
| p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3); | |
| sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); | |
| sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3)); | |
| } | |
| acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
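| // AVX implementation (128-bit integer halves) | |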
| const __m128i m4 = _mm_set1_epi8(0xF); | |
| const __m128i m3 = _mm_set1_epi8(3); | |
| const __m128i m32s = _mm_set1_epi8(32); | |
| const __m128i m2 = _mm_set1_epi8(2); | |
| __m256 acc = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q4 = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales); | |
| __m128i sumi_0 = _mm_setzero_si128(); | |
| __m128i sumi_1 = _mm_setzero_si128(); | |
| __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); | |
| for (int j = 0; j < QK_K/128; ++j) { | |
| const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16; | |
| const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16; | |
| const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4); | |
| const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4); | |
| const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4); | |
| const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4); | |
| const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4); | |
| const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4); | |
| const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4); | |
| const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4); | |
| const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; | |
| const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; | |
| const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; | |
| const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; | |
| const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0); | |
| const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1); | |
| const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2); | |
| const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3); | |
| const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4); | |
| const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5); | |
| const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6); | |
| const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7); | |
| const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; | |
| __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0); | |
| __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1); | |
| __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2); | |
| __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3); | |
| __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4); | |
| __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5); | |
| __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6); | |
| __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7); | |
| __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0); | |
| __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1); | |
| __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2); | |
| __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3); | |
| __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4); | |
| __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5); | |
| __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6); | |
| __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7); | |
| p16_0 = _mm_sub_epi16(p16_0, q8s_0); | |
| p16_1 = _mm_sub_epi16(p16_1, q8s_1); | |
| p16_2 = _mm_sub_epi16(p16_2, q8s_2); | |
| p16_3 = _mm_sub_epi16(p16_3, q8s_3); | |
| p16_4 = _mm_sub_epi16(p16_4, q8s_4); | |
| p16_5 = _mm_sub_epi16(p16_5, q8s_5); | |
| p16_6 = _mm_sub_epi16(p16_6, q8s_6); | |
| p16_7 = _mm_sub_epi16(p16_7, q8s_7); | |
| const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle); | |
| shuffle = _mm_add_epi8(shuffle, m2); | |
| const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle); | |
| shuffle = _mm_add_epi8(shuffle, m2); | |
| const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle); | |
| shuffle = _mm_add_epi8(shuffle, m2); | |
| const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle); | |
| shuffle = _mm_add_epi8(shuffle, m2); | |
| p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0); | |
| p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1); | |
| p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2); | |
| p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3); | |
| p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4); | |
| p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5); | |
| p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6); | |
| p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7); | |
| sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); | |
| sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); | |
| sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6)); | |
| sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7)); | |
| } | |
| __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
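| // RISC-V vector implementation | |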
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint8_t * restrict q6 = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const int8_t * restrict scale = x[i].scales; | |
| size_t vl; | |
| vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); | |
| int sum_t = 0; | |
| int is = 0; | |
| for (int j = 0; j < QK_K/128; ++j) { | |
| vl = 32; | |
| // load qh | |
| vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl); | |
| // load Q6 | |
| vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl); | |
| vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl); | |
| vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl); | |
| vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl); | |
| vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl); | |
| vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl); | |
| vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl); | |
| vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl); | |
| vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl); | |
| vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl); | |
| vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl); | |
| vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl); | |
| vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl); | |
| vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl); | |
| vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl); | |
| vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl); | |
| vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl); | |
| vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl); | |
| // load Q8 and take product | |
| vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl); | |
| vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl); | |
| vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl); | |
| vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl); | |
| vl = 16; | |
| vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl); | |
| vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl); | |
| vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl); | |
| vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl); | |
| vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl); | |
| vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl); | |
| vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl); | |
| vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl); | |
| vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl); | |
| vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl); | |
| vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl); | |
| vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl); | |
| sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); | |
| q6 += 64; qh += 32; q8 += 128; is=8; | |
| } | |
| sumf += d * sum_t; | |
| } | |
| *s = sumf; | |
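| // portable scalar implementation | |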
| int8_t aux8[QK_K]; | |
| int16_t aux16[8]; | |
| float sums [8]; | |
| int32_t aux32[8]; | |
| memset(sums, 0, 8*sizeof(float)); | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q4 = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| memset(aux32, 0, 8*sizeof(int32_t)); | |
| int8_t * restrict a = aux8; | |
| for (int j = 0; j < QK_K; j += 128) { | |
| for (int l = 0; l < 32; ++l) { | |
| a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; | |
| a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; | |
| a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; | |
| a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; | |
| } | |
| a += 128; | |
| q4 += 64; | |
| qh += 32; | |
| } | |
| a = aux8; | |
| int is = 0; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| int scale = x[i].scales[is++]; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| } | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; | |
| } | |
| for (int l = 0; l < 8; ++l) sumf += sums[l]; | |
| *s = sumf; | |
| } | |
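| // Variant of ggml_vec_dot_q6_K_q8_K for the 64-weight super-block layout | |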
| void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_q6_K * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
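| // ARM NEON implementation | |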
| float sum = 0; | |
| const uint8x16_t m4b = vdupq_n_u8(0xF); | |
| const int8x16_t m32s = vdupq_n_s8(32); | |
| const int32x4_t vzero = vdupq_n_s32(0); | |
| const uint8x16_t mone = vdupq_n_u8(3); | |
| ggml_int8x16x4_t q6bytes; | |
| ggml_uint8x16x4_t q6h; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d_all = GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q6 = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const int8_t * restrict scale = x[i].scales; | |
| int32_t isum = 0; | |
| uint8x16_t qhbits = vld1q_u8(qh); | |
| ggml_uint8x16x2_t q6bits = ggml_vld1q_u8_x2(q6); | |
| ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); | |
| q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4); | |
| uint8x16_t shifted = vshrq_n_u8(qhbits, 2); | |
| q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4); | |
| shifted = vshrq_n_u8(qhbits, 4); | |
| q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); | |
| shifted = vshrq_n_u8(qhbits, 6); | |
| q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); | |
| q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s); | |
| q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s); | |
| q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s); | |
| q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s); | |
| isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + | |
| vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + | |
| vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + | |
| vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; | |
| sum += isum * d_all * y[i].d; | |
| } | |
| *s = sum; | |
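| // AVX2 implementation | |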
| const __m256i m4 = _mm256_set1_epi8(0xF); | |
| const __m256i m2 = _mm256_set1_epi8(3); | |
| const __m256i m32s = _mm256_set1_epi8(32); | |
| __m256 acc = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q4 = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]); | |
| const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]); | |
| const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]); | |
| const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]); | |
| __m256i sumi = _mm256_setzero_si256(); | |
| const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1); | |
| const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3); | |
| const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); | |
| const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh); | |
| const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4); | |
| const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4); | |
| const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); | |
| const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1); | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0)); | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32)); | |
| __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); | |
| __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); | |
| __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); | |
| __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); | |
| p16_0 = _mm256_sub_epi16(p16_0, q8s_0); | |
| p16_1 = _mm256_sub_epi16(p16_1, q8s_1); | |
| p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); | |
| p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); | |
| sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); | |
| acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
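| // AVX implementation (128-bit integer halves) | |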
| const __m128i m4 = _mm_set1_epi8(0xF); | |
| const __m128i m2 = _mm_set1_epi8(3); | |
| const __m128i m32s = _mm_set1_epi8(32); | |
| __m256 acc = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q4 = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]); | |
| const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]); | |
| const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]); | |
| const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]); | |
| __m128i sumi_0 = _mm_setzero_si128(); | |
| __m128i sumi_1 = _mm_setzero_si128(); | |
| const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1); | |
| const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3); | |
| const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); | |
| const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh); | |
| const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4); | |
| const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4); | |
| const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4); | |
| const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4); | |
| const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0); | |
| const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1); | |
| const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2); | |
| const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3); | |
| const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0)); | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32)); | |
| __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0)); | |
| __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1)); | |
| __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0)); | |
| __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1)); | |
| __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0)); | |
| __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1)); | |
| __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0)); | |
| __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1)); | |
| p16_0 = _mm_sub_epi16(p16_0, q8s_0); | |
| p16_1 = _mm_sub_epi16(p16_1, q8s_1); | |
| p16_2 = _mm_sub_epi16(p16_2, q8s_2); | |
| p16_3 = _mm_sub_epi16(p16_3, q8s_3); | |
| p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0); | |
| p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1); | |
| p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2); | |
| p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3); | |
| sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); | |
| sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); | |
| acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc); | |
| } | |
| *s = hsum_float_8(acc); | |
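| // RISC-V vector implementation | |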
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d_all = GGML_FP16_TO_FP32(x[i].d); | |
| const uint8_t * restrict q6 = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const int8_t * restrict scale = x[i].scales; | |
| int32_t isum = 0; | |
| size_t vl = 16; | |
| vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); | |
| // load Q6 | |
| vuint8mf2_t q6_0 = __riscv_vle8_v_u8mf2(q6, vl); | |
| vuint8mf2_t q6_1 = __riscv_vle8_v_u8mf2(q6+16, vl); | |
| // load qh | |
| vuint8mf2_t qh_x = __riscv_vle8_v_u8mf2(qh, vl); | |
| vuint8mf2_t qh0 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl); | |
| qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl); | |
| vuint8mf2_t qh1 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl); | |
| qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl); | |
| vuint8mf2_t qh2 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl); | |
| qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl); | |
| vuint8mf2_t qh3 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl); | |
| vuint8mf2_t q6h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_0, 0xF, vl), qh0, vl); | |
| vuint8mf2_t q6h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_1, 0xF, vl), qh1, vl); | |
| vuint8mf2_t q6h_2 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_0, 0x4, vl), qh2, vl); | |
| vuint8mf2_t q6h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_1, 0x4, vl), qh3, vl); | |
| vint8mf2_t q6v_0 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_0), 32, vl); | |
| vint8mf2_t q6v_1 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_1), 32, vl); | |
| vint8mf2_t q6v_2 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_2), 32, vl); | |
| vint8mf2_t q6v_3 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_3), 32, vl); | |
| // load Q8 and take product | |
| vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q6v_0, __riscv_vle8_v_i8mf2(q8, vl), vl); | |
| vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q6v_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl); | |
| vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q6v_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl); | |
| vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q6v_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl); | |
| vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl); | |
| vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl); | |
| vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl); | |
| vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl); | |
| isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scale[0]; | |
| isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scale[1]; | |
| isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scale[2]; | |
| isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scale[3]; | |
| sumf += isum * d_all * y[i].d; | |
| } | |
| *s = sumf; | |
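| // portable scalar implementation | |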
| int8_t aux8[QK_K]; | |
| int16_t aux16[8]; | |
| float sums [8]; | |
| int32_t aux32[8]; | |
| memset(sums, 0, 8*sizeof(float)); | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const uint8_t * restrict q4 = x[i].ql; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const int8_t * restrict q8 = y[i].qs; | |
| memset(aux32, 0, 8*sizeof(int32_t)); | |
| int8_t * restrict a = aux8; | |
| for (int l = 0; l < 16; ++l) { | |
| a[l+ 0] = (int8_t)((q4[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; | |
| a[l+16] = (int8_t)((q4[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; | |
| a[l+32] = (int8_t)((q4[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; | |
| a[l+48] = (int8_t)((q4[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; | |
| } | |
| int is = 0; | |
| for (int j = 0; j < QK_K/16; ++j) { | |
| int scale = x[i].scales[is++]; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; | |
| for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; | |
| q8 += 8; a += 8; | |
| } | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; | |
| } | |
| for (int l = 0; l < 8; ++l) sumf += sums[l]; | |
| *s = sumf; | |
| } | |
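| // keven_signs_q2xs: all 128 length-8 sign patterns (+1/-1) that contain an even number of -1 values; | |
| // iq2_xxs encodes the signs of each group of 8 weights as a 7-bit index into this table | |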
| static const int8_t keven_signs_q2xs[1024] = { | |
| 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, | |
| 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, | |
| 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, | |
| 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, | |
| 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, | |
| 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, | |
| 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, | |
| 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, | |
| 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, | |
| 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, | |
| 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, | |
| 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, | |
| 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, | |
| 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, | |
| 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, | |
| 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, | |
| 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, | |
| 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, | |
| 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, | |
| 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, | |
| 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, | |
| 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, | |
| 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, | |
| 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, | |
| 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, | |
| 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, | |
| 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, | |
| 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, | |
| 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, | |
| 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, | |
| 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, | |
| 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, | |
| }; | |
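| // For example, sign index 3 selects the 8-byte row keven_signs_q2xs + 8*3 = { -1, -1, 1, 1, 1, 1, 1, 1 } (two -1 values, even parity) | |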
| void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_iq2_xxs * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
| #if defined(__ARM_NEON) | |
| const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; | |
| uint32_t aux32[4]; | |
| const uint8_t * aux8 = (const uint8_t *)aux32; | |
| ggml_int8x16x4_t q2u; | |
| ggml_int8x16x4_t q2s; | |
| ggml_int8x16x4_t q8b; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint16_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| float sumf1 = 0, sumf2 = 0; | |
| for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { | |
| q8b = ggml_vld1q_s8_x4(q8); q8 += 64; | |
| memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; | |
| q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1]))); | |
| q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3]))); | |
| q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9]))); | |
| q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11]))); | |
| q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); | |
| q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); | |
| q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127)))); | |
| q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127)))); | |
| q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); | |
| q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); | |
| q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); | |
| q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); | |
| const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]); | |
| const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]); | |
| sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28)); | |
| sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28)); | |
| } | |
| sumf += d*(sumf1 + sumf2); | |
| } | |
| *s = 0.25f * sumf; | |
| #elif defined(__AVX2__) | |
| const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; | |
| uint32_t aux32[4]; | |
| const uint8_t * aux8 = (const uint8_t *)aux32; | |
| __m256 accumf = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint16_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| __m256i sumi1 = _mm256_setzero_si256(); | |
| __m256i sumi2 = _mm256_setzero_si256(); | |
| for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; | |
| const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; | |
| memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; | |
| const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); | |
| const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); | |
| const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], | |
| signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); | |
| const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], | |
| signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); | |
| const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); | |
| const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); | |
| const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); | |
| const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); | |
| const uint16_t ls1 = aux32[1] >> 28; | |
| const uint16_t ls2 = aux32[3] >> 28; | |
| const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); | |
| const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); | |
| sumi1 = _mm256_add_epi32(sumi1, p1); | |
| sumi2 = _mm256_add_epi32(sumi2, p2); | |
| } | |
| accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); | |
| } | |
| *s = 0.125f * hsum_float_8(accumf); | |
| #else | |
| uint32_t aux32[2]; | |
| const uint8_t * aux8 = (const uint8_t *)aux32; | |
| float sumf = 0.f; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint16_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| int32_t bsum = 0; | |
| for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { | |
| memcpy(aux32, q2, 2*sizeof(uint32_t)); | |
| q2 += 4; | |
| const uint32_t ls = 2*(aux32[1] >> 28) + 1; | |
| int32_t sumi = 0; | |
| for (int l = 0; l < 4; ++l) { | |
| const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); | |
| const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; | |
| for (int j = 0; j < 8; ++j) { | |
| sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); | |
| } | |
| q8 += 8; | |
| } | |
| bsum += sumi * ls; | |
| } | |
| sumf += d * bsum; | |
| } | |
| *s = 0.125f * sumf; | |
| #endif | |
| } | |
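| // Dot product of an IQ2_XS row with a Q8_K row (NEON / AVX2 / scalar variants below). Here | |
| // every 16-bit entry of x[i].qs packs a 9-bit index into iq2xs_grid in its low bits and a | |
| // 7-bit sign-table index in its high bits, and x[i].scales carries two 4-bit scales per byte | |
| // (one per 16 weights); each partial sum is weighted by 2*scale + 1 and the result by 0.125f. | |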
| void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_iq2_xs * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
| #if defined(__ARM_NEON) | |
| const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; | |
| ggml_int8x16x4_t q2u; | |
| ggml_int8x16x4_t q2s; | |
| ggml_int8x16x4_t q8b; | |
| int32x4x4_t scales32; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint16_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| const uint8x8_t scales8 = vld1_u8(x[i].scales); | |
| const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf)); | |
| const uint8x8_t scales_h = vshr_n_u8(scales8, 4); | |
| uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h)); | |
| scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1)); | |
| const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales)); | |
| const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales)); | |
| scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1))); | |
| scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1))); | |
| scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2))); | |
| scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2))); | |
| int32x4_t sumi = vdupq_n_s32(0); | |
| for (int ib64 = 0; ib64 < QK_K/64; ++ib64) { | |
| q8b = ggml_vld1q_s8_x4(q8); q8 += 64; | |
| q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511)))); | |
| q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511)))); | |
| q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511)))); | |
| q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511)))); | |
| q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9)))); | |
| q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9)))); | |
| q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9)))); | |
| q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9)))); | |
| q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); | |
| q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); | |
| q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); | |
| q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); | |
| const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]); | |
| const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]); | |
| const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]); | |
| const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]); | |
| const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4)); | |
| sumi = vmlaq_s32(sumi, p, scales32.val[ib64]); | |
| q2 += 8; | |
| } | |
| sumf += d*vaddvq_s32(sumi); | |
| } | |
| *s = 0.125f * sumf; | |
| #elif defined(__AVX2__) | |
| const __m128i m4 = _mm_set1_epi8(0xf); | |
| const __m128i m1 = _mm_set1_epi8(1); | |
| const __m256i m511 = _mm256_set1_epi16(511); | |
| const __m256i mone = _mm256_set1_epi8(1); | |
| static const uint8_t k_bit_helper[32] = { | |
| 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, | |
| 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, | |
| }; | |
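| // k_bit_helper maps a 4-bit index to 0x80 when its popcount is odd. Only 7 sign bits per group | |
| // of 8 weights are stored (bits 9..15 of each q2 entry); the 8th sign is their parity. Below, | |
| // xor-folding the stored bits and shuffling through this table recovers that parity bit, so | |
| // full_sign_bits ends up holding all 8 sign bits for each group. | |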
| static const char block_sign_shuffle_mask_1[32] = { | |
| 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, | |
| 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, | |
| }; | |
| static const char block_sign_shuffle_mask_2[32] = { | |
| 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, | |
| 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, | |
| }; | |
| static const uint8_t bit_selector_mask_bytes[32] = { | |
| 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, | |
| 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, | |
| }; | |
| const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper); | |
| const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes); | |
| const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1); | |
| const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2); | |
| uint64_t aux64; | |
| // somewhat hacky, but gives a significant boost in performance | |
| __m256i aux_gindex; | |
| const uint16_t * gindex = (const uint16_t *)&aux_gindex; | |
| __m256 accumf = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint16_t * restrict q2 = x[i].qs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| memcpy(&aux64, x[i].scales, 8); | |
| __m128i stmp = _mm_set1_epi64x(aux64); | |
| stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4)); | |
| const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1); | |
| __m256i sumi1 = _mm256_setzero_si256(); | |
| __m256i sumi2 = _mm256_setzero_si256(); | |
| for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { | |
| const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16; | |
| aux_gindex = _mm256_and_si256(q2_data, m511); | |
| const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9); | |
| const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13); | |
| const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper); | |
| const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting); | |
| const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits); | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; | |
| const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; | |
| const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; | |
| const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; | |
| const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]], | |
| iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]); | |
| const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]], | |
| iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]); | |
| const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]], | |
| iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]); | |
| const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]], | |
| iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); | |
| const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits); | |
| const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1); | |
| const __m256i full_signs_1 = _mm256_set_m128i(full_signs_l, full_signs_l); | |
| const __m256i full_signs_2 = _mm256_set_m128i(full_signs_h, full_signs_h); | |
| __m256i signs; | |
| signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1); | |
| signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); | |
| const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone)); | |
| signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2); | |
| signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); | |
| const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone)); | |
| signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1); | |
| signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); | |
| const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone)); | |
| signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2); | |
| signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); | |
| const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone)); | |
| const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); | |
| const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); | |
| const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3); | |
| const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4); | |
| const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0))); | |
| const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1))); | |
| const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2))); | |
| const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3))); | |
| sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1)); | |
| sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2)); | |
| sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3)); | |
| sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4)); | |
| } | |
| accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); | |
| } | |
| *s = 0.125f * hsum_float_8(accumf); | |
| #else | |
| float sumf = 0.f; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint16_t * restrict q2 = x[i].qs; | |
| const uint8_t * restrict sc = x[i].scales; | |
| const int8_t * restrict q8 = y[i].qs; | |
| int32_t bsum = 0; | |
| for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { | |
| const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; | |
| const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; | |
| int32_t sumi = 0; | |
| for (int l = 0; l < 2; ++l) { | |
| const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); | |
| const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; | |
| for (int j = 0; j < 8; ++j) { | |
| sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); | |
| } | |
| q8 += 8; | |
| } | |
| bsum += sumi * ls1; | |
| sumi = 0; | |
| for (int l = 2; l < 4; ++l) { | |
| const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); | |
| const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; | |
| for (int j = 0; j < 8; ++j) { | |
| sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); | |
| } | |
| q8 += 8; | |
| } | |
| bsum += sumi * ls2; | |
| q2 += 4; | |
| } | |
| sumf += d * bsum; | |
| } | |
| *s = 0.125f * sumf; | |
| #endif | |
| } | |
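| // Dot product of an IQ3_XXS row with a Q8_K row. The first QK_K/4 bytes of x[i].qs are 8-bit | |
| // indices into iq3xxs_grid (each entry supplies 4 weights); the remaining bytes ("gas") hold | |
| // one 32-bit word per 32 weights with four 7-bit sign-table indices and a 4-bit scale in the | |
| // top nibble, applied as 2*scale + 1 with a final factor of 0.25f. | |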
| void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_iq3_xxs * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
| #if defined(__ARM_NEON) | |
| const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; | |
| uint32_t aux32[2]; | |
| ggml_int8x16x4_t q3s; | |
| ggml_int8x16x4_t q8b; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const uint8_t * restrict gas = x[i].qs + QK_K/4; | |
| const int8_t * restrict q8 = y[i].qs; | |
| float sumf1 = 0, sumf2 = 0; | |
| for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { | |
| q8b = ggml_vld1q_s8_x4(q8); q8 += 64; | |
| memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t); | |
| const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]); | |
| const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]); | |
| const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]); | |
| const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]); | |
| q3 += 16; | |
| q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127)))); | |
| q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127)))); | |
| q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); | |
| q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); | |
| q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0)); | |
| q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1)); | |
| q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2)); | |
| q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3)); | |
| const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); | |
| const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); | |
| sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28)); | |
| sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28)); | |
| } | |
| sumf += d*(sumf1 + sumf2); | |
| } | |
| *s = 0.5f * sumf; | |
| #elif defined(__AVX2__) | |
| const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; | |
| uint32_t aux32[2]; | |
| __m256 accumf = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const uint8_t * restrict gas = x[i].qs + QK_K/4; | |
| const int8_t * restrict q8 = y[i].qs; | |
| __m256i sumi1 = _mm256_setzero_si256(); | |
| __m256i sumi2 = _mm256_setzero_si256(); | |
| for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; | |
| const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; | |
| const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], | |
| iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); | |
| q3 += 8; | |
| const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], | |
| iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); | |
| q3 += 8; | |
| memcpy(aux32, gas, 8); gas += 8; | |
| const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127], | |
| signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); | |
| const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], | |
| signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); | |
| const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); | |
| const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); | |
| const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); | |
| const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); | |
| const uint16_t ls1 = aux32[0] >> 28; | |
| const uint16_t ls2 = aux32[1] >> 28; | |
| const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); | |
| const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); | |
| sumi1 = _mm256_add_epi32(sumi1, p1); | |
| sumi2 = _mm256_add_epi32(sumi2, p2); | |
| } | |
| accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); | |
| } | |
| *s = 0.25f * hsum_float_8(accumf); | |
| #else | |
| uint32_t aux32; | |
| float sumf = 0.f; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint8_t * restrict q3 = x[i].qs; | |
| const uint8_t * restrict gas = x[i].qs + QK_K/4; | |
| const int8_t * restrict q8 = y[i].qs; | |
| int32_t bsum = 0; | |
| for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { | |
| memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); | |
| const uint32_t ls = 2*(aux32 >> 28) + 1; | |
| int32_t sumi = 0; | |
| for (int l = 0; l < 4; ++l) { | |
| const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]); | |
| const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); | |
| const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; | |
| for (int j = 0; j < 4; ++j) { | |
| sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); | |
| sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1); | |
| } | |
| q8 += 8; | |
| } | |
| q3 += 8; | |
| bsum += sumi * ls; | |
| } | |
| sumf += d * bsum; | |
| } | |
| *s = 0.25f * sumf; | |
| #endif | |
| } | |
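| // Dot product of an IQ3_S row with a Q8_K row. Grid indices take their low 8 bits from x[i].qs | |
| // and their 9th bit from x[i].qh; signs are stored explicitly in x[i].signs (one byte per 8 | |
| // weights), and x[i].scales packs two 4-bit scales per byte, applied as 2*scale + 1 with a | |
| // final factor of 0.25f. | |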
| void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_iq3_s * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
| #if defined(__ARM_NEON) | |
| static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, | |
| 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 | |
| }; | |
| static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; | |
| const uint8x16x2_t mask1 = vld1q_u8_x2(k_mask1); | |
| const uint8x16_t mask2 = vld1q_u8(k_mask2); | |
| uint8x16x2_t vs; | |
| ggml_int8x16x4_t q3s; | |
| ggml_int8x16x4_t q8b; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint8_t * restrict qs = x[i].qs; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const uint16_t * restrict signs = (const uint16_t *)x[i].signs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| int sumi1 = 0, sumi2 = 0; | |
| for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { | |
| q8b = ggml_vld1q_s8_x4(q8); q8 += 64; | |
| const uint32x4_t aux32x4_0 = {iq3xs_grid[qs[ 0] | ((qh[ib32+0] << 8) & 256)], iq3xs_grid[qs[ 1] | ((qh[ib32+0] << 7) & 256)], | |
| iq3xs_grid[qs[ 2] | ((qh[ib32+0] << 6) & 256)], iq3xs_grid[qs[ 3] | ((qh[ib32+0] << 5) & 256)]}; | |
| const uint32x4_t aux32x4_1 = {iq3xs_grid[qs[ 4] | ((qh[ib32+0] << 4) & 256)], iq3xs_grid[qs[ 5] | ((qh[ib32+0] << 3) & 256)], | |
| iq3xs_grid[qs[ 6] | ((qh[ib32+0] << 2) & 256)], iq3xs_grid[qs[ 7] | ((qh[ib32+0] << 1) & 256)]}; | |
| const uint32x4_t aux32x4_2 = {iq3xs_grid[qs[ 8] | ((qh[ib32+1] << 8) & 256)], iq3xs_grid[qs[ 9] | ((qh[ib32+1] << 7) & 256)], | |
| iq3xs_grid[qs[10] | ((qh[ib32+1] << 6) & 256)], iq3xs_grid[qs[11] | ((qh[ib32+1] << 5) & 256)]}; | |
| const uint32x4_t aux32x4_3 = {iq3xs_grid[qs[12] | ((qh[ib32+1] << 4) & 256)], iq3xs_grid[qs[13] | ((qh[ib32+1] << 3) & 256)], | |
| iq3xs_grid[qs[14] | ((qh[ib32+1] << 2) & 256)], iq3xs_grid[qs[15] | ((qh[ib32+1] << 1) & 256)]}; | |
| qs += 16; | |
| vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | (signs[1] << 16))); | |
| vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); | |
| vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); | |
| vs.val[0] = vceqq_u8(vs.val[0], mask2); | |
| vs.val[1] = vceqq_u8(vs.val[1], mask2); | |
| q3s.val[0] = vsubq_s8(vreinterpretq_s8_u8(veorq_u8(vs.val[0], vreinterpretq_u8_u32(aux32x4_0))), vreinterpretq_s8_u8(vs.val[0])); | |
| q3s.val[1] = vsubq_s8(vreinterpretq_s8_u8(veorq_u8(vs.val[1], vreinterpretq_u8_u32(aux32x4_1))), vreinterpretq_s8_u8(vs.val[1])); | |
| vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | (signs[3] << 16))); | |
| vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); | |
| vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); | |
| vs.val[0] = vceqq_u8(vs.val[0], mask2); | |
| vs.val[1] = vceqq_u8(vs.val[1], mask2); | |
| signs += 4; | |
| q3s.val[2] = vsubq_s8(vreinterpretq_s8_u8(veorq_u8(vs.val[0], vreinterpretq_u8_u32(aux32x4_2))), vreinterpretq_s8_u8(vs.val[0])); | |
| q3s.val[3] = vsubq_s8(vreinterpretq_s8_u8(veorq_u8(vs.val[1], vreinterpretq_u8_u32(aux32x4_3))), vreinterpretq_s8_u8(vs.val[1])); | |
| const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); | |
| const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); | |
| sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32/2] & 0xf)); | |
| sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32/2] >> 4)); | |
| } | |
| sumf += d*(sumi1 + sumi2); | |
| } | |
| *s = 0.25f * sumf; | |
| #elif defined(__AVX2__) | |
| static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, | |
| 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 | |
| }; | |
| static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, | |
| 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, | |
| }; | |
| const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1); | |
| const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2); | |
| __m256 accumf = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint8_t * restrict qs = x[i].qs; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const uint16_t * restrict signs = (const uint16_t *)x[i].signs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| __m256i sumi1 = _mm256_setzero_si256(); | |
| __m256i sumi2 = _mm256_setzero_si256(); | |
| for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { | |
| const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; | |
| const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; | |
| const __m256i q2_1 = _mm256_set_epi32(iq3xs_grid[qs[7] | ((qh[ib32+0] << 1) & 256)], | |
| iq3xs_grid[qs[6] | ((qh[ib32+0] << 2) & 256)], | |
| iq3xs_grid[qs[5] | ((qh[ib32+0] << 3) & 256)], | |
| iq3xs_grid[qs[4] | ((qh[ib32+0] << 4) & 256)], | |
| iq3xs_grid[qs[3] | ((qh[ib32+0] << 5) & 256)], | |
| iq3xs_grid[qs[2] | ((qh[ib32+0] << 6) & 256)], | |
| iq3xs_grid[qs[1] | ((qh[ib32+0] << 7) & 256)], | |
| iq3xs_grid[qs[0] | ((qh[ib32+0] << 8) & 256)]); | |
| qs += 8; | |
| const __m256i q2_2 = _mm256_set_epi32(iq3xs_grid[qs[7] | ((qh[ib32+1] << 1) & 256)], | |
| iq3xs_grid[qs[6] | ((qh[ib32+1] << 2) & 256)], | |
| iq3xs_grid[qs[5] | ((qh[ib32+1] << 3) & 256)], | |
| iq3xs_grid[qs[4] | ((qh[ib32+1] << 4) & 256)], | |
| iq3xs_grid[qs[3] | ((qh[ib32+1] << 5) & 256)], | |
| iq3xs_grid[qs[2] | ((qh[ib32+1] << 6) & 256)], | |
| iq3xs_grid[qs[1] | ((qh[ib32+1] << 7) & 256)], | |
| iq3xs_grid[qs[0] | ((qh[ib32+1] << 8) & 256)]); | |
| qs += 8; | |
| __m256i aux256 = _mm256_set1_epi32(signs[0] | (signs[1] << 16)); | |
| aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); | |
| const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); | |
| const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); | |
| aux256 = _mm256_set1_epi32(signs[2] | (signs[3] << 16)); | |
| aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); | |
| const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); | |
| const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); | |
| signs += 4; | |
| const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); | |
| const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); | |
| const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; | |
| const uint16_t ls2 = x[i].scales[ib32/2] >> 4; | |
| const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); | |
| const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); | |
| sumi1 = _mm256_add_epi32(sumi1, p1); | |
| sumi2 = _mm256_add_epi32(sumi2, p2); | |
| } | |
| accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); | |
| } | |
| *s = 0.25f * hsum_float_8(accumf); | |
| #else | |
| float sumf = 0.f; | |
| for (int i = 0; i < nb; ++i) { | |
| const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; | |
| const uint8_t * restrict qs = x[i].qs; | |
| const uint8_t * restrict qh = x[i].qh; | |
| const uint8_t * restrict signs = x[i].signs; | |
| const int8_t * restrict q8 = y[i].qs; | |
| int32_t bsum = 0; | |
| for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { | |
| const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; | |
| const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; | |
| int32_t sumi = 0; | |
| for (int l = 0; l < 4; ++l) { | |
| const uint8_t * grid1 = (const uint8_t *)(iq3xs_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); | |
| const uint8_t * grid2 = (const uint8_t *)(iq3xs_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); | |
| for (int j = 0; j < 4; ++j) { | |
| sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); | |
| sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); | |
| } | |
| q8 += 8; | |
| } | |
| qs += 8; | |
| signs += 4; | |
| bsum += sumi * ls1; | |
| sumi = 0; | |
| for (int l = 0; l < 4; ++l) { | |
| const uint8_t * grid1 = (const uint8_t *)(iq3xs_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); | |
| const uint8_t * grid2 = (const uint8_t *)(iq3xs_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); | |
| for (int j = 0; j < 4; ++j) { | |
| sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); | |
| sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); | |
| } | |
| q8 += 8; | |
| } | |
| qs += 8; | |
| signs += 4; | |
| bsum += sumi * ls2; | |
| } | |
| sumf += d * bsum; | |
| } | |
| *s = 0.25f * sumf; | |
| #endif | |
| } | |
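| // mul_add_epi8 multiplies two int8 vectors element-wise and sums adjacent pairs into int16 | |
| // lanes. _mm256_maddubs_epi16 needs an unsigned first operand, so the sign of x is moved onto | |
| // y first: |x| * sign(x)*y == x*y. | |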
| #if defined(__AVX2__) | |
| static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) { | |
| const __m256i ax = _mm256_sign_epi8(x, x); | |
| const __m256i sy = _mm256_sign_epi8(y, x); | |
| return _mm256_maddubs_epi16(ax, sy); | |
| } | |
| #endif | |
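| // Dot product of an IQ1_S row with a Q8_K row. As the scalar fallback below reads it, each | |
| // byte of x[i].qs gives the low 8 bits of an index into iq1s_grid (8 weights per entry), while | |
| // each byte of x[i].scales contributes two high index bits (0x08 and 0x80) and two 3-bit | |
| // scales, applied as 2*scale + 1. | |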
| void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { | |
| assert(n % QK_K == 0); | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| const block_iq1_s * restrict x = vx; | |
| const block_q8_K * restrict y = vy; | |
| const int nb = n / QK_K; | |
| #if defined(__ARM_NEON) | |
| const uint8x16_t m8 = vdupq_n_u8(0x08); | |
| const uint8x16_t m7 = vdupq_n_u8(0x07); | |
| const uint8x16_t m1 = vdupq_n_u8(0x01); | |
| const int32x4_t vzero = vdupq_n_s32(0); | |
| uint16_t gindex[8]; | |
| uint16x8x2_t vindex; | |
| int8x16x4_t q1b; | |
| ggml_int8x16x4_t q8b; | |
| uint16x8x4_t scales; | |
| int32x4x2_t sumi; | |
| int32x4x2_t dotq; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const int8_t * q8 = y[i].qs; | |
| const uint8_t * qs = x[i].qs; | |
| const uint8_t * sc = x[i].scales; | |
| sumi.val[0] = sumi.val[1] = vzero; | |
| for (int i128 = 0; i128 < QK_K/128; ++i128) { | |
| const uint8x16_t ql = vld1q_u8(qs); qs += 16; | |
| const uint8x8_t tm1 = vld1_u8 (sc); sc += 8; | |
| const uint8x8_t tm2 = vshr_n_u8(tm1, 4); | |
| const uint8x16_t qh = vcombine_u8(vzip1_u8(tm1, tm2), vzip2_u8(tm1, tm2)); | |
| const uint8x16_t hbit = vandq_u8(qh, m8); | |
| vindex.val[0] = vorrq_u16(vmovl_u8(vget_low_u8 (ql)), vshlq_n_u16(vmovl_u8(vget_low_u8 (hbit)), 5)); | |
| vindex.val[1] = vorrq_u16(vmovl_u8(vget_high_u8(ql)), vshlq_n_u16(vmovl_u8(vget_high_u8(hbit)), 5)); | |
| const uint8x16_t scales8 = vorrq_u8(vshlq_n_u8(vandq_u8(qh, m7), 1), m1); | |
| scales.val[0] = vmovl_u8(vget_low_u8 (scales8)); | |
| scales.val[1] = vmovl_u8(vget_high_u8 (scales8)); | |
| for (int l = 0; l < 2; ++l) { | |
| vst1q_u16(gindex+0, vindex.val[l]); | |
| q1b.val[0] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[0])), vld1_s8((const void *)(iq1s_grid+gindex[1]))); | |
| q1b.val[1] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[2])), vld1_s8((const void *)(iq1s_grid+gindex[3]))); | |
| q1b.val[2] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[4])), vld1_s8((const void *)(iq1s_grid+gindex[5]))); | |
| q1b.val[3] = vcombine_s8(vld1_s8((const void *)(iq1s_grid+gindex[6])), vld1_s8((const void *)(iq1s_grid+gindex[7]))); | |
| q8b = ggml_vld1q_s8_x4(q8); q8 += 64; | |
| dotq.val[0] = vpaddq_s32(ggml_vdotq_s32(vzero, q1b.val[0], q8b.val[0]), ggml_vdotq_s32(vzero, q1b.val[1], q8b.val[1])); | |
| dotq.val[1] = vpaddq_s32(ggml_vdotq_s32(vzero, q1b.val[2], q8b.val[2]), ggml_vdotq_s32(vzero, q1b.val[3], q8b.val[3])); | |
| sumi.val[0] = vmlaq_s32(sumi.val[0], dotq.val[0], vreinterpretq_s32_u32(vmovl_u16(vget_low_u16 (scales.val[l])))); | |
| sumi.val[1] = vmlaq_s32(sumi.val[1], dotq.val[1], vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales.val[l])))); | |
| } | |
| } | |
| sumf += y[i].d * GGML_FP16_TO_FP32(x[i].d) * vaddvq_s32(vaddq_s32(sumi.val[0], sumi.val[1])); | |
| } | |
| *s = sumf; | |
| #elif defined(__AVX2__) | |
| const __m128i m8 = _mm_set1_epi8(0x08); | |
| const __m128i m7 = _mm_set1_epi8(0x07); | |
| const __m128i m1 = _mm_set1_epi8(0x01); | |
| const __m128i shuffle_h = _mm_set_epi8(15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0); | |
| const __m128i shuffle_s[4] = { | |
| _mm_set_epi32(0x03030303, 0x02020202, 0x01010101, 0x00000000), | |
| _mm_set_epi32(0x07070707, 0x06060606, 0x05050505, 0x04040404), | |
| _mm_set_epi32(0x0b0b0b0b, 0x0a0a0a0a, 0x09090909, 0x08080808), | |
| _mm_set_epi32(0x0f0f0f0f, 0x0e0e0e0e, 0x0d0d0d0d, 0x0c0c0c0c) | |
| }; | |
| uint64_t aux64; | |
| __m256i v_gindex; | |
| const uint16_t * gindex = (const uint16_t *)&v_gindex; | |
| __m256 accum = _mm256_setzero_ps(); | |
| for (int i = 0; i < nb; ++i) { | |
| const int8_t * q8 = y[i].qs; | |
| const uint8_t * qs = x[i].qs; | |
| const uint8_t * sc = x[i].scales; | |
| __m256i sumi = _mm256_setzero_si256(); | |
| for (int i128 = 0; i128 < QK_K/128; ++i128) { | |
| const __m128i ql = _mm_loadu_si128((const __m128i*)qs); qs += 16; | |
| memcpy(&aux64, sc, 8); sc += 8; | |
| const __m128i qh = _mm_shuffle_epi8(_mm_set_epi64x(aux64 >> 4, aux64), shuffle_h); | |
| const __m256i hbit = _mm256_cvtepu8_epi16(_mm_and_si128(qh, m8)); | |
| v_gindex = _mm256_or_si256(_mm256_cvtepu8_epi16(ql), _mm256_slli_epi16(hbit, 5)); | |
| const __m128i scales = _mm_or_si128(_mm_slli_epi16(_mm_and_si128(qh, m7), 1), m1); | |
| for (int i32 = 0; i32 < 4; ++i32) { | |
| const __m256i q8b = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; | |
| const __m256i q1b = _mm256_set_epi64x(iq1s_grid[gindex[4*i32+3]], iq1s_grid[gindex[4*i32+2]], | |
| iq1s_grid[gindex[4*i32+1]], iq1s_grid[gindex[4*i32+0]]); | |
| const __m256i dot = mul_add_epi8(q1b, q8b); | |
| const __m256i s16 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, shuffle_s[i32])); | |
| const __m256i p = _mm256_madd_epi16(s16, dot); | |
| sumi = _mm256_add_epi32(sumi, p); | |
| } | |
| } | |
| accum = _mm256_fmadd_ps(_mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d)), _mm256_cvtepi32_ps(sumi), accum); | |
| } | |
| *s = hsum_float_8(accum); | |
| #else | |
| int db[4]; | |
| uint16_t idx[4]; | |
| float sumf = 0; | |
| for (int i = 0; i < nb; ++i) { | |
| const int8_t * q8 = y[i].qs; | |
| const uint8_t * qs = x[i].qs; | |
| const uint8_t * sc = x[i].scales; | |
| int sumi = 0; | |
| for (int i32 = 0; i32 < QK_K/32; ++i32) { | |
| idx[0] = qs[0] | ((sc[0] & 0x08) << 5); | |
| idx[1] = qs[1] | ((sc[0] & 0x80) << 1); | |
| idx[2] = qs[2] | ((sc[1] & 0x08) << 5); | |
| idx[3] = qs[3] | ((sc[1] & 0x80) << 1); | |
| db[0] = (2*(sc[0] & 7) + 1); | |
| db[1] = (2*((sc[0] >> 4) & 7) + 1); | |
| db[2] = (2*(sc[1] & 7) + 1); | |
| db[3] = (2*((sc[1] >> 4) & 7) + 1); | |
| for (int l = 0; l < 4; ++l) { | |
| const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]); | |
| int suml = 0; | |
| for (int j = 0; j < 8; ++j) suml += q8[j] * grid[j]; | |
| sumi += db[l] * suml; | |
| q8 += 8; | |
| } | |
| qs += 4; | |
| sc += 2; | |
| } | |
| sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * sumi; | |
| } | |
| *s = sumf; | |
| #endif | |
| } | |
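| // Dot product of an IQ4_NL row with a Q8_0 row. IQ4_NL is a non-linear 4-bit format: each | |
| // nibble of x[ib].qs is an index into the kvalues_iq4nl lookup table rather than a linear | |
| // level, and each block of QK4_NL weights carries one fp16 scale. | |
| // Illustrative sketch only — iq4_nl_block_to_float_ref is a hypothetical helper, not part of | |
| // ggml; it expands one block to floats using the same layout the scalar fallback below reads. | |
| static inline void iq4_nl_block_to_float_ref(const block_iq4_nl * b, float * out) { | |
| const float d = GGML_FP16_TO_FP32(b->d); | |
| for (int j = 0; j < QK4_NL/2; ++j) { | |
| out[j] = d * kvalues_iq4nl[b->qs[j] & 0xf]; // low nibble -> first half of the block | |
| out[j + QK4_NL/2] = d * kvalues_iq4nl[b->qs[j] >> 4]; // high nibble -> second half | |
| } | |
| } | |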
| void ggml_vec_dot_iq4_nl_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) { | |
| assert(nrc == 1); | |
| UNUSED(nrc); | |
| UNUSED(bx); | |
| UNUSED(by); | |
| UNUSED(bs); | |
| assert(n % QK4_NL == 0); | |
| static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); | |
| const block_iq4_nl * restrict x = vx; | |
| const block_q8_0 * restrict y = vy; | |
| const int nb = n / QK4_NL; | |
| #if defined(__ARM_NEON) | |
| const int8x16_t values = vld1q_s8(kvalues_iq4nl); | |
| const uint8x16_t m4b = vdupq_n_u8(0x0f); | |
| uint8x16x2_t q4bits; | |
| int8x16x4_t q4b; | |
| int8x16x4_t q8b; | |
| int32x4_t prod_1, prod_2; | |
| float sumf = 0; | |
| for (int ib = 0; ib < nb; ib += 2) { | |
| q4bits.val[0] = vld1q_u8(x[ib+0].qs); | |
| q4bits.val[1] = vld1q_u8(x[ib+1].qs); | |
| q8b.val[0] = vld1q_s8(y[ib+0].qs); | |
| q8b.val[1] = vld1q_s8(y[ib+0].qs + 16); | |
| q8b.val[2] = vld1q_s8(y[ib+1].qs); | |
| q8b.val[3] = vld1q_s8(y[ib+1].qs + 16); | |
| q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); | |
| q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); | |
| q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); | |
| q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); | |
| prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); | |
| prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); | |
| sumf += | |
| GGML_FP16_TO_FP32(x[ib+0].d) * GGML_FP16_TO_FP32(y[ib+0].d) * vaddvq_s32(prod_1) + | |
| GGML_FP16_TO_FP32(x[ib+1].d) * GGML_FP16_TO_FP32(y[ib+1].d) * vaddvq_s32(prod_2); | |
| } | |
| *s = sumf; | |
| #elif defined(__AVX2__) | |
| const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); | |
| const __m128i m4b = _mm_set1_epi8(0x0f); | |
| const __m256i mone = _mm256_set1_epi16(1); | |
| __m256 accum1 = _mm256_setzero_ps(); | |
| __m256 accum2 = _mm256_setzero_ps(); | |
| for (int ib = 0; ib < nb; ib += 2) { | |
| const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[0].qs); | |
| const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[1].qs); | |
| const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[0].qs); | |
| const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[1].qs); | |
| const __m256i q4b_1 = _mm256_set_m128i(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), | |
| _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); | |
| const __m256i q4b_2 = _mm256_set_m128i(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), | |
| _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); | |
| const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); | |
| const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); | |
| const __m256i p_1 = _mm256_madd_epi16(p16_1, mone); | |
| const __m256i p_2 = _mm256_madd_epi16(p16_2, mone); | |
| accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[0].d)*GGML_FP16_TO_FP32(x[0].d)), | |
| _mm256_cvtepi32_ps(p_1), accum1); | |
| accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[1].d)*GGML_FP16_TO_FP32(x[1].d)), | |
| _mm256_cvtepi32_ps(p_2), accum2); | |
| y += 2; | |
| x += 2; | |
| } | |
| *s = hsum_float_8(_mm256_add_ps(accum1, accum2)); | |
| #else | |
| float sumf = 0; | |
| for (int ib = 0; ib < nb; ++ib) { | |
| const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); | |
| int sumi1 = 0, sumi2 = 0; | |
| for (int j = 0; j < QK4_NL/2; ++j) { | |
| sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; | |
| sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; | |
| } | |
| sumf += d * (sumi1 + sumi2); | |
| } | |
| *s = sumf; | |
| #endif | |
| } | |
| // ================================ IQ2 quantization ============================================= | |
| typedef struct { | |
| uint64_t * grid; | |
| int * map; | |
| uint16_t * neighbours; | |
| } iq2_entry_t; | |
| static iq2_entry_t iq2_data[3] = { | |
| {NULL, NULL, NULL}, | |
| {NULL, NULL, NULL}, | |
| {NULL, NULL, NULL}, | |
| }; | |
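| // Per-type cache filled by iq2xs_init_impl(): "grid" holds the unpacked grid points (8 int8 | |
| // coordinates per entry), "map" maps every possible 16-bit point code to its grid index (or to | |
| // -(offset+1) into the neighbour list when the point is not on the grid), and "neighbours" | |
| // stores, for each off-grid point, a count followed by that many candidate grid indices. | |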
| static inline int iq2_data_index(enum ggml_type type) { | |
| GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S); | |
| return type == GGML_TYPE_IQ2_XXS ? 0 : | |
| type == GGML_TYPE_IQ2_XS ? 1 : 2; | |
| } | |
| static inline int iq2_grid_size(enum ggml_type type) { | |
| GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S); | |
| return type == GGML_TYPE_IQ2_XXS ? 256 : | |
| type == GGML_TYPE_IQ2_XS ? 512 : 512; | |
| } | |
| static int iq2_compare_func(const void * left, const void * right) { | |
| const int * l = (const int *)left; | |
| const int * r = (const int *)right; | |
| return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0; | |
| } | |
| void iq2xs_init_impl(enum ggml_type type) { | |
| const int gindex = iq2_data_index(type); | |
| const int grid_size = iq2_grid_size(type); | |
| if (iq2_data[gindex].grid) { | |
| return; | |
| } | |
| static const uint16_t kgrid_2bit_256[256] = { | |
| 0, 2, 5, 8, 10, 17, 20, 32, 34, 40, 42, 65, 68, 80, 88, 97, | |
| 100, 128, 130, 138, 162, 257, 260, 272, 277, 320, 388, 408, 512, 514, 546, 642, | |
| 1025, 1028, 1040, 1057, 1060, 1088, 1090, 1096, 1120, 1153, 1156, 1168, 1188, 1280, 1282, 1288, | |
| 1312, 1350, 1385, 1408, 1425, 1545, 1552, 1600, 1668, 1700, 2048, 2053, 2056, 2068, 2088, 2113, | |
| 2116, 2128, 2130, 2184, 2308, 2368, 2562, 2580, 4097, 4100, 4112, 4129, 4160, 4192, 4228, 4240, | |
| 4245, 4352, 4360, 4384, 4432, 4442, 4480, 4644, 4677, 5120, 5128, 5152, 5157, 5193, 5248, 5400, | |
| 5474, 5632, 5654, 6145, 6148, 6160, 6208, 6273, 6400, 6405, 6560, 6737, 8192, 8194, 8202, 8260, | |
| 8289, 8320, 8322, 8489, 8520, 8704, 8706, 9217, 9220, 9232, 9280, 9302, 9472, 9537, 9572, 9872, | |
| 10248, 10272, 10388, 10820, 16385, 16388, 16400, 16408, 16417, 16420, 16448, 16456, 16470, 16480, 16513, 16516, | |
| 16528, 16640, 16672, 16737, 16768, 16773, 16897, 16912, 16968, 16982, 17000, 17408, 17416, 17440, 17536, 17561, | |
| 17682, 17700, 17920, 18433, 18436, 18448, 18496, 18501, 18688, 18776, 18785, 18818, 19013, 19088, 20480, 20488, | |
| 20497, 20505, 20512, 20608, 20616, 20740, 20802, 20900, 21137, 21648, 21650, 21770, 22017, 22100, 22528, 22545, | |
| 22553, 22628, 22848, 23048, 24580, 24592, 24640, 24680, 24832, 24917, 25112, 25184, 25600, 25605, 25872, 25874, | |
| 25988, 26690, 32768, 32770, 32778, 32833, 32898, 33028, 33048, 33088, 33297, 33793, 33796, 33808, 33813, 33856, | |
| 33888, 34048, 34118, 34196, 34313, 34368, 34400, 34818, 35076, 35345, 36868, 36880, 36900, 36928, 37025, 37142, | |
| 37248, 37445, 37888, 37922, 37956, 38225, 39041, 39200, 40962, 41040, 41093, 41225, 41472, 42008, 43088, 43268, | |
| }; | |
| static const uint16_t kgrid_2bit_512[512] = { | |
| 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70, | |
| 73, 80, 82, 85, 88, 97, 100, 128, 130, 133, 136, 145, 148, 153, 160, 257, | |
| 260, 262, 265, 272, 274, 277, 280, 282, 289, 292, 320, 322, 325, 328, 337, 340, | |
| 352, 360, 385, 388, 400, 512, 514, 517, 520, 529, 532, 544, 577, 580, 592, 597, | |
| 640, 650, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1088, 1090, 1093, 1096, | |
| 1105, 1108, 1110, 1120, 1153, 1156, 1168, 1280, 1282, 1285, 1288, 1297, 1300, 1312, 1345, 1348, | |
| 1360, 1377, 1408, 1537, 1540, 1552, 1574, 1600, 1602, 1668, 2048, 2050, 2053, 2056, 2058, 2065, | |
| 2068, 2080, 2085, 2113, 2116, 2128, 2136, 2176, 2208, 2218, 2305, 2308, 2320, 2368, 2433, 2441, | |
| 2560, 2592, 2600, 2710, 2720, 4097, 4100, 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4160, | |
| 4162, 4165, 4168, 4177, 4180, 4192, 4202, 4225, 4228, 4240, 4352, 4354, 4357, 4360, 4369, 4372, | |
| 4384, 4417, 4420, 4432, 4480, 4500, 4502, 4609, 4612, 4614, 4624, 4672, 4704, 5120, 5122, 5125, | |
| 5128, 5137, 5140, 5152, 5185, 5188, 5193, 5200, 5220, 5248, 5377, 5380, 5392, 5440, 5632, 5652, | |
| 5705, 6145, 6148, 6160, 6162, 6208, 6228, 6278, 6400, 6405, 6502, 6737, 6825, 8192, 8194, 8197, | |
| 8200, 8202, 8209, 8212, 8224, 8257, 8260, 8272, 8320, 8352, 8449, 8452, 8464, 8512, 8520, 8549, | |
| 8704, 8738, 8832, 8872, 9217, 9220, 9232, 9257, 9280, 9472, 9537, 9554, 9625, 9729, 9754, 9894, | |
| 10240, 10248, 10250, 10272, 10325, 10376, 10402, 10600, 10640, 10760, 10784, 10882, 10888, 10890, 16385, 16388, | |
| 16390, 16393, 16400, 16402, 16405, 16408, 16417, 16420, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16480, | |
| 16485, 16513, 16516, 16528, 16640, 16642, 16645, 16648, 16657, 16660, 16672, 16705, 16708, 16720, 16768, 16773, | |
| 16802, 16897, 16900, 16912, 16914, 16937, 16960, 17408, 17410, 17413, 17416, 17425, 17428, 17433, 17440, 17473, | |
| 17476, 17488, 17536, 17556, 17665, 17668, 17680, 17700, 17728, 17818, 17920, 17930, 17988, 18000, 18433, 18436, | |
| 18448, 18496, 18501, 18516, 18530, 18688, 18705, 18756, 18768, 18793, 18948, 20480, 20482, 20485, 20488, 20497, | |
| 20500, 20512, 20520, 20545, 20548, 20560, 20608, 20737, 20740, 20752, 20757, 20800, 20802, 20992, 21060, 21162, | |
| 21505, 21508, 21520, 21537, 21568, 21600, 21633, 21665, 21760, 21768, 21888, 21896, 22049, 22120, 22177, 22528, | |
| 22548, 22593, 22608, 22681, 22810, 22848, 22850, 23173, 24577, 24580, 24592, 24640, 24660, 24674, 24710, 24745, | |
| 24832, 25124, 25162, 25234, 25600, 25622, 25872, 25920, 25925, 26020, 26625, 26730, 26917, 27142, 27220, 27234, | |
| 32768, 32770, 32773, 32776, 32785, 32788, 32800, 32810, 32833, 32836, 32848, 32896, 32898, 32936, 32938, 33025, | |
| 33028, 33030, 33040, 33088, 33105, 33113, 33280, 33312, 33408, 33410, 33440, 33448, 33793, 33796, 33808, 33810, | |
| 33813, 33856, 33888, 33929, 34048, 34116, 34213, 34328, 34410, 34816, 34824, 34853, 34906, 34944, 34946, 34984, | |
| 35078, 35362, 35456, 35464, 35478, 35496, 36865, 36868, 36880, 36928, 36950, 36996, 37120, 37154, 37220, 37462, | |
| 37513, 37888, 37893, 37956, 37968, 37976, 38185, 38288, 38290, 38465, 38993, 39078, 39241, 39445, 39520, 40960, | |
| 40962, 40968, 40970, 40992, 41002, 41120, 41297, 41305, 41382, 41472, 41474, 41480, 41514, 41600, 41632, 42048, | |
| 42133, 42597, 42648, 43018, 43040, 43042, 43048, 43168, 43176, 43268, 43396, 43398, 43560, 43562, 43665, 43690, | |
| }; | |
| static const uint16_t kgrid_1bit_512[512] = { | |
| 10, 33, 41, 85, 132, 134, 160, 162, 277, 337, 340, 345, 357, 405, 516, 545, | |
| 553, 598, 641, 650, 681, 1042, 1044, 1097, 1169, 1176, 1320, 1345, 1365, 1378, 1434, 1444, | |
| 1545, 1617, 1642, 1685, 2053, 2080, 2089, 2133, 2176, 2182, 2208, 2214, 2306, 2384, 2393, 2440, | |
| 2453, 2581, 2664, 2690, 2721, 4117, 4161, 4182, 4184, 4261, 4357, 4369, 4372, 4377, 4390, 4422, | |
| 4432, 4437, 4449, 4457, 4485, 4497, 4505, 4629, 4677, 4696, 4774, 5205, 5217, 5225, 5386, 5397, | |
| 5409, 5445, 5457, 5460, 5461, 5462, 5465, 5472, 5477, 5525, 5545, 5650, 5668, 5717, 5729, 5769, | |
| 5777, 6212, 6234, 6244, 6293, 6424, 6482, 6485, 6502, 6505, 6529, 6538, 6565, 6656, 6682, 6788, | |
| 6806, 6820, 8218, 8224, 8226, 8232, 8277, 8326, 8354, 8469, 8521, 8530, 8549, 8596, 8737, 8794, | |
| 9221, 9253, 9348, 9369, 9380, 9474, 9557, 9633, 9732, 9753, 9793, 9830, 9862, 9880, 10240, 10272, | |
| 10282, 10321, 10406, 10517, 10530, 10566, 10585, 10645, 10896, 16466, 16468, 16473, 16485, 16646, 16660, 16665, | |
| 16725, 16793, 16806, 16914, 16969, 16977, 16996, 17028, 17057, 17408, 17416, 17434, 17493, 17512, 17578, 17685, | |
| 17696, 17733, 17745, 17748, 17749, 17750, 17753, 17765, 17794, 17813, 17946, 17984, 18005, 18072, 18453, 18529, | |
| 18569, 18722, 18756, 18762, 18773, 18794, 18833, 18853, 18945, 19026, 19033, 19077, 20489, 20497, 20500, 20517, | |
| 20565, 20586, 20610, 20633, 20757, 20769, 20776, 20805, 20817, 20820, 20821, 20822, 20825, 20837, 20864, 20872, | |
| 20885, 20896, 21002, 21029, 21077, 21146, 21510, 21525, 21573, 21585, 21588, 21589, 21590, 21593, 21605, 21653, | |
| 21665, 21765, 21777, 21780, 21781, 21782, 21785, 21797, 21825, 21828, 21829, 21830, 21833, 21840, 21841, 21842, | |
| 21844, 21846, 21848, 21849, 21850, 21857, 21860, 21861, 21862, 21865, 21893, 21905, 21908, 21909, 21910, 21913, | |
| 21925, 22024, 22037, 22085, 22097, 22100, 22101, 22102, 22105, 22117, 22165, 22545, 22566, 22568, 22594, 22608, | |
| 22613, 22676, 22697, 22793, 22805, 22853, 22865, 22868, 22869, 22870, 22873, 22885, 22933, 22946, 23046, 23072, | |
| 23125, 23209, 24597, 24640, 24665, 24673, 24725, 24833, 24840, 24869, 24917, 24934, 24965, 25001, 25108, 25110, | |
| 25152, 25184, 25192, 25234, 25616, 25618, 25625, 25685, 25704, 25738, 25744, 25770, 25877, 25897, 25925, 25937, | |
| 25940, 25941, 25942, 25945, 25957, 25986, 26005, 26186, 26197, 26276, 26632, 26634, 26725, 26757, 26770, 26885, | |
| 26965, 26976, 26986, 27032, 27153, 27174, 27200, 27208, 27240, 27269, 27282, 27290, 32778, 32800, 32802, 32808, | |
| 32810, 32853, 32904, 32922, 32930, 32932, 33105, 33110, 33112, 33125, 33157, 33280, 33288, 33301, 33312, 33320, | |
| 33424, 33797, 33829, 33858, 34068, 34133, 34146, 34176, 34217, 34306, 34342, 34441, 34454, 34468, 34832, 34918, | |
| 34965, 34984, 35094, 35137, 35161, 35208, 35232, 35332, 35338, 35368, 35429, 36932, 36934, 36953, 37009, 37125, | |
| 37136, 37138, 37145, 37157, 37205, 37220, 37258, 37290, 37444, 37446, 37465, 37478, 37525, 37905, 37968, 37973, | |
| 38040, 38054, 38145, 38154, 38165, 38180, 38186, 38213, 38225, 38228, 38229, 38230, 38233, 38245, 38293, 38485, | |
| 38504, 38530, 38938, 38985, 38993, 39012, 39040, 39173, 39192, 39253, 39265, 39301, 39316, 39322, 39442, 39497, | |
| 39504, 39590, 40970, 40984, 40992, 41002, 41045, 41120, 41128, 41237, 41289, 41297, 41317, 41364, 41366, 41514, | |
| 41557, 41633, 41989, 42021, 42056, 42068, 42074, 42113, 42242, 42265, 42274, 42325, 42340, 42402, 42501, 42512, | |
| 42533, 42624, 42632, 42666, 43040, 43093, 43106, 43168, 43176, 43264, 43286, 43345, 43429, 43590, 43618, 43680, | |
| }; | |
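| // The code below expands each 16-bit grid code into 8 signed coordinates (2*l + 1 for every | |
| // 2-bit field l), fills kmap_q2xs so any code can be looked up directly, and then, for every | |
| // code that is not on the grid, records its nearest grid points (up to nwant distinct | |
| // distances) in kneighbors_q2xs for use during quantization. | |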
| const int kmap_size = 43692; | |
| const int nwant = type == GGML_TYPE_IQ1_S ? 3 : 2; | |
| const uint16_t * kgrid = type == GGML_TYPE_IQ2_XXS ? kgrid_2bit_256 : | |
| type == GGML_TYPE_IQ2_XS ? kgrid_2bit_512 : kgrid_1bit_512; | |
| uint64_t * kgrid_q2xs; | |
| int * kmap_q2xs; | |
| uint16_t * kneighbors_q2xs; | |
| printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size); | |
| uint64_t * the_grid = (uint64_t *)malloc(grid_size*sizeof(uint64_t)); | |
| for (int k = 0; k < grid_size; ++k) { | |
| int8_t * pos = (int8_t *)(the_grid + k); | |
| for (int i = 0; i < 8; ++i) { | |
| int l = (kgrid[k] >> 2*i) & 0x3; | |
| pos[i] = 2*l + 1; | |
| } | |
| } | |
| kgrid_q2xs = the_grid; | |
| iq2_data[gindex].grid = the_grid; | |
| kmap_q2xs = (int *)malloc(kmap_size*sizeof(int)); | |
| iq2_data[gindex].map = kmap_q2xs; | |
| for (int i = 0; i < kmap_size; ++i) kmap_q2xs[i] = -1; | |
| uint64_t aux64; | |
| uint8_t * aux8 = (uint8_t *)&aux64; | |
| for (int i = 0; i < grid_size; ++i) { | |
| aux64 = kgrid_q2xs[i]; | |
| uint16_t index = 0; | |
| for (int k=0; k<8; ++k) { | |
| uint16_t q = (aux8[k] - 1)/2; | |
| index |= (q << 2*k); | |
| } | |
| kmap_q2xs[index] = i; | |
| } | |
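| // Two passes over all off-grid codes: the first only counts how many neighbour entries will be | |
| // needed so the buffer can be sized, the second fills it, storing for each code a length | |
| // followed by the neighbouring grid indices and marking the code in kmap_q2xs with -(offset+1). | |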
| int8_t pos[8]; | |
| int * dist2 = (int *)malloc(2*grid_size*sizeof(int)); | |
| int num_neighbors = 0, num_not_in_map = 0; | |
| for (int i = 0; i < kmap_size; ++i) { | |
| if (kmap_q2xs[i] >= 0) continue; | |
| ++num_not_in_map; | |
| for (int k = 0; k < 8; ++k) { | |
| int l = (i >> 2*k) & 0x3; | |
| pos[k] = 2*l + 1; | |
| } | |
| for (int j = 0; j < grid_size; ++j) { | |
| const int8_t * pg = (const int8_t *)(kgrid_q2xs + j); | |
| int d2 = 0; | |
| for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]); | |
| dist2[2*j+0] = d2; | |
| dist2[2*j+1] = j; | |
| } | |
| qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func); | |
| int n = 0; int d2 = dist2[0]; | |
| int nhave = 1; | |
| for (int j = 0; j < grid_size; ++j) { | |
| if (dist2[2*j] > d2) { | |
| if (nhave == nwant) break; | |
| d2 = dist2[2*j]; | |
| ++nhave; | |
| } | |
| ++n; | |
| } | |
| num_neighbors += n; | |
| } | |
| printf("%s: %d neighbours in total\n", __func__, num_neighbors); | |
| kneighbors_q2xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t)); | |
| iq2_data[gindex].neighbours = kneighbors_q2xs; | |
| int counter = 0; | |
| for (int i = 0; i < kmap_size; ++i) { | |
| if (kmap_q2xs[i] >= 0) continue; | |
| for (int k = 0; k < 8; ++k) { | |
| int l = (i >> 2*k) & 0x3; | |
| pos[k] = 2*l + 1; | |
| } | |
| for (int j = 0; j < grid_size; ++j) { | |
| const int8_t * pg = (const int8_t *)(kgrid_q2xs + j); | |
| int d2 = 0; | |
| for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]); | |
| dist2[2*j+0] = d2; | |
| dist2[2*j+1] = j; | |
| } | |
| qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func); | |
| kmap_q2xs[i] = -(counter + 1); | |
| int d2 = dist2[0]; | |
| uint16_t * start = &kneighbors_q2xs[counter++]; | |
| int n = 0, nhave = 1; | |
| for (int j = 0; j < grid_size; ++j) { | |
| if (dist2[2*j] > d2) { | |
| if (nhave == nwant) break; | |
| d2 = dist2[2*j]; | |
| ++nhave; | |
| } | |
| kneighbors_q2xs[counter++] = dist2[2*j+1]; | |
| ++n; | |
| } | |
| *start = n; | |
| } | |
| free(dist2); | |
| } | |
| void iq2xs_free_impl(enum ggml_type type) { | |
| GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S); | |
| const int gindex = iq2_data_index(type); | |
| if (iq2_data[gindex].grid) { | |
| free(iq2_data[gindex].grid); iq2_data[gindex].grid = NULL; | |
| free(iq2_data[gindex].map); iq2_data[gindex].map = NULL; | |
| free(iq2_data[gindex].neighbours); iq2_data[gindex].neighbours = NULL; | |
| } | |
| } | |
| static int iq2_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid, | |
| const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) { | |
| int num_neighbors = neighbours[0]; | |
| GGML_ASSERT(num_neighbors > 0); | |
| float best_d2 = FLT_MAX; | |
| int grid_index = -1; | |
| for (int j = 1; j <= num_neighbors; ++j) { | |
| const int8_t * pg = (const int8_t *)(grid + neighbours[j]); | |
| float d2 = 0; | |
| for (int i = 0; i < 8; ++i) { | |
| float q = pg[i]; | |
| float diff = scale*q - xval[i]; | |
| d2 += weight[i]*diff*diff; | |
| } | |
| if (d2 < best_d2) { | |
| best_d2 = d2; grid_index = neighbours[j]; | |
| } | |
| } | |
| GGML_ASSERT(grid_index >= 0); | |
| const int8_t * pg = (const int8_t *)(grid + grid_index); | |
| for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2; | |
| return grid_index; | |
| } | |
| static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) { | |
| const int gindex = iq2_data_index(GGML_TYPE_IQ2_XXS); | |
| const uint64_t * kgrid_q2xs = iq2_data[gindex].grid; | |
| const int * kmap_q2xs = iq2_data[gindex].map; | |
| const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours; | |
| GGML_ASSERT(quant_weights && "missing quantization weights"); | |
| GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(n%QK_K == 0); | |
| const int kMaxQ = 3; | |
| const int nbl = n/256; | |
| block_iq2_xxs * y = vy; | |
| float scales[QK_K/32]; | |
| float weight[32]; | |
| float xval[32]; | |
| int8_t L[32]; | |
| int8_t Laux[32]; | |
| float waux[32]; | |
| uint8_t block_signs[4]; | |
| uint32_t q2[2*(QK_K/32)]; | |
| for (int ibl = 0; ibl < nbl; ++ibl) { | |
| y[ibl].d = GGML_FP32_TO_FP16(0.f); | |
| memset(q2, 0, QK_K/4); | |
| float max_scale = 0; | |
| const float * xbl = x + QK_K*ibl; | |
| float sumx2 = 0; | |
| for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; | |
| float sigma2 = sumx2/QK_K; | |
| for (int ib = 0; ib < QK_K/32; ++ib) { | |
| const float * xb = xbl + 32*ib; | |
| const float * qw = quant_weights + QK_K*ibl + 32*ib; | |
| for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); | |
| for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]); | |
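| // Split each group of 8 values into magnitudes (xval) and a sign mask. Only 7 sign bits are | |
| // stored per group, so the number of negative entries is forced to even parity: if it is odd, | |
| // flip the sign of the entry with the smallest weighted squared value, so the eighth sign can | |
| // be recovered from the parity of the stored seven. | |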
| for (int k = 0; k < 4; ++k) { | |
| int nflip = 0; | |
| uint8_t s = 0; | |
| for (int i = 0; i < 8; ++i) { | |
| if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i]; | |
| else { | |
| xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i); | |
| } | |
| } | |
| if (nflip%2) { | |
| int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin]; | |
| for (int i = 1; i < 8; ++i) { | |
| float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i]; | |
| if (ax < min) { | |
| min = ax; imin = i; | |
| } | |
| } | |
| xval[8*k+imin] = -xval[8*k+imin]; | |
| s ^= (1 << imin); | |
| } | |
| block_signs[k] = s & 127; | |
| } | |
| float max = xval[0]; | |
| for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]); | |
| if (!max) { | |
| scales[ib] = 0; | |
| memset(L, 0, 32); | |
| continue; | |
| } | |
| float scale = make_qp_quants(32, kMaxQ+1, xval, (uint8_t*)L, weight); | |
| float eff_max = scale*kMaxQ; | |
| float best = 0; | |
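| // Search candidate scales around the initial estimate; for each candidate quantize to the | |
| // nearest admissible 2-bit levels, snap off-grid groups of 8 to their best neighbour, and keep | |
| // the scale maximizing the weighted sumqx^2/sumq2 (the smallest weighted error at the optimal scale). | |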
| for (int is = -6; is <= 6; ++is) { | |
| float id = (2*kMaxQ-1+is*0.1f)/eff_max; | |
| float this_scale = 1/id; | |
| for (int k = 0; k < 4; ++k) { | |
| for (int i = 0; i < 8; ++i) { | |
| int l = nearest_int(0.5f*(id*xval[8*k+i]-1)); | |
| Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l)); | |
| } | |
| uint16_t u = 0; | |
| for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i); | |
| int grid_index = kmap_q2xs[u]; | |
| if (grid_index < 0) { | |
| const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; | |
| grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k); | |
| } | |
| } | |
| float sumqx = 0, sumq2 = 0; | |
| for (int i = 0; i < 32; ++i) { | |
| float w = weight[i]; | |
| float q = 2*Laux[i] + 1; | |
| sumqx += w*xval[i]*q; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { | |
| scale = sumqx/sumq2; best = scale*sumqx; | |
| memcpy(L, Laux, 32); | |
| } | |
| } | |
| if (scale > 0) { | |
| float id = 1/scale; | |
| for (int k = 0; k < 4; ++k) { | |
| uint16_t u = 0; | |
| for (int i = 0; i < 8; ++i) { | |
| int l = nearest_int(0.5f*(id*xval[8*k+i]-1)); | |
| l = MAX(0, MIN(kMaxQ-1, l)); | |
| u |= (l << 2*i); | |
| } | |
| int grid_index = kmap_q2xs[u]; | |
| if (grid_index < 0) { | |
| const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; | |
| grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k); | |
| } | |
| const int8_t * pg = (const int8_t *)(kgrid_q2xs + grid_index); | |
| for (int i = 0; i < 8; ++i) L[8*k+i] = (pg[i] - 1)/2; | |
| } | |
| float sumqx = 0, sumq2 = 0; | |
| for (int i = 0; i < 32; ++i) { | |
| float w = weight[i]; | |
| float q = 2*L[i] + 1; | |
| sumqx += w*xval[i]*q; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumq2 > 0) scale = sumqx/sumq2; | |
| } | |
| if (scale < 0) { | |
| // This should never happen, but just in case, flip scale so that it is positive (we use uints to encode the scale) | |
| // and correspondingly flip quant signs. | |
| scale = -scale; | |
| for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127; | |
| } | |
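| // Pack the sub-block: q2[2*ib+0] holds the four 8-bit grid indices, q2[2*ib+1] holds the four | |
| // 7-bit sign masks in bits 0..27; bits 28..31 receive the 4-bit sub-block scale further below. | |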
| for (int k = 0; k < 4; ++k) { | |
| uint16_t u = 0; | |
| for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i); | |
| int grid_index = kmap_q2xs[u]; | |
| if (grid_index < 0) { | |
| printf("Oops: found point %u not on grid:", u); | |
| for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]); | |
| printf("\n"); | |
| GGML_ASSERT(false); | |
| } | |
| q2[2*ib+0] |= (grid_index << 8*k); | |
| q2[2*ib+1] |= (block_signs[k] << 7*k); | |
| } | |
| GGML_ASSERT(scale >= 0); | |
| scales[ib] = scale; | |
| max_scale = MAX(max_scale, scale); | |
| } | |
| if (!max_scale) { | |
| memset(y[ibl].qs, 0, QK_K/4); | |
| continue; | |
| } | |
| float d = max_scale/31; | |
| y[ibl].d = GGML_FP32_TO_FP16(d); | |
| float id = 1/d; | |
| for (int ib = 0; ib < QK_K/32; ++ib) { | |
| int l = nearest_int(0.5f*(id*scales[ib]-1)); | |
| l = MAX(0, MIN(15, l)); | |
| q2[2*ib+1] |= ((uint32_t)l << 28); | |
| } | |
| memcpy(y[ibl].qs, q2, QK_K/4); | |
| } | |
| } | |
| static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) { | |
| const int gindex = iq2_data_index(GGML_TYPE_IQ2_XS); | |
| const uint64_t * kgrid_q2xs = iq2_data[gindex].grid; | |
| const int * kmap_q2xs = iq2_data[gindex].map; | |
| const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours; | |
| GGML_ASSERT(quant_weights && "missing quantization weights"); | |
| GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(n%QK_K == 0); | |
| const int kMaxQ = 3; | |
| const int nbl = n/256; | |
| block_iq2_xs * y = vy; | |
| float scales[QK_K/16]; | |
| float weight[16]; | |
| float xval[16]; | |
| int8_t L[16]; | |
| int8_t Laux[16]; | |
| float waux[16]; | |
| bool is_on_grid[2]; | |
| bool is_on_grid_aux[2]; | |
| uint8_t block_signs[2]; | |
| uint16_t q2[2*(QK_K/16)]; | |
| for (int ibl = 0; ibl < nbl; ++ibl) { | |
| y[ibl].d = GGML_FP32_TO_FP16(0.f); | |
| memset(q2, 0, QK_K/4); | |
| memset(y[ibl].scales, 0, QK_K/32); | |
| float max_scale = 0; | |
| const float * xbl = x + QK_K*ibl; | |
| float sumx2 = 0; | |
| for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; | |
| float sigma2 = sumx2/QK_K; | |
| for (int ib = 0; ib < QK_K/16; ++ib) { | |
| const float * xb = xbl + 16*ib; | |
| const float * qw = quant_weights + QK_K*ibl + 16*ib; | |
| for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); | |
| for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]); | |
| for (int k = 0; k < 2; ++k) { | |
| int nflip = 0; | |
| uint8_t s = 0; | |
| for (int i = 0; i < 8; ++i) { | |
| if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i]; | |
| else { | |
| xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i); | |
| } | |
| } | |
| if (nflip%2) { | |
| int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin]; | |
| for (int i = 1; i < 8; ++i) { | |
| float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i]; | |
| if (ax < min) { | |
| min = ax; imin = i; | |
| } | |
| } | |
| xval[8*k+imin] = -xval[8*k+imin]; | |
| s ^= (1 << imin); | |
| } | |
| block_signs[k] = s & 127; | |
| } | |
| float max = xval[0]; | |
| for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]); | |
| if (!max) { | |
| scales[ib] = 0; | |
| memset(L, 0, 16); | |
| continue; | |
| } | |
| float best = 0; | |
| float scale = max/(2*kMaxQ-1); | |
| is_on_grid[0] = is_on_grid[1] = true; | |
| for (int is = -9; is <= 9; ++is) { | |
| float id = (2*kMaxQ-1+is*0.1f)/max; | |
| float this_scale = 1/id; | |
| for (int k = 0; k < 2; ++k) { | |
| for (int i = 0; i < 8; ++i) { | |
| int l = nearest_int(0.5f*(id*xval[8*k+i]-1)); | |
| Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l)); | |
| } | |
| uint16_t u = 0; | |
| for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i); | |
| int grid_index = kmap_q2xs[u]; | |
| is_on_grid_aux[k] = true; | |
| if (grid_index < 0) { | |
| is_on_grid_aux[k] = false; | |
| const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; | |
| grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k); | |
| } | |
| } | |
| float sumqx = 0, sumq2 = 0; | |
| for (int i = 0; i < 16; ++i) { | |
| float w = weight[i]; | |
| float q = 2*Laux[i] + 1; | |
| sumqx += w*xval[i]*q; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { | |
| scale = sumqx/sumq2; best = scale*sumqx; | |
| for (int i = 0; i < 16; ++i) L[i] = Laux[i]; | |
| for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k]; | |
| } | |
| } | |
| int n_not_ongrid = 0; | |
| for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid; | |
| if (n_not_ongrid > 0 && scale > 0) { | |
| float id = 1/scale; | |
| for (int k = 0; k < 2; ++k) { | |
| if (is_on_grid[k]) continue; | |
| uint16_t u = 0; | |
| for (int i = 0; i < 8; ++i) { | |
| int l = nearest_int(0.5f*(id*xval[8*k+i]-1)); | |
| l = MAX(0, MIN(kMaxQ-1, l)); | |
| u |= (l << 2*i); | |
| L[8*k + i] = l; | |
| } | |
| int grid_index = kmap_q2xs[u]; | |
| if (grid_index < 0) { | |
| const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; | |
| grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k); | |
| } | |
| } | |
| float sumqx = 0, sumq2 = 0; | |
| for (int i = 0; i < 16; ++i) { | |
| float w = weight[i]; | |
| float q = 2*L[i] + 1; | |
| sumqx += w*xval[i]*q; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumq2 > 0) scale = sumqx/sumq2; | |
| } | |
| if (scale < 0) { | |
| scale = -scale; | |
| for (int k = 0; k < 2; ++k) block_signs[k] = (~block_signs[k]) & 127; | |
| } | |
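| // IQ2_XS packs each group of 8 values into one 16-bit word: a 9-bit index into the 512-entry | |
| // grid in the low bits and the 7-bit sign mask in the high bits. | |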
| for (int k = 0; k < 2; ++k) { | |
| uint16_t u = 0; | |
| for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i); | |
| int grid_index = kmap_q2xs[u]; | |
| if (grid_index < 0) { | |
| printf("Oops: found point %u not on grid:", u); | |
| for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]); | |
| printf("\n"); | |
| GGML_ASSERT(false); | |
| } | |
| q2[2*ib+k] = grid_index | (block_signs[k] << 9); | |
| } | |
| GGML_ASSERT(scale >= 0); | |
| scales[ib] = scale; | |
| max_scale = MAX(max_scale, scale); | |
| } | |
| if (!max_scale) { | |
| memset(y[ibl].qs, 0, QK_K/4); | |
| continue; | |
| } | |
| float d = max_scale/31; | |
| y[ibl].d = GGML_FP32_TO_FP16(d); | |
| float id = 1/d; | |
| for (int ib = 0; ib < QK_K/16; ++ib) { | |
| int l = nearest_int(0.5f*(id*scales[ib]-1)); | |
| l = MAX(0, MIN(15, l)); | |
| if (ib%2 == 0) y[ibl].scales[ib/2] = l; | |
| else y[ibl].scales[ib/2] |= (l << 4); | |
| } | |
| memcpy(y[ibl].qs, q2, QK_K/4); | |
| } | |
| } | |
| size_t quantize_iq2_xxs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| GGML_ASSERT(n_per_row%QK_K == 0); | |
| int nblock = n_per_row/QK_K; | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_iq2_xxs_impl(src, qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += nblock*sizeof(block_iq2_xxs); | |
| } | |
| return nrow * nblock * sizeof(block_iq2_xxs); | |
| } | |
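| // Usage sketch (illustrative only, not part of this file): quantize a single QK_K-wide row. | |
| // ggml_quantize_init(GGML_TYPE_IQ2_XXS) must have been called first and, for this type, | |
| // quant_weights (an importance-matrix row) must be non-NULL: | |
| //   float row[QK_K] = { ... }, imatrix[QK_K] = { ... }; | |
| //   block_iq2_xxs out[1]; | |
| //   quantize_iq2_xxs(row, out, /*nrow=*/1, /*n_per_row=*/QK_K, /*hist=*/NULL, imatrix); | |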
| size_t quantize_iq2_xs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| GGML_ASSERT(n_per_row%QK_K == 0); | |
| int nblock = n_per_row/QK_K; | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_iq2_xs_impl(src, qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += nblock*sizeof(block_iq2_xs); | |
| } | |
| return nrow * nblock * sizeof(block_iq2_xs); | |
| } | |
| // | |
| // ============================================= 3-bit using D4 lattice | |
| // | |
| typedef struct { | |
| uint32_t * grid; | |
| int * map; | |
| uint16_t * neighbours; | |
| } iq3_entry_t; | |
| static iq3_entry_t iq3_data[2] = { | |
| {NULL, NULL, NULL}, | |
| {NULL, NULL, NULL}, | |
| }; | |
| static inline int iq3_data_index(int grid_size) { | |
| (void)grid_size; | |
| GGML_ASSERT(grid_size == 256 || grid_size == 512); | |
| return grid_size == 256 ? 0 : 1; | |
| } | |
| static int iq3_compare_func(const void * left, const void * right) { | |
| const int * l = (const int *)left; | |
| const int * r = (const int *)right; | |
| return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0; | |
| } | |
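| // Same neighbour-map construction as the 2-bit case above, but on 4-dimensional grid points | |
| // with 3-bit coordinates, so the map covers all 8^4 = 4096 index combinations. | |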
| void iq3xs_init_impl(int grid_size) { | |
| const int gindex = iq3_data_index(grid_size); | |
| if (iq3_data[gindex].grid) { | |
| return; | |
| } | |
| static const uint16_t kgrid_256[256] = { | |
| 0, 2, 4, 9, 11, 15, 16, 18, 25, 34, 59, 61, 65, 67, 72, 74, | |
| 81, 85, 88, 90, 97, 108, 120, 128, 130, 132, 137, 144, 146, 153, 155, 159, | |
| 169, 175, 189, 193, 199, 200, 202, 213, 248, 267, 287, 292, 303, 315, 317, 321, | |
| 327, 346, 362, 413, 436, 456, 460, 462, 483, 497, 513, 515, 520, 522, 529, 531, | |
| 536, 538, 540, 551, 552, 576, 578, 585, 592, 594, 641, 643, 648, 650, 657, 664, | |
| 698, 704, 706, 720, 729, 742, 758, 769, 773, 808, 848, 852, 870, 889, 901, 978, | |
| 992, 1024, 1026, 1033, 1035, 1040, 1042, 1046, 1049, 1058, 1089, 1091, 1093, 1096, 1098, 1105, | |
| 1112, 1139, 1143, 1144, 1152, 1154, 1161, 1167, 1168, 1170, 1183, 1184, 1197, 1217, 1224, 1228, | |
| 1272, 1276, 1309, 1323, 1347, 1367, 1377, 1404, 1473, 1475, 1486, 1509, 1537, 1544, 1546, 1553, | |
| 1555, 1576, 1589, 1594, 1600, 1602, 1616, 1625, 1636, 1638, 1665, 1667, 1672, 1685, 1706, 1722, | |
| 1737, 1755, 1816, 1831, 1850, 1856, 1862, 1874, 1901, 1932, 1950, 1971, 2011, 2032, 2052, 2063, | |
| 2077, 2079, 2091, 2095, 2172, 2192, 2207, 2208, 2224, 2230, 2247, 2277, 2308, 2345, 2356, 2389, | |
| 2403, 2424, 2501, 2504, 2506, 2520, 2570, 2593, 2616, 2624, 2630, 2646, 2669, 2700, 2714, 2746, | |
| 2754, 2795, 2824, 2835, 2839, 2874, 2882, 2905, 2984, 3028, 3042, 3092, 3108, 3110, 3124, 3153, | |
| 3185, 3215, 3252, 3288, 3294, 3364, 3397, 3434, 3483, 3523, 3537, 3587, 3589, 3591, 3592, 3610, | |
| 3626, 3670, 3680, 3722, 3749, 3754, 3776, 3789, 3803, 3824, 3857, 3873, 3904, 3906, 3924, 3992, | |
| }; | |
| static const uint16_t kgrid_512[512] = { | |
| 0, 1, 2, 5, 7, 8, 9, 10, 12, 14, 16, 17, 21, 27, 32, 34, | |
| 37, 39, 41, 43, 48, 50, 57, 60, 63, 64, 65, 66, 68, 72, 73, 77, | |
| 80, 83, 87, 89, 93, 100, 113, 117, 122, 128, 129, 133, 135, 136, 139, 142, | |
| 145, 149, 152, 156, 162, 165, 167, 169, 171, 184, 187, 195, 201, 205, 208, 210, | |
| 217, 219, 222, 228, 232, 234, 247, 249, 253, 256, 267, 271, 273, 276, 282, 288, | |
| 291, 297, 312, 322, 324, 336, 338, 342, 347, 353, 357, 359, 374, 379, 390, 393, | |
| 395, 409, 426, 441, 448, 450, 452, 464, 466, 470, 475, 488, 492, 512, 513, 514, | |
| 516, 520, 521, 523, 525, 527, 528, 530, 537, 540, 542, 556, 558, 561, 570, 576, | |
| 577, 579, 582, 584, 588, 593, 600, 603, 609, 616, 618, 632, 638, 640, 650, 653, | |
| 655, 656, 660, 666, 672, 675, 685, 688, 698, 705, 708, 711, 712, 715, 721, 727, | |
| 728, 732, 737, 754, 760, 771, 773, 778, 780, 793, 795, 802, 806, 808, 812, 833, | |
| 840, 843, 849, 856, 858, 873, 912, 916, 919, 932, 934, 961, 963, 968, 970, 977, | |
| 989, 993, 1010, 1016, 1024, 1025, 1027, 1029, 1031, 1032, 1034, 1036, 1038, 1041, 1043, 1047, | |
| 1048, 1050, 1057, 1059, 1061, 1064, 1066, 1079, 1080, 1083, 1085, 1088, 1090, 1096, 1099, 1103, | |
| 1106, 1109, 1113, 1116, 1122, 1129, 1153, 1156, 1159, 1169, 1171, 1176, 1183, 1185, 1195, 1199, | |
| 1209, 1212, 1216, 1218, 1221, 1225, 1234, 1236, 1241, 1243, 1250, 1256, 1270, 1281, 1287, 1296, | |
| 1299, 1306, 1309, 1313, 1338, 1341, 1348, 1353, 1362, 1375, 1376, 1387, 1400, 1408, 1410, 1415, | |
| 1425, 1453, 1457, 1477, 1481, 1494, 1496, 1507, 1512, 1538, 1545, 1547, 1549, 1551, 1554, 1561, | |
| 1563, 1565, 1570, 1572, 1575, 1577, 1587, 1593, 1601, 1603, 1605, 1612, 1617, 1619, 1632, 1648, | |
| 1658, 1662, 1664, 1674, 1680, 1690, 1692, 1704, 1729, 1736, 1740, 1745, 1747, 1751, 1752, 1761, | |
| 1763, 1767, 1773, 1787, 1795, 1801, 1806, 1810, 1817, 1834, 1840, 1844, 1857, 1864, 1866, 1877, | |
| 1882, 1892, 1902, 1915, 1934, 1953, 1985, 1987, 2000, 2002, 2013, 2048, 2052, 2058, 2064, 2068, | |
| 2071, 2074, 2081, 2088, 2104, 2114, 2119, 2121, 2123, 2130, 2136, 2141, 2147, 2153, 2157, 2177, | |
| 2179, 2184, 2189, 2193, 2203, 2208, 2223, 2226, 2232, 2244, 2249, 2251, 2256, 2258, 2265, 2269, | |
| 2304, 2306, 2324, 2335, 2336, 2361, 2373, 2375, 2385, 2418, 2443, 2460, 2480, 2504, 2509, 2520, | |
| 2531, 2537, 2562, 2568, 2572, 2578, 2592, 2596, 2599, 2602, 2614, 2620, 2625, 2627, 2629, 2634, | |
| 2641, 2650, 2682, 2688, 2697, 2707, 2712, 2718, 2731, 2754, 2759, 2760, 2775, 2788, 2793, 2805, | |
| 2811, 2817, 2820, 2832, 2842, 2854, 2890, 2902, 2921, 2923, 2978, 3010, 3012, 3026, 3081, 3083, | |
| 3085, 3097, 3099, 3120, 3136, 3152, 3159, 3188, 3210, 3228, 3234, 3245, 3250, 3256, 3264, 3276, | |
| 3281, 3296, 3349, 3363, 3378, 3392, 3395, 3420, 3440, 3461, 3488, 3529, 3531, 3584, 3588, 3591, | |
| 3600, 3602, 3614, 3616, 3628, 3634, 3650, 3657, 3668, 3683, 3685, 3713, 3716, 3720, 3726, 3729, | |
| 3736, 3753, 3778, 3802, 3805, 3819, 3841, 3845, 3851, 3856, 3880, 3922, 3938, 3970, 3993, 4032, | |
| }; | |
| const int kmap_size = 4096; | |
| const int nwant = grid_size == 256 ? 2 : 3; | |
| const uint16_t * kgrid = grid_size == 256 ? kgrid_256 : kgrid_512; | |
| uint32_t * kgrid_q3xs; | |
| int * kmap_q3xs; | |
| uint16_t * kneighbors_q3xs; | |
| printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size); | |
| uint32_t * the_grid = (uint32_t *)malloc(grid_size*sizeof(uint32_t)); | |
| for (int k = 0; k < grid_size; ++k) { | |
| int8_t * pos = (int8_t *)(the_grid + k); | |
| for (int i = 0; i < 4; ++i) { | |
| int l = (kgrid[k] >> 3*i) & 0x7; | |
| pos[i] = 2*l + 1; | |
| } | |
| } | |
| kgrid_q3xs = the_grid; | |
| iq3_data[gindex].grid = the_grid; | |
| kmap_q3xs = (int *)malloc(kmap_size*sizeof(int)); | |
| iq3_data[gindex].map = kmap_q3xs; | |
| for (int i = 0; i < kmap_size; ++i) kmap_q3xs[i] = -1; | |
| uint32_t aux32; | |
| uint8_t * aux8 = (uint8_t *)&aux32; | |
| for (int i = 0; i < grid_size; ++i) { | |
| aux32 = kgrid_q3xs[i]; | |
| uint16_t index = 0; | |
| for (int k=0; k<4; ++k) { | |
| uint16_t q = (aux8[k] - 1)/2; | |
| index |= (q << 3*k); | |
| } | |
| kmap_q3xs[index] = i; | |
| } | |
| int8_t pos[4]; | |
| int * dist2 = (int *)malloc(2*grid_size*sizeof(int)); | |
| int num_neighbors = 0, num_not_in_map = 0; | |
| for (int i = 0; i < kmap_size; ++i) { | |
| if (kmap_q3xs[i] >= 0) continue; | |
| ++num_not_in_map; | |
| for (int k = 0; k < 4; ++k) { | |
| int l = (i >> 3*k) & 0x7; | |
| pos[k] = 2*l + 1; | |
| } | |
| for (int j = 0; j < grid_size; ++j) { | |
| const int8_t * pg = (const int8_t *)(kgrid_q3xs + j); | |
| int d2 = 0; | |
| for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]); | |
| dist2[2*j+0] = d2; | |
| dist2[2*j+1] = j; | |
| } | |
| qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func); | |
| int n = 0; int d2 = dist2[0]; | |
| int nhave = 1; | |
| for (int j = 0; j < grid_size; ++j) { | |
| if (dist2[2*j] > d2) { | |
| if (nhave == nwant) break; | |
| d2 = dist2[2*j]; | |
| ++nhave; | |
| } | |
| ++n; | |
| } | |
| num_neighbors += n; | |
| } | |
| printf("%s: %d neighbours in total\n", __func__, num_neighbors); | |
| kneighbors_q3xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t)); | |
| iq3_data[gindex].neighbours = kneighbors_q3xs; | |
| int counter = 0; | |
| for (int i = 0; i < kmap_size; ++i) { | |
| if (kmap_q3xs[i] >= 0) continue; | |
| for (int k = 0; k < 4; ++k) { | |
| int l = (i >> 3*k) & 0x7; | |
| pos[k] = 2*l + 1; | |
| } | |
| for (int j = 0; j < grid_size; ++j) { | |
| const int8_t * pg = (const int8_t *)(kgrid_q3xs + j); | |
| int d2 = 0; | |
| for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]); | |
| dist2[2*j+0] = d2; | |
| dist2[2*j+1] = j; | |
| } | |
| qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func); | |
| kmap_q3xs[i] = -(counter + 1); | |
| int d2 = dist2[0]; | |
| uint16_t * start = &kneighbors_q3xs[counter++]; | |
| int n = 0, nhave = 1; | |
| for (int j = 0; j < grid_size; ++j) { | |
| if (dist2[2*j] > d2) { | |
| if (nhave == nwant) break; | |
| d2 = dist2[2*j]; | |
| ++nhave; | |
| } | |
| kneighbors_q3xs[counter++] = dist2[2*j+1]; | |
| ++n; | |
| } | |
| *start = n; | |
| } | |
| free(dist2); | |
| } | |
| void iq3xs_free_impl(int grid_size) { | |
| GGML_ASSERT(grid_size == 256 || grid_size == 512); | |
| const int gindex = iq3_data_index(grid_size); | |
| if (iq3_data[gindex].grid) { | |
| free(iq3_data[gindex].grid); iq3_data[gindex].grid = NULL; | |
| free(iq3_data[gindex].map); iq3_data[gindex].map = NULL; | |
| free(iq3_data[gindex].neighbours); iq3_data[gindex].neighbours = NULL; | |
| } | |
| } | |
| static int iq3_find_best_neighbour(const uint16_t * restrict neighbours, const uint32_t * restrict grid, | |
| const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) { | |
| int num_neighbors = neighbours[0]; | |
| GGML_ASSERT(num_neighbors > 0); | |
| float best_d2 = FLT_MAX; | |
| int grid_index = -1; | |
| for (int j = 1; j <= num_neighbors; ++j) { | |
| const int8_t * pg = (const int8_t *)(grid + neighbours[j]); | |
| float d2 = 0; | |
| for (int i = 0; i < 4; ++i) { | |
| float q = pg[i]; | |
| float diff = scale*q - xval[i]; | |
| d2 += weight[i]*diff*diff; | |
| } | |
| if (d2 < best_d2) { | |
| best_d2 = d2; grid_index = neighbours[j]; | |
| } | |
| } | |
| GGML_ASSERT(grid_index >= 0); | |
| const int8_t * pg = (const int8_t *)(grid + grid_index); | |
| for (int i = 0; i < 4; ++i) L[i] = (pg[i] - 1)/2; | |
| return grid_index; | |
| } | |
| static void quantize_row_iq3_xxs_impl(int grid_size, const float * restrict x, void * restrict vy, int n, | |
| const float * restrict quant_weights) { | |
| const int gindex = iq3_data_index(grid_size); | |
| const uint32_t * kgrid_q3xs = iq3_data[gindex].grid; | |
| const int * kmap_q3xs = iq3_data[gindex].map; | |
| const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours; | |
| //GGML_ASSERT(quant_weights && "missing quantization weights"); | |
| GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(n%QK_K == 0); | |
| const int kMaxQ = 8; | |
| const int nbl = n/QK_K; | |
| ggml_fp16_t * dh; | |
| uint8_t * qs; | |
| int block_size; | |
| if (grid_size == 256) { | |
| block_iq3_xxs * y = vy; | |
| dh = &y->d; | |
| qs = y->qs; | |
| block_size = sizeof(block_iq3_xxs); | |
| } else { | |
| block_iq3_s * y = vy; | |
| dh = &y->d; | |
| qs = y->qs; | |
| block_size = sizeof(block_iq3_s); | |
| } | |
| int quant_size = block_size - sizeof(ggml_fp16_t); | |
| float scales[QK_K/32]; | |
| float weight[32]; | |
| float xval[32]; | |
| int8_t L[32]; | |
| int8_t Laux[32]; | |
| float waux[32]; | |
| bool is_on_grid[8]; | |
| bool is_on_grid_aux[8]; | |
| uint8_t block_signs[8]; | |
| uint8_t q3[3*(QK_K/8)+QK_K/32]; | |
| uint32_t * scales_and_signs = (uint32_t *)(q3 + QK_K/4); | |
| uint8_t * qh = q3 + 3*(QK_K/8); | |
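| // Scratch layout per 256-value super-block: q3[0..QK_K/4) holds the 8-bit grid indices (8 per | |
| // 32-value sub-block), the next QK_K/32 uint32 words pack four 7-bit sign masks plus a 4-bit | |
| // scale each, and the trailing QK_K/32 bytes (qh) hold the 9th index bit, used only with the | |
| // 512-entry grid. | |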
| for (int ibl = 0; ibl < nbl; ++ibl) { | |
| dh[0] = GGML_FP32_TO_FP16(0.f); | |
| memset(q3, 0, 3*QK_K/8+QK_K/32); | |
| float max_scale = 0; | |
| const float * xbl = x + QK_K*ibl; | |
| float sumx2 = 0; | |
| for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; | |
| float sigma2 = 2*sumx2/QK_K; | |
| for (int ib = 0; ib < QK_K/32; ++ib) { | |
| const float * xb = xbl + 32*ib; | |
| if (quant_weights) { | |
| const float * qw = quant_weights + QK_K*ibl + 32*ib; | |
| for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); | |
| } else { | |
| for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i]; | |
| } | |
| for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]); | |
| for (int k = 0; k < 4; ++k) { | |
| int nflip = 0; | |
| uint8_t s = 0; | |
| for (int i = 0; i < 8; ++i) { | |
| if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i]; | |
| else { | |
| xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i); | |
| } | |
| } | |
| if (nflip%2) { | |
| int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin]; | |
| for (int i = 1; i < 8; ++i) { | |
| float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i]; | |
| if (ax < min) { | |
| min = ax; imin = i; | |
| } | |
| } | |
| xval[8*k+imin] = -xval[8*k+imin]; | |
| s ^= (1 << imin); | |
| } | |
| block_signs[k] = s & 127; | |
| } | |
| float max = xval[0]; | |
| for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]); | |
| if (!max) { | |
| scales[ib] = 0; | |
| memset(L, 0, 32); | |
| continue; | |
| } | |
| float best = 0; | |
| float scale = max/(2*kMaxQ-1); | |
| for (int is = -15; is <= 15; ++is) { | |
| float id = (2*kMaxQ-1+is*0.2f)/max; | |
| float this_scale = 1/id; | |
| for (int k = 0; k < 8; ++k) { | |
| for (int i = 0; i < 4; ++i) { | |
| int l = nearest_int(0.5f*(id*xval[4*k+i]-1)); | |
| Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l)); | |
| } | |
| uint16_t u = 0; | |
| for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i); | |
| int grid_index = kmap_q3xs[u]; | |
| is_on_grid_aux[k] = true; | |
| if (grid_index < 0) { | |
| is_on_grid_aux[k] = false; | |
| const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1; | |
| grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k); | |
| } | |
| } | |
| float sumqx = 0, sumq2 = 0; | |
| for (int i = 0; i < 32; ++i) { | |
| float w = weight[i]; | |
| float q = 2*Laux[i] + 1; | |
| sumqx += w*xval[i]*q; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { | |
| scale = sumqx/sumq2; best = scale*sumqx; | |
| for (int i = 0; i < 32; ++i) L[i] = Laux[i]; | |
| for (int k = 0; k < 8; ++k) is_on_grid[k] = is_on_grid_aux[k]; | |
| } | |
| } | |
| int n_not_ongrid = 0; | |
| for (int k = 0; k < 8; ++k) if (!is_on_grid[k]) ++n_not_ongrid; | |
| if (n_not_ongrid > 0 && scale > 0) { | |
| float id = 1/scale; | |
| for (int k = 0; k < 8; ++k) { | |
| if (is_on_grid[k]) continue; | |
| uint16_t u = 0; | |
| for (int i = 0; i < 4; ++i) { | |
| int l = nearest_int(0.5f*(id*xval[4*k+i]-1)); | |
| l = MAX(0, MIN(kMaxQ-1, l)); | |
| u |= (l << 3*i); | |
| } | |
| int grid_index = kmap_q3xs[u]; | |
| if (grid_index < 0) { | |
| const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1; | |
| grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k); | |
| } | |
| const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index); | |
| for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2; | |
| } | |
| float sumqx = 0, sumq2 = 0; | |
| for (int i = 0; i < 32; ++i) { | |
| float w = weight[i]; | |
| float q = 2*L[i] + 1; | |
| sumqx += w*xval[i]*q; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumq2 > 0) scale = sumqx/sumq2; | |
| } | |
| if (scale < 0) { | |
| // This should never happen, but just in case, flip scale so that it is positive (we use uints to encode the scale) | |
| // and correspondingly flip quant signs. | |
| scale = -scale; | |
| for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127; | |
| } | |
| for (int k = 0; k < 8; ++k) { | |
| uint16_t u = 0; | |
| for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i); | |
| int grid_index = kmap_q3xs[u]; | |
| if (grid_index < 0) { | |
| printf("Oops: found point %u not on grid:", u); | |
| for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]); | |
| printf("\n"); | |
| GGML_ASSERT(false); | |
| } | |
| if (grid_size == 256) { | |
| q3[8*ib+k] = grid_index; | |
| } else { | |
| q3[8*ib+k] = grid_index & 255; | |
| qh[ib] |= ((grid_index >> 8) << k); | |
| } | |
| } | |
| scales_and_signs[ib] = block_signs[0] | (block_signs[1] << 7) | (block_signs[2] << 14) | (block_signs[3] << 21); | |
| GGML_ASSERT(scale >= 0); | |
| scales[ib] = scale; | |
| max_scale = MAX(max_scale, scale); | |
| } | |
| if (!max_scale) { | |
| memset(qs, 0, quant_size); | |
| dh += block_size/sizeof(ggml_fp16_t); | |
| qs += block_size; | |
| continue; | |
| } | |
| float d = max_scale/31; | |
| dh[0] = GGML_FP32_TO_FP16(d * 1.0125f); // small improvement via this fudge factor | |
| float id = 1/d; | |
| for (int ib = 0; ib < QK_K/32; ++ib) { | |
| int l = nearest_int(0.5f*(id*scales[ib]-1)); | |
| l = MAX(0, MIN(15, l)); | |
| scales_and_signs[ib] |= ((uint32_t)l << 28); | |
| } | |
| memcpy(qs, q3, quant_size); | |
| dh += block_size/sizeof(ggml_fp16_t); | |
| qs += block_size; | |
| } | |
| } | |
| size_t quantize_iq3_xxs(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| GGML_ASSERT(n_per_row%QK_K == 0); | |
| int nblock = n_per_row/QK_K; | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_iq3_xxs_impl(256, src, qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += nblock*sizeof(block_iq3_xxs); | |
| } | |
| return nrow * nblock * sizeof(block_iq3_xxs); | |
| } | |
| void quantize_row_iq3_xxs(const float * restrict x, void * restrict vy, int k) { | |
| assert(k % QK_K == 0); | |
| block_iq3_xxs * restrict y = vy; | |
| quantize_row_iq3_xxs_reference(x, y, k); | |
| } | |
| void quantize_row_iq3_xxs_reference(const float * restrict x, block_iq3_xxs * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| quantize_row_iq3_xxs_impl(256, x, y, k, NULL); | |
| } | |
| static void quantize_row_iq3_s_impl(int block_size, const float * restrict x, void * restrict vy, int n, | |
| const float * restrict quant_weights, | |
| float * scales, | |
| float * weight, | |
| float * xval, | |
| int8_t * L, | |
| int8_t * Laux, | |
| float * waux, | |
| bool * is_on_grid, | |
| bool * is_on_grid_aux, | |
| uint8_t * block_signs) { | |
| const int gindex = iq3_data_index(512); | |
| const uint32_t * kgrid_q3xs = iq3_data[gindex].grid; | |
| const int * kmap_q3xs = iq3_data[gindex].map; | |
| const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours; | |
| //GGML_ASSERT(quant_weights && "missing quantization weights"); | |
| GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(n%QK_K == 0); | |
| const int kMaxQ = 8; | |
| const int nbl = n/QK_K; | |
| block_iq3_s * y = vy; | |
| const int bs4 = block_size/4; | |
| const int bs8 = block_size/8; | |
| for (int ibl = 0; ibl < nbl; ++ibl) { | |
| memset(&y[ibl], 0, sizeof(block_iq3_s)); | |
| y[ibl].d = GGML_FP32_TO_FP16(0.f); | |
| uint8_t * qs = y[ibl].qs; | |
| uint8_t * qh = y[ibl].qh; | |
| uint8_t * signs = y[ibl].signs; | |
| float max_scale = 0; | |
| const float * xbl = x + QK_K*ibl; | |
| float sumx2 = 0; | |
| for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; | |
| float sigma2 = 2*sumx2/QK_K; | |
| for (int ib = 0; ib < QK_K/block_size; ++ib) { | |
| const float * xb = xbl + block_size*ib; | |
| if (quant_weights) { | |
| const float * qw = quant_weights + QK_K*ibl + block_size*ib; | |
| for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); | |
| } else { | |
| for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i]; | |
| } | |
| for (int i = 0; i < block_size; ++i) waux[i] = sqrtf(weight[i]); | |
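| // Unlike the XXS variants, IQ3_S stores a full 8-bit sign mask per group of 8, so no parity | |
| // correction of the sign count is needed here. | |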
| for (int k = 0; k < bs8; ++k) { | |
| uint8_t s = 0; | |
| for (int i = 0; i < 8; ++i) { | |
| if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i]; | |
| else { | |
| xval[8*k + i] = -xb[8*k + i]; s |= (1 << i); | |
| } | |
| } | |
| block_signs[k] = s; | |
| } | |
| float max = xval[0]; | |
| for (int i = 1; i < block_size; ++i) max = MAX(max, xval[i]); | |
| if (!max) { | |
| scales[ib] = 0; | |
| continue; | |
| } | |
| float best = 0; | |
| float scale = max/(2*kMaxQ-1); | |
| for (int is = -15; is <= 15; ++is) { | |
| float id = (2*kMaxQ-1+is*0.2f)/max; | |
| float this_scale = 1/id; | |
| for (int k = 0; k < bs4; ++k) { | |
| for (int i = 0; i < 4; ++i) { | |
| int l = nearest_int(0.5f*(id*xval[4*k+i]-1)); | |
| Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l)); | |
| } | |
| uint16_t u = 0; | |
| for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i); | |
| int grid_index = kmap_q3xs[u]; | |
| is_on_grid_aux[k] = true; | |
| if (grid_index < 0) { | |
| is_on_grid_aux[k] = false; | |
| const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1; | |
| grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k); | |
| } | |
| } | |
| float sumqx = 0, sumq2 = 0; | |
| for (int i = 0; i < block_size; ++i) { | |
| float w = weight[i]; | |
| float q = 2*Laux[i] + 1; | |
| sumqx += w*xval[i]*q; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { | |
| scale = sumqx/sumq2; best = scale*sumqx; | |
| for (int i = 0; i < block_size; ++i) L[i] = Laux[i]; | |
| for (int k = 0; k < bs4; ++k) is_on_grid[k] = is_on_grid_aux[k]; | |
| } | |
| } | |
| int n_not_ongrid = 0; | |
| for (int k = 0; k < bs4; ++k) if (!is_on_grid[k]) ++n_not_ongrid; | |
| if (n_not_ongrid > 0 && scale > 0) { | |
| float id = 1/scale; | |
| for (int k = 0; k < bs4; ++k) { | |
| if (is_on_grid[k]) continue; | |
| uint16_t u = 0; | |
| for (int i = 0; i < 4; ++i) { | |
| int l = nearest_int(0.5f*(id*xval[4*k+i]-1)); | |
| l = MAX(0, MIN(kMaxQ-1, l)); | |
| u |= (l << 3*i); | |
| } | |
| int grid_index = kmap_q3xs[u]; | |
| if (grid_index < 0) { | |
| const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1; | |
| grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k); | |
| } | |
| const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index); | |
| for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2; | |
| } | |
| float sumqx = 0, sumq2 = 0; | |
| for (int i = 0; i < block_size; ++i) { | |
| float w = weight[i]; | |
| float q = 2*L[i] + 1; | |
| sumqx += w*xval[i]*q; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumq2 > 0) scale = sumqx/sumq2; | |
| } | |
| if (scale < 0) { | |
| // This should never happen, but just in case, flip scale so that it is positive (we use uints to encode the scale) | |
| // and correspondingly flip quant signs. | |
| scale = -scale; | |
| for (int k = 0; k < bs8; ++k) block_signs[k] = ~block_signs[k]; | |
| } | |
| for (int k = 0; k < bs4; ++k) { | |
| uint16_t u = 0; | |
| for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i); | |
| int grid_index = kmap_q3xs[u]; | |
| if (grid_index < 0) { | |
| printf("Oops: found point %u not on grid:", u); | |
| for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]); | |
| printf("\n"); | |
| GGML_ASSERT(false); | |
| } | |
| qs[k] = grid_index & 255; | |
| qh[(ib*bs4+k)/8] |= ((grid_index >> 8) << ((ib*bs4+k)%8)); | |
| } | |
| qs += bs4; | |
| for (int k = 0; k < bs8; ++k) signs[k] = block_signs[k]; | |
| signs += bs8; | |
| GGML_ASSERT(scale >= 0); | |
| scales[ib] = scale; | |
| max_scale = MAX(max_scale, scale); | |
| } | |
| if (!max_scale) { | |
| continue; | |
| } | |
| float d = max_scale/31; | |
| y[ibl].d = GGML_FP32_TO_FP16(d); | |
| float id = 1/d; | |
| for (int ib = 0; ib < QK_K/block_size; ib += 2) { | |
| int l1 = nearest_int(0.5f*(id*scales[ib+0]-1)); | |
| l1 = MAX(0, MIN(15, l1)); | |
| int l2 = nearest_int(0.5f*(id*scales[ib+1]-1)); | |
| l2 = MAX(0, MIN(15, l2)); | |
| y[ibl].scales[ib/2] = l1 | (l2 << 4); | |
| } | |
| } | |
| } | |
| size_t quantize_iq3_s(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| GGML_ASSERT(n_per_row%QK_K == 0); | |
| int nblock = n_per_row/QK_K; | |
| float scales[QK_K/IQ3S_BLOCK_SIZE]; | |
| float weight[IQ3S_BLOCK_SIZE]; | |
| float xval[IQ3S_BLOCK_SIZE]; | |
| int8_t L[IQ3S_BLOCK_SIZE]; | |
| int8_t Laux[IQ3S_BLOCK_SIZE]; | |
| float waux[IQ3S_BLOCK_SIZE]; | |
| bool is_on_grid[IQ3S_BLOCK_SIZE/4]; | |
| bool is_on_grid_aux[IQ3S_BLOCK_SIZE/4]; | |
| uint8_t block_signs[IQ3S_BLOCK_SIZE/8]; | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_iq3_s_impl(IQ3S_BLOCK_SIZE, src, qrow, n_per_row, quant_weights, | |
| scales, weight, xval, L, Laux, waux, is_on_grid, is_on_grid_aux, block_signs); | |
| src += n_per_row; | |
| qrow += nblock*sizeof(block_iq3_s); | |
| } | |
| return nrow * nblock * sizeof(block_iq3_s); | |
| } | |
| void quantize_row_iq3_s(const float * restrict x, void * restrict vy, int k) { | |
| assert(k % QK_K == 0); | |
| block_iq3_s * restrict y = vy; | |
| quantize_row_iq3_s_reference(x, y, k); | |
| } | |
| void quantize_row_iq3_s_reference(const float * restrict x, block_iq3_s * restrict y, int k) { | |
| assert(k % QK_K == 0); | |
| quantize_iq3_s(x, y, 1, k, NULL, NULL); | |
| } | |
| // =================================== 1.5 bpw =================================================== | |
| static int iq1_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid, | |
| const float * restrict xval, const float * restrict weight, float * scale, int8_t * restrict L, int ngrid) { | |
| int num_neighbors = neighbours[0]; | |
| GGML_ASSERT(num_neighbors > 0); | |
| float best_score = 0; | |
| int grid_index = -1; | |
| for (int j = 1; j <= num_neighbors; ++j) { | |
| const int8_t * pg = (const int8_t *)(grid + neighbours[j]); | |
| float sumqx = 0, sumq2 = 0; | |
| for (int i = 0; i < 8; ++i) { | |
| float q = (pg[i] - 3)/2; | |
| float w = weight[i]; | |
| sumqx += w*q*xval[i]; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) { | |
| *scale = sumqx/sumq2; best_score = *scale * sumqx; | |
| grid_index = neighbours[j]; | |
| } | |
| } | |
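| // If no listed neighbour gives a positive correlation, fall back to an exhaustive search over | |
| // the whole grid. | |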
| if (grid_index < 0) { | |
| for (int i = 0; i < ngrid; ++i) { | |
| const int8_t * grid_i = (const int8_t *)(grid + i); | |
| float sumqx = 0, sumq2 = 0; | |
| for (int j = 0; j < 8; ++j) { | |
| float w = weight[j]; | |
| float q = (grid_i[j] - 3)/2; | |
| sumqx += w*q*xval[j]; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) { | |
| *scale = sumqx/sumq2; best_score = *scale*sumqx; | |
| grid_index = i; | |
| } | |
| } | |
| } | |
| if (grid_index < 0) { | |
| printf("Oops, did not find grid point\n"); | |
| printf("Have %d neighbours\n", num_neighbors); | |
| for (int j = 1; j <= num_neighbors; ++j) { | |
| const int8_t * pg = (const int8_t *)(grid + neighbours[j]); | |
| float sumqx = 0, sumq2 = 0; | |
| for (int i = 0; i < 8; ++i) { | |
| float q = (pg[i] - 3)/2; | |
| float w = weight[i]; | |
| sumqx += w*q*xval[i]; | |
| sumq2 += w*q*q; | |
| } | |
| printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2); | |
| } | |
| } | |
| GGML_ASSERT(grid_index >= 0); | |
| //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! | |
| *scale *= 1.05f; // This is a fudge factor. Don't ask me why it improves the result. | |
| //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! | |
| const int8_t * pg = (const int8_t *)(grid + grid_index); | |
| for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2; | |
| return grid_index; | |
| } | |
| static int iq1_sort_helper(const void * left, const void * right) { | |
| const float * l = left; | |
| const float * r = right; | |
| return *l < *r ? -1 : *l > *r ? 1 : 0; | |
| } | |
| static void quantize_row_iq1_s_impl(const float * restrict x, void * restrict vy, int n, const float * restrict quant_weights) { | |
| const int gindex = iq2_data_index(GGML_TYPE_IQ1_S); | |
| const uint64_t * kgrid_q2xs = iq2_data[gindex].grid; | |
| const int * kmap_q2xs = iq2_data[gindex].map; | |
| const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours; | |
| GGML_ASSERT(quant_weights && "missing quantization weights"); | |
| GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?"); | |
| GGML_ASSERT(n%QK_K == 0); | |
| const int nbl = n/256; | |
| block_iq1_s * y = vy; | |
| float scales[QK_K/8]; | |
| float weight[8]; | |
| int8_t L[8]; | |
| float sumx[9]; | |
| float sumw[9]; | |
| float pairs[16]; | |
| int * idx = (int *)(pairs + 1); | |
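| // pairs[] interleaves each value with its original position (stored, type-punned, in the odd | |
| // float slots via idx), so a single qsort over 2*sizeof(float) strides keeps value and index together. | |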
| uint8_t hbit[QK_K/8]; | |
| for (int ibl = 0; ibl < nbl; ++ibl) { | |
| y[ibl].d = GGML_FP32_TO_FP16(0.f); | |
| memset(y[ibl].qs, 0, QK_K/8); | |
| memset(y[ibl].scales, 0, QK_K/16); | |
| float max_scale = 0; | |
| const float * xbl = x + QK_K*ibl; | |
| float sumx2 = 0; | |
| for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; | |
| float sigma2 = sumx2/QK_K; | |
| for (int ib = 0; ib < QK_K/8; ++ib) { | |
| const float * xb = xbl + 8*ib; | |
| const float * qw = quant_weights + QK_K*ibl + 8*ib; | |
| for (int i = 0; i < 8; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); | |
| float max = fabsf(xb[0]); | |
| for (int i = 1; i < 8; ++i) max = MAX(max, fabsf(xb[i])); | |
| if (!max) { | |
| scales[ib] = 0; | |
| memset(L, 1, 8); | |
| continue; | |
| } | |
| // Here we solve exactly the weighted sum-of-squared-differences (SSD) minimization problem. | |
| // With just 3 allowed quant values (-1, 0, 1), we can search exhaustively for the two | |
| // boundaries that split the values xb[i] into 3 groups. To do so, we sort the values | |
| // in ascending order, compute Si = sum[weight[j] xb[j], j = 0...i] and | |
| // Wi = sum[weight[j], j = 0...i], and use these to quickly get the optimum scale | |
| // and score for each possible split. | |
| for (int j = 0; j < 8; ++j) { | |
| pairs[2*j] = xb[j]; | |
| idx[2*j] = j; | |
| } | |
| qsort(pairs, 8, 2*sizeof(float), iq1_sort_helper); | |
| { | |
| sumx[0] = sumw[0] = 0; | |
| for (int j = 0; j < 8; ++j) { | |
| int i = idx[2*j]; | |
| sumx[j+1] = sumx[j] + weight[i]*xb[i]; | |
| sumw[j+1] = sumw[j] + weight[i]; | |
| } | |
| } | |
| float best_score = 0, scale = max; | |
| int besti1 = 0, besti2 = 0; | |
| for (int i1 = 0; i1 <= 8; ++i1) { | |
| for (int i2 = i1; i2 <= 8; ++i2) { | |
| float sumqx = -(sumx[i1] - sumx[0]) + (sumx[8] - sumx[i2]); | |
| float sumq2 = (sumw[i1] - sumw[0]) + (sumw[8] - sumw[i2]); | |
| if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) { | |
| scale = sumqx/sumq2; best_score = scale*sumqx; | |
| besti1 = i1; besti2 = i2; | |
| } | |
| } | |
| } | |
| for (int j = 0; j < besti1; ++j) L[idx[2*j]] = 0; | |
| for (int j = besti1; j < besti2; ++j) L[idx[2*j]] = 1; | |
| for (int j = besti2; j < 8; ++j) L[idx[2*j]] = 2; | |
| if (scale < 0) { | |
| for (int j = 0; j < 8; ++j) L[j] = 2 - L[j]; | |
| scale = -scale; | |
| } | |
| // Now we check if the solution found above corresponds to a grid point and, if not, use a neighbouring | |
| // grid point that minimizes SSD. | |
| uint16_t u = 0; | |
| for (int j = 0; j < 8; ++j) u |= (L[j] << 2*j); | |
| int grid_index = kmap_q2xs[u]; | |
| if (grid_index < 0) { | |
| const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; | |
| grid_index = iq1_find_best_neighbour(neighbours, kgrid_q2xs, xb, weight, &scale, L, NGRID_IQ2XXS); | |
| GGML_ASSERT(grid_index >= 0); | |
| } | |
| y[ibl].qs[ib] = grid_index & 255; | |
| hbit[ib] = grid_index >> 8; | |
| GGML_ASSERT(scale >= 0); | |
| scales[ib] = scale; | |
| max_scale = MAX(max_scale, scale); | |
| } | |
| if (!max_scale) { | |
| memset(y[ibl].qs, 0, QK_K/8); | |
| continue; | |
| } | |
| float d = max_scale/15; | |
| y[ibl].d = GGML_FP32_TO_FP16(d*1.085f); // 1.085f is another fudge factor. Don't ask me why it is needed. | |
| float id = 1/d; | |
| for (int ib = 0; ib < QK_K/8; ++ib) { | |
| int l = nearest_int(0.5f*(id*scales[ib]-1)); | |
| l = MAX(0, MIN(7, l)); | |
| if (hbit[ib]) l |= 8; | |
| y[ibl].scales[ib/2] |= (l << 4*(ib%2)); | |
| } | |
| } | |
| } | |
| size_t quantize_iq1_s(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| GGML_ASSERT(n_per_row%QK_K == 0); | |
| int nblock = n_per_row/QK_K; | |
| char * qrow = (char *)dst; | |
| for (int row = 0; row < nrow; ++row) { | |
| quantize_row_iq1_s_impl(src, qrow, n_per_row, quant_weights); | |
| src += n_per_row; | |
| qrow += nblock*sizeof(block_iq1_s); | |
| } | |
| return nrow * nblock * sizeof(block_iq1_s); | |
| } | |
| // ============================ 4-bit non-linear quants | |
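| // Binary search for the closest entry in a sorted int8 codebook (the non-uniform IQ4_NL values). | |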
| static inline int best_index_int8(int n, const int8_t * val, float x) { | |
| if (x <= val[0]) return 0; | |
| if (x >= val[n-1]) return n-1; | |
| int ml = 0, mu = n-1; | |
| while (mu-ml > 1) { | |
| int mav = (ml+mu)/2; | |
| if (x < val[mav]) mu = mav; else ml = mav; | |
| } | |
| return x - val[mu-1] < val[mu] - x ? mu-1 : mu; | |
| } | |
| static void quantize_row_iq4_nl_impl(const int block_size, const float * GGML_RESTRICT x, | |
| ggml_fp16_t * dh, uint8_t * q4, | |
| float * weight, uint8_t * L, | |
| const int8_t * values, | |
| const float * quant_weights) { | |
| const int ntry = 7; | |
| float sigma2 = 0; | |
| for (int j = 0; j < QK4_NL; ++j) sigma2 += x[j]*x[j]; | |
| sigma2 *= 2.f/QK4_NL; | |
| const int nb = QK4_NL/block_size; | |
| memset(q4, 0, QK4_NL/2); | |
| for (int ib = 0; ib < nb; ++ib) { | |
| dh[ib] = GGML_FP32_TO_FP16(0.f); | |
| const float * xb = x + ib*block_size; | |
| if (quant_weights) { | |
| const float * qw = quant_weights + ib*block_size; | |
| for (int j = 0; j < block_size; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]); | |
| } else { | |
| for (int j = 0; j < block_size; ++j) weight[j] = xb[j]*xb[j]; | |
| } | |
| float amax = 0, max = 0; | |
| for (int j = 0; j < block_size; ++j) { | |
| float ax = fabsf(xb[j]); | |
| if (ax > amax) { | |
| amax = ax; max = xb[j]; | |
| } | |
| } | |
| if (!amax) { | |
| continue; | |
| } | |
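| // Start from an initial scale derived from the largest-magnitude value and the extreme codebook | |
| // value values[0], then try 2*ntry+1 perturbations of the inverse scale and keep the candidate | |
| // with the best weighted correlation sumqx^2/sumq2. | |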
| float d = -max/values[0]; | |
| float id = 1/d; | |
| float sumqx = 0, sumq2 = 0; | |
| for (int j = 0; j < block_size; ++j) { | |
| float al = id*xb[j]; | |
| int l = best_index_int8(16, values, al); | |
| float q = values[l]; | |
| float w = weight[j]; | |
| sumqx += w*q*xb[j]; | |
| sumq2 += w*q*q; | |
| } | |
| float best_id = id; | |
| d = sumqx/sumq2; | |
| float best = d*sumqx; | |
| for (int itry = -ntry; itry <= ntry; ++itry) { | |
| id = (itry + values[0])/max; | |
| sumqx = sumq2 = 0; | |
| for (int j = 0; j < block_size; ++j) { | |
| float al = id*xb[j]; | |
| int l = best_index_int8(16, values, al); | |
| float q = values[l]; | |
| float w = weight[j]; | |
| sumqx += w*q*xb[j]; | |
| sumq2 += w*q*q; | |
| } | |
| if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { | |
| d = sumqx/sumq2; best = d * sumqx; | |
| best_id = id; | |
| } | |
| } | |
| dh[ib] = GGML_FP32_TO_FP16(d); | |
| for (int j = 0; j < block_size; ++j) { | |
| L[ib*block_size + j] = best_index_int8(16, values, best_id*xb[j]); | |
| } | |
| } | |
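| // Pack two 4-bit indices per byte: within each group of 32, entry j supplies the low nibble and | |
| // entry j+16 the high nibble. | |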
| for (int i = 0; i < QK4_NL/32; ++i) { | |
| for (int j = 0; j < 16; ++j) { | |
| q4[16*i + j] = L[32*i + j] | (L[32*i + 16 + j] << 4); | |
| } | |
| } | |
| } | |
| size_t quantize_iq4_nl(const float * src, void * dst, int nrow, int n_per_row, int64_t * hist, const float * quant_weights) { | |
| (void)hist; | |
| GGML_ASSERT(n_per_row%QK4_NL == 0); | |
| int nblock = n_per_row/QK4_NL; | |
| char * qrow = (char *)dst; | |
| uint8_t L[QK4_NL]; | |
| float weight[32]; | |
| for (int row = 0; row < nrow; ++row) { | |
| block_iq4_nl * iq4 = (block_iq4_nl *)qrow; | |
| for (int ibl = 0; ibl < nblock; ++ibl) { | |
| const float * qw = quant_weights ? quant_weights + QK4_NL*ibl : NULL; | |
| quantize_row_iq4_nl_impl(32, src + QK4_NL*ibl, &iq4[ibl].d, iq4[ibl].qs, weight, L, kvalues_iq4nl, qw); | |
| } | |
| src += n_per_row; | |
| qrow += nblock*sizeof(block_iq4_nl); | |
| } | |
| return nrow * nblock * sizeof(block_iq4_nl); | |
| } | |
| void quantize_row_iq4_nl(const float * restrict x, void * restrict vy, int k) { | |
| assert(k % QK4_NL == 0); | |
| block_iq4_nl * restrict y = vy; | |
| quantize_row_iq4_nl_reference(x, y, k); | |
| } | |
| void quantize_row_iq4_nl_reference(const float * restrict x, block_iq4_nl * restrict y, int k) { | |
| assert(k % QK4_NL == 0); | |
| quantize_iq4_nl(x, y, 1, k, NULL, NULL); | |
| } | |