#define GGML_COMMON_IMPL_C
#include "ggml-common.h"
#include "ggml-quants.h"
#include "ggml-impl.h"
#include "ggml-cpu.h"
#include "simd-mappings.h"

#include "../../quants.h"
#include "../../ggml-cpu-impl.h"

#include <math.h>
#include <string.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h> // for qsort
#include <stdio.h>  // for GGML_ASSERT

#define GROUP_MAX_EPS 1e-15f
#define GROUP_MAX_EPS_IQ3_XXS 1e-8f
#define GROUP_MAX_EPS_IQ2_S 1e-8f
#define GROUP_MAX_EPS_IQ1_M 1e-7f
#define GROUP_MAX_EPS_IQ1_S 1e-12f

#define UNUSED GGML_UNUSED

#if defined(__POWER9_VECTOR__)
#define B1(c,s,n)  0x ## n ## c ,  0x ## n ## s
#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
#define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)

// precomputed tables for expanding 8bits to 8 bytes: each byte of entry i
// holds one bit of i (table_b2b_0) or its complement (table_b2b_1), shifted
// into bit position 4, which is where the fifth quant bit of q5_0/q5_1 lives
static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
#endif

void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    assert(QK8_0 == 32);
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    block_q8_0 * GGML_RESTRICT y = vy;

#if defined(__POWER9_VECTOR__)
    for (int i = 0; i < nb; i++) {
        vector float srcv [8];
        vector float asrcv[8];
        vector float amaxv[8];
        vector signed int vi[8];

        for (int j = 0; j < 8; j++) srcv[j]  = vec_xl(0, x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]);

        const float amax = MAX(MAX(vec_extract(amaxv[0], 0), vec_extract(amaxv[0], 1)),
                               MAX(vec_extract(amaxv[0], 2), vec_extract(amaxv[0], 3)));

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;
        const vector float vid = vec_splats(id);

        y[i].d = GGML_CPU_FP32_TO_FP16(d);

        for (int j = 0; j < 8; j++) {
            const vector float v = vec_round(vec_mul(srcv[j], vid));
            vi[j] = vec_cts(v, 0);
        }
        vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])),  0, &y[i].qs[0]);
        vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]);
    }
#else
    GGML_UNUSED(nb);
    // scalar
    quantize_row_q8_0_ref(x, y, k);
#endif
}

void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;

    block_q8_1 * GGML_RESTRICT y = vy;

#if defined(__POWER9_VECTOR__)
    for (int i = 0; i < nb; i++) {
        vector float srcv [8];
        vector float asrcv[8];
        vector float amaxv[8];
        vector signed int vi[8];

        for (int j = 0; j < 8; j++) srcv[j]  = vec_xl(0, x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]);

        const float amax = MAX(MAX(vec_extract(amaxv[0], 0), vec_extract(amaxv[0], 1)),
                               MAX(vec_extract(amaxv[0], 2), vec_extract(amaxv[0], 3)));

        const float d = amax / ((1 << 7) - 1);
        const float id = d ?
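/* id is the inverse scale, guarded so an all-zero block (d == 0) quantizes to
   zeros rather than dividing by zero; e.g. amax = 1.27 gives d = 1.27/127 = 0.01,
   id = 100, and an input value of 0.5 is stored as round(0.5 * 100) = 50 */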
1.0f/d : 0.0f; const vector float vid = vec_splats(id); y[i].d = GGML_CPU_FP32_TO_FP16(d); vector int accv = vec_splats(0); for (int j = 0; j < 8; j++) { const vector float v = vec_round(vec_mul(srcv[j], vid)); vi[j] = vec_cts(v, 0); accv = vec_add(accv, vi[j]); } vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); accv = vec_add(accv, vec_sld(accv, accv, 4)); accv = vec_add(accv, vec_sld(accv, accv, 8)); y[i].s = GGML_CPU_FP32_TO_FP16(d * vec_extract(accv, 0)); } #else GGML_UNUSED(nb); // scalar quantize_row_q8_1_ref(x, y, k); #endif } //===================================== Dot products ================================= void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed int v0 = vec_splats((int32_t)0); const vector unsigned char v4 = vec_splats((unsigned char)0x4); const vector signed char v8 = vec_splats((signed char)0x8); vector float vsumf0 = vec_splats(0.0f); #pragma GCC unroll 8 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl(16, y[ib].qs); vector signed char q4x0 = vec_and(qxs, lowMask); vector signed char q4x1 = vec_sr(qxs, v4); q4x0 = vec_sub(q4x0, v8); q4x1 = vec_sub(q4x1, v8); vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); vector signed int vsumi0 = v0; vsumi0 = vec_sum4s(qv0, vsumi0); vsumi0 = vec_sum4s(qv1, vsumi0); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); #endif for (; ib < nb; ++ib) { int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const int v0 = (x[ib].qs[j] & 0x0F) - 8; const int v1 = (x[ib].qs[j] >> 4) - 8; sumi0 += (v0 * y[ib].qs[j]); sumi1 += (v1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; } void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed int v0 = vec_splats((int32_t)0); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); #pragma GCC unroll 4 for (; ib < 
nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].m)); vector float vys = {GGML_CPU_FP16_TO_FP32(y[ib].s), 0.0f, 0.0f, 0.0f}; vsumf0 = vec_madd(vxmin, vys, vsumf0); vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl(16, y[ib].qs); vector unsigned char q4x0 = (vector unsigned char)vec_and(qxs, lowMask); vector unsigned char q4x1 = (vector unsigned char)vec_sr(qxs, v4); vector signed int vsumi0 = v0; vsumi0 = vec_msum(q8y0, q4x0, vsumi0); vsumi0 = vec_msum(q8y1, q4x1, vsumi0); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); #endif for (; ib < nb; ++ib) { int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const int v0 = (x[ib].qs[j] & 0x0F); const int v1 = (x[ib].qs[j] >> 4); sumi0 += (v0 * y[ib].qs[j]); sumi1 += (v1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; } void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector unsigned char v4 = vec_splats((unsigned char)4); vector float vsumf0 = vec_splats(0.0f); #pragma GCC unroll 4 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed long long aux64x2_0 = {(uint64_t)(table_b2b_1[x[ib].qh[0]]), (uint64_t)(table_b2b_1[x[ib].qh[1]])}; vector signed long long aux64x2_1 = {(uint64_t)(table_b2b_1[x[ib].qh[2]]), (uint64_t)(table_b2b_1[x[ib].qh[3]])}; vector signed char qh0 = (vector signed char)aux64x2_0; vector signed char qh1 = (vector signed char)aux64x2_1; vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); vector signed char q5x0 = vec_sub(vec_and (qxs, lowMask), qh0); vector signed char q5x1 = vec_sub(vec_sr(qxs, v4), qh1); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl( 16, y[ib].qs); vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1)); qv0 = vec_add(qv0, qv1); vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); #endif for (; ib < nb; ++ib) { uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < 
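/* scalar q5_0 tail: qh packs the fifth bit of all 32 elements; bit j extends
   the low nibble of byte j (element j), bit j+16 the high nibble (element
   j + qk/2), and the reassembled 5-bit value is re-centered by subtracting 16 */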
qk/2; ++j) { const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); sumi0 += (x0 * y[ib].qs[j]); sumi1 += (x1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; } void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_1); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed int v0 = vec_splats((int32_t)0); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); #pragma GCC unroll 4 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].m)); vector float vys = {GGML_CPU_FP16_TO_FP32(y[ib].s), 0.f, 0.f, 0.f}; vsumf0 = vec_madd(vxmin, vys, vsumf0); vector unsigned long long aux64x2_0 = {(uint64_t)(table_b2b_0[x[ib].qh[0]]), (uint64_t)(table_b2b_0[x[ib].qh[1]])}; vector unsigned long long aux64x2_1 = {(uint64_t)(table_b2b_0[x[ib].qh[2]]), (uint64_t)(table_b2b_0[x[ib].qh[3]])}; vector signed char qh0 = (vector signed char)aux64x2_0; vector signed char qh1 = (vector signed char)aux64x2_1; vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); vector unsigned char q5x0 = (vector unsigned char)vec_or(vec_and(qxs, lowMask), qh0); vector unsigned char q5x1 = (vector unsigned char)vec_or(vec_sr(qxs, v4), qh1); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl( 16, y[ib].qs); vector signed int vsumi0 = v0; vsumi0 = vec_msum(q8y0, q5x0, vsumi0); vsumi0 = vec_msum(q8y1, q5x1, vsumi0); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); #endif for (; ib < nb; ++ib) { uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; sumi0 += (x0 * y[ib].qs[j]); sumi1 += (x1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; } void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q8_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf 
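/* q8_0 x q8_0: both operands are plain signed int8 blocks, so each block
   contributes d_x * d_y * sum(x_q[j] * y_q[j]) with no min/offset correction */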
= 0; #if defined(__POWER9_VECTOR__) const vector signed int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); #pragma GCC unroll 8 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed char q8x0 = vec_xl( 0, x[ib].qs); vector signed char q8x1 = vec_xl(16, x[ib].qs); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl(16, y[ib].qs); vector signed short qv0 = vec_mule(q8x0, q8y0); vector signed short qv1 = vec_mulo(q8x0, q8y0); vector signed short qv2 = vec_mule(q8x1, q8y1); vector signed short qv3 = vec_mulo(q8x1, q8y1); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vsumi0 = vec_sum4s(qv0, vsumi0); vsumi1 = vec_sum4s(qv1, vsumi1); vsumi0 = vec_sum4s(qv2, vsumi0); vsumi1 = vec_sum4s(qv3, vsumi1); vsumi0 = vec_add(vsumi0, vsumi1); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); #endif for (; ib < nb; ++ib) { int sumi = 0; for (int j = 0; j < qk; j++) { sumi += x[ib].qs[j]*y[ib].qs[j]; } sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; } void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q2_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0x3); const vector signed char lowScaleMask = vec_splats((signed char)0xF); const vector int v0 = vec_splats((int32_t)0); const vector unsigned char v2 = vec_splats((unsigned char)0x2); const vector unsigned char v6 = vec_splats((unsigned char)0x6); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin)); vector float vdmin = vec_mul(vxmin, vyd); vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); vector signed short q8ysums1 = vec_xl(16, y[i].bsums); vector signed char q2xmins = (vector signed char)vec_xl( 0, x[i].scales); vector signed char vscales = vec_and(q2xmins, lowScaleMask); q2xmins = vec_sr(q2xmins, v4); vector signed short q2xmins0 = vec_unpackh(q2xmins); vector signed short q2xmins1 = vec_unpackl(q2xmins); vector signed int prod0 = vec_mule(q2xmins0, q8ysums0); vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0); vector signed int prod2 = vec_mule(q2xmins1, q8ysums1); vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1); vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; vector 
signed int vsumi4 = v0; vector signed int vsumi5 = v0; vector signed int vsumi6 = v0; vector signed int vsumi7 = v0; const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/128; ++j) { __builtin_prefetch(q2, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed char qxs0 = (vector signed char)vec_xl( 0, q2); vector signed char qxs1 = (vector signed char)vec_xl(16, q2); q2 += 32; vector unsigned char q2x00 = (vector unsigned char)vec_and(qxs0, lowMask); vector unsigned char q2x01 = (vector unsigned char)vec_and(vec_sr(qxs0, v2), lowMask); vector unsigned char q2x02 = (vector unsigned char)vec_and(vec_sr(qxs0, v4), lowMask); vector unsigned char q2x03 = (vector unsigned char)vec_and(vec_sr(qxs0, v6), lowMask); vector unsigned char q2x10 = (vector unsigned char)vec_and(qxs1, lowMask); vector unsigned char q2x11 = (vector unsigned char)vec_and(vec_sr(qxs1, v2), lowMask); vector unsigned char q2x12 = (vector unsigned char)vec_and(vec_sr(qxs1, v4), lowMask); vector unsigned char q2x13 = (vector unsigned char)vec_and(vec_sr(qxs1, v6), lowMask); vector signed char q8y00 = vec_xl( 0, q8); vector signed char q8y10 = vec_xl( 16, q8); vector signed char q8y01 = vec_xl( 32, q8); vector signed char q8y11 = vec_xl( 48, q8); vector signed char q8y02 = vec_xl( 64, q8); vector signed char q8y12 = vec_xl( 80, q8); vector signed char q8y03 = vec_xl( 96, q8); vector signed char q8y13 = vec_xl(112, q8); q8 += 128; vector signed int qv0 = vec_msum(q8y00, q2x00, v0); vector signed int qv1 = vec_msum(q8y01, q2x01, v0); vector signed int qv2 = vec_msum(q8y02, q2x02, v0); vector signed int qv3 = vec_msum(q8y03, q2x03, v0); vector signed int qv4 = vec_msum(q8y10, q2x10, v0); vector signed int qv5 = vec_msum(q8y11, q2x11, v0); vector signed int qv6 = vec_msum(q8y12, q2x12, v0); vector signed int qv7 = vec_msum(q8y13, q2x13, v0); vector signed short vscales_07 = vec_unpackh(vscales); vector signed int vscales_03 = vec_unpackh(vscales_07); vector signed int vscales_47 = vec_unpackl(vscales_07); vector signed int vs0 = vec_splat(vscales_03, 0); vector signed int vs1 = vec_splat(vscales_03, 1); vector signed int vs2 = vec_splat(vscales_03, 2); vector signed int vs3 = vec_splat(vscales_03, 3); vector signed int vs4 = vec_splat(vscales_47, 0); vector signed int vs5 = vec_splat(vscales_47, 1); vector signed int vs6 = vec_splat(vscales_47, 2); vector signed int vs7 = vec_splat(vscales_47, 3); vscales = vec_sld(vscales, vscales, 8); vsumi0 = vec_add(vec_mul(qv0, vs0), vsumi0); vsumi1 = vec_add(vec_mul(qv1, vs2), vsumi1); vsumi2 = vec_add(vec_mul(qv2, vs4), vsumi2); vsumi3 = vec_add(vec_mul(qv3, vs6), vsumi3); vsumi4 = vec_add(vec_mul(qv4, vs1), vsumi4); vsumi5 = vec_add(vec_mul(qv5, vs3), vsumi5); vsumi6 = vec_add(vec_mul(qv6, vs5), vsumi6); vsumi7 = vec_add(vec_mul(qv7, vs7), vsumi7); } vsumi0 = vec_add(vsumi0, vsumi4); vsumi1 = vec_add(vsumi1, vsumi5); vsumi2 = vec_add(vsumi2, vsumi6); vsumi3 = vec_add(vsumi3, vsumi7); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * q2 = x[i].qs; const int8_t * q8 = y[i].qs; const 
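/* each q2_K scales byte holds a 4-bit group scale in the low nibble and a
   4-bit group min in the high nibble; the min contribution is accumulated
   against the precomputed bsums and subtracted once as dmin * summs */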
uint8_t * sc = x[i].scales; int summs = 0; for (int j = 0; j < 16; ++j) { summs += y[i].bsums[j] * (sc[j] >> 4); } const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int isum = 0; int is = 0; int d; for (int k = 0; k < QK_K/128; ++k) { int shift = 0; for (int j = 0; j < 4; ++j) { d = sc[is++] & 0xF; int isuml = 0; for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); isum += d * isuml; d = sc[is++] & 0xF; isuml = 0; for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); isum += d * isuml; shift += 2; q8 += 32; } q2 += 32; } sumf += dall * isum - dmin * summs; } *s = sumf; #endif } void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; const block_q3_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0x3); const vector signed char lowMask1 = vec_splats((int8_t)0xf); const vector signed char lowMask2 = vec_splats((int8_t)0x30); const vector int v0 = vec_splats((int32_t)0); const vector signed char v1 = vec_splats((signed char)0x1); const vector unsigned char v2 = vec_splats((unsigned char)0x2); const vector unsigned char v3 = vec_splats((unsigned char)0x3); const vector unsigned char v4 = vec_splats((unsigned char)0x4); const vector unsigned char v6 = vec_splats((unsigned char)0x6); const vector signed char off = vec_splats((signed char)0x20); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); UNUSED(kmask1); UNUSED(kmask2); vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); vector signed char u1 = vec_and(u0, lowMask1); vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); vector signed char u3 = (vector signed char)vec_mergeh((vector signed int)u2, (vector signed int)vec_sr(u2, v2)); vector signed char u30 = vec_sl(vec_and(u3, lowMask), v4); vector signed char u31 = vec_and(u3, lowMask2); u1 = vec_or(u1, u30); u2 = vec_or(vec_sr(u0, v4), u31); vector signed char vscales = (vector signed char)vec_mergeh((vector signed long long)u1, (vector signed long long)u2); vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].hmask); vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask); vscales = vec_sub(vscales, off); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; vector signed int vsumi4 = v0; vector signed int vsumi5 = v0; vector signed int vsumi6 = v0; vector signed int vsumi7 = v0; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/128; ++j) { __builtin_prefetch(q3, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed char qxs0 = (vector signed char)vec_xl( 0, q3); vector signed char qxs1 = (vector signed char)vec_xl(16, q3); q3 += 32; //the low 2 bits vector signed char qxs00 = vec_and(qxs0, lowMask); vector signed char qxs01 
= vec_and(vec_sr(qxs0, v2), lowMask); vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask); vector signed char qxs03 = vec_and(vec_sr(qxs0, v6), lowMask); vector signed char qxs10 = vec_and(qxs1, lowMask); vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask); vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask); vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask); //the 3rd bit vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2); vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2); vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2); vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2); vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2); vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2); qxhs0 = vec_sr(qxhs0, v4); qxhs1 = vec_sr(qxhs1, v4); vector signed char q3x00 = vec_sub(qxs00, qxh00); vector signed char q3x01 = vec_sub(qxs01, qxh01); vector signed char q3x02 = vec_sub(qxs02, qxh02); vector signed char q3x03 = vec_sub(qxs03, qxh03); vector signed char q3x10 = vec_sub(qxs10, qxh10); vector signed char q3x11 = vec_sub(qxs11, qxh11); vector signed char q3x12 = vec_sub(qxs12, qxh12); vector signed char q3x13 = vec_sub(qxs13, qxh13); vector signed char q8y00 = vec_xl( 0, q8); vector signed char q8y10 = vec_xl( 16, q8); vector signed char q8y01 = vec_xl( 32, q8); vector signed char q8y11 = vec_xl( 48, q8); vector signed char q8y02 = vec_xl( 64, q8); vector signed char q8y12 = vec_xl( 80, q8); vector signed char q8y03 = vec_xl( 96, q8); vector signed char q8y13 = vec_xl(112, q8); q8 += 128; vector signed short vscales_h = vec_unpackh(vscales); vector signed short vs0 = vec_splat(vscales_h, 0); vector signed short vs1 = vec_splat(vscales_h, 1); vector signed short vs2 = vec_splat(vscales_h, 2); vector signed short vs3 = vec_splat(vscales_h, 3); vector signed short vs4 = vec_splat(vscales_h, 4); vector signed short vs5 = vec_splat(vscales_h, 5); vector signed short vs6 = vec_splat(vscales_h, 6); vector signed short vs7 = vec_splat(vscales_h, 7); vscales = vec_sld(vscales, vscales, 8); vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); vector signed short qv02 = vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02)); vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03)); vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12)); vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13)); vsumi0 = vec_msum(qv00, vs0, vsumi0); vsumi1 = vec_msum(qv01, vs2, vsumi1); vsumi2 = vec_msum(qv02, vs4, vsumi2); vsumi3 = vec_msum(qv03, vs6, vsumi3); vsumi4 = vec_msum(qv10, vs1, vsumi4); vsumi5 = vec_msum(qv11, vs3, vsumi5); vsumi6 = vec_msum(qv12, vs5, vsumi6); vsumi7 = vec_msum(qv13, vs7, vsumi7); } vsumi0 = vec_add(vsumi0, vsumi4); vsumi1 = vec_add(vsumi1, vsumi5); vsumi2 = vec_add(vsumi2, vsumi6); vsumi3 = vec_add(vsumi3, vsumi7); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 
= vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else // scalar version // This function is written like this so the compiler can manage to vectorize most of it // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the // manually vectorized version above. Every other version I tried would run at least 4 times slower. // The ideal situation would be if we could just write the code once, and the compiler would // automatically produce the best possible set of machine instructions, instead of us having to manually // write vectorized versions for AVX, ARM_NEON, etc. int8_t aux8[QK_K]; int16_t aux16[8]; float sums [8]; int32_t aux32[8]; memset(sums, 0, 8*sizeof(float)); uint32_t auxs[4]; const int8_t * scales = (const int8_t*)auxs; float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT hm = x[i].hmask; const int8_t * GGML_RESTRICT q8 = y[i].qs; memset(aux32, 0, 8*sizeof(int32_t)); int8_t * GGML_RESTRICT a = aux8; uint8_t m = 1; for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
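/* hmask supplies the third bit of each 3-bit quant: a set bit means the high
   bit is 1 (keep the 2-bit value), a clear bit subtracts 4, fusing the usual
   "or in the high bit, then subtract 4" into one step; the range becomes [-4, 3] */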
0 : 4); a += 32; m <<= 1; q3 += 32; } a = aux8; memcpy(auxs, x[i].scales, 12); uint32_t tmp = auxs[2]; auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); for (int j = 0; j < QK_K/16; ++j) { for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; #endif } void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed char lowMask1 = vec_splats((int8_t)0x3f); const vector signed char lowMask2 = vec_splats((int8_t)0x30); const vector int v0 = vec_splats((int32_t)0); const vector unsigned char v2 = vec_splats((uint8_t)2); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin)); vector float vdmin = vec_mul(vxmin, vyd); vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); vector signed short q8ysums1 = vec_xl(16, y[i].bsums); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2); vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); vector signed char u3 = vec_sr(u2, v4); vector signed char u30 = u1; vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3); u1 = vec_and(u0, lowMask1); u2 = vec_or(u30, u31); vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2); vector signed short vscales = vec_unpackh(utmps); vector signed short q4xmins = vec_unpackl(utmps); vector signed short q4xmins0 = vec_mergeh(q4xmins, q4xmins); vector signed short q4xmins1 = vec_mergel(q4xmins, q4xmins); vector signed int prod0 = vec_mule(q4xmins0, q8ysums0); vector signed int prod1 = vec_mule(q4xmins1, q8ysums1); vector signed int prod2 = vec_mulo(q4xmins0, q8ysums0); vector signed int prod3 = vec_mulo(q4xmins1, q8ysums1); vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); vsumf3 = vec_nmsub(vec_ctf(prod3, 
0), vdmin, vsumf3); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/64; j+=2) { __builtin_prefetch(q4, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); vector signed char qxs1 = (vector signed char)vec_xl(16, q4); vector signed char qxs2 = (vector signed char)vec_xl(32, q4); vector signed char qxs3 = (vector signed char)vec_xl(48, q4); q4 += 64; vector unsigned char q4x00 = (vector unsigned char)vec_and(qxs0, lowMask); vector unsigned char q4x01 = (vector unsigned char)vec_sr(qxs0, v4); vector unsigned char q4x10 = (vector unsigned char)vec_and(qxs1, lowMask); vector unsigned char q4x11 = (vector unsigned char)vec_sr(qxs1, v4); vector unsigned char q4x20 = (vector unsigned char)vec_and(qxs2, lowMask); vector unsigned char q4x21 = (vector unsigned char)vec_sr(qxs2, v4); vector unsigned char q4x30 = (vector unsigned char)vec_and(qxs3, lowMask); vector unsigned char q4x31 = (vector unsigned char)vec_sr(qxs3, v4); vector signed char q8y00 = vec_xl( 0, q8); vector signed char q8y10 = vec_xl( 16, q8); vector signed char q8y01 = vec_xl( 32, q8); vector signed char q8y11 = vec_xl( 48, q8); vector signed char q8y20 = vec_xl( 64, q8); vector signed char q8y30 = vec_xl( 80, q8); vector signed char q8y21 = vec_xl( 96, q8); vector signed char q8y31 = vec_xl(112, q8); q8 += 128; vector signed int qv00 = vec_msum(q8y00, q4x00, v0); vector signed int qv01 = vec_msum(q8y01, q4x01, v0); vector signed int qv10 = vec_msum(q8y10, q4x10, v0); vector signed int qv11 = vec_msum(q8y11, q4x11, v0); vector signed int qv20 = vec_msum(q8y20, q4x20, v0); vector signed int qv21 = vec_msum(q8y21, q4x21, v0); vector signed int qv30 = vec_msum(q8y30, q4x30, v0); vector signed int qv31 = vec_msum(q8y31, q4x31, v0); vector signed int vscales_h = vec_unpackh(vscales); vector signed int vs0 = vec_splat(vscales_h, 0); vector signed int vs1 = vec_splat(vscales_h, 1); vector signed int vs2 = vec_splat(vscales_h, 2); vector signed int vs3 = vec_splat(vscales_h, 3); vscales = vec_sld(vscales, vscales, 8); vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0); vsumi1 = vec_add(vec_mul(qv01, vs1), vsumi1); vsumi2 = vec_add(vec_mul(qv20, vs2), vsumi2); vsumi3 = vec_add(vec_mul(qv21, vs3), vsumi3); vsumi0 = vec_add(vec_mul(qv10, vs0), vsumi0); vsumi1 = vec_add(vec_mul(qv11, vs1), vsumi1); vsumi2 = vec_add(vec_mul(qv30, vs2), vsumi2); vsumi3 = vec_add(vec_mul(qv31, vs3), vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else const uint8_t * scales = (const uint8_t*)&utmp[0]; const uint8_t * mins = (const uint8_t*)&utmp[2]; int8_t aux8[QK_K]; int16_t aux16[8]; float sums [8]; int32_t aux32[8]; memset(sums, 0, 8*sizeof(float)); float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; memset(aux32, 0, 8*sizeof(int32_t)); int8_t * GGML_RESTRICT a = aux8; for (int j = 0; j < QK_K/64; ++j) { for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 
0xF); a += 32; for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); a += 32; q4 += 32; } memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; int sumi = 0; for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; a = aux8; int is = 0; for (int j = 0; j < QK_K/32; ++j) { int32_t scale = scales[is++]; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; #endif } void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed char lowMask1 = vec_splats((int8_t)0x3f); const vector signed char lowMask2 = vec_splats((int8_t)0x30); const vector int v0 = vec_splats((int32_t)0); const vector unsigned char v1 = vec_splats((unsigned char)0x1); const vector unsigned char v2 = vec_splats((unsigned char)0x2); const vector unsigned char v3 = vec_splats((unsigned char)0x3); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin)); vector float vdmin = vec_mul(vxmin, vyd); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2); vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); vector signed char u3 = vec_sr(u2, v4); vector signed char u30 = u1; vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3); u1 = vec_and(u0, lowMask1); u2 = vec_or(u30, u31); vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2); vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); vector signed short q8ysums1 = vec_xl(16, y[i].bsums); vector signed short vscales = vec_unpackh(utmps); vector signed short q5xmins 
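/* after the 6-bit repack, the first eight bytes of utmps hold the block scales
   and the last eight the block mins; vec_unpackh/vec_unpackl widen them to
   16-bit lanes */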
= vec_unpackl(utmps); vector signed short q5xmins0 = vec_mergeh(q5xmins, q5xmins); vector signed short q5xmins1 = vec_mergel(q5xmins, q5xmins); vector signed int prod0 = vec_mule(q5xmins0, q8ysums0); vector signed int prod1 = vec_mule(q5xmins1, q8ysums1); vector signed int prod2 = vec_mulo(q5xmins0, q8ysums0); vector signed int prod3 = vec_mulo(q5xmins1, q8ysums1); vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh); vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].qh); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint8_t * GGML_RESTRICT q5 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/64; ++j) { __builtin_prefetch(q5, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed char qxs0 = (vector signed char)vec_xl( 0, q5); vector signed char qxs1 = (vector signed char)vec_xl(16, q5); q5 += 32; vector signed char qxs00 = vec_and(qxs0, lowMask); vector signed char qxs01 = vec_sr(qxs0, v4); vector signed char qxs10 = vec_and(qxs1, lowMask); vector signed char qxs11 = vec_sr(qxs1, v4); vector signed char q5h00 = vec_sl(vec_and((vector signed char)v1, qxhs0), v4); vector signed char q5h01 = vec_sl(vec_and((vector signed char)v2, qxhs0), v3); vector signed char q5h10 = vec_sl(vec_and((vector signed char)v1, qxhs1), v4); vector signed char q5h11 = vec_sl(vec_and((vector signed char)v2, qxhs1), v3); qxhs0 = vec_sr(qxhs0, v2); qxhs1 = vec_sr(qxhs1, v2); vector unsigned char q5x00 = (vector unsigned char)vec_or(q5h00, qxs00); vector unsigned char q5x01 = (vector unsigned char)vec_or(q5h01, qxs01); vector unsigned char q5x10 = (vector unsigned char)vec_or(q5h10, qxs10); vector unsigned char q5x11 = (vector unsigned char)vec_or(q5h11, qxs11); vector signed char q8y00 = vec_xl( 0, q8); vector signed char q8y10 = vec_xl(16, q8); vector signed char q8y01 = vec_xl(32, q8); vector signed char q8y11 = vec_xl(48, q8); q8 += 64; vector signed int qv00 = vec_msum(q8y00, q5x00, v0); vector signed int qv01 = vec_msum(q8y01, q5x01, v0); vector signed int qv10 = vec_msum(q8y10, q5x10, v0); vector signed int qv11 = vec_msum(q8y11, q5x11, v0); vector signed int vscales_h = vec_unpackh(vscales); vector signed int vs0 = vec_splat(vscales_h, 0); vector signed int vs1 = vec_splat(vscales_h, 1); vscales = vec_sld(vscales, vscales, 12); vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0); vsumi1 = vec_add(vec_mul(qv10, vs0), vsumi1); vsumi2 = vec_add(vec_mul(qv01, vs1), vsumi2); vsumi3 = vec_add(vec_mul(qv11, vs1), vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else const uint8_t * scales = (const uint8_t*)&utmp[0]; const uint8_t * mins = (const uint8_t*)&utmp[2]; int8_t aux8[QK_K]; int16_t aux16[8]; float sums [8]; int32_t aux32[8]; memset(sums, 0, 8*sizeof(float)); float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q4 = x[i].qs; const uint8_t * 
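/* q5_K scalar tail: x[i].qh carries the fifth bit of every element; a set bit
   adds 16 on top of the 4-bit nibble, extending the quant range to 0..31 */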
GGML_RESTRICT hm = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; memset(aux32, 0, 8*sizeof(int32_t)); int8_t * GGML_RESTRICT a = aux8; uint8_t m = 1; for (int j = 0; j < QK_K/64; ++j) { for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); a += 32; m <<= 1; q4 += 32; } memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; int sumi = 0; for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; a = aux8; int is = 0; for (int j = 0; j < QK_K/32; ++j) { int32_t scale = scales[is++]; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; #endif } void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q6_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector int v0 = vec_splats((int32_t)0); const vector unsigned char v2 = vec_splats((unsigned char)0x2); const vector unsigned char v3 = vec_splats((unsigned char)0x3); const vector unsigned char v4 = vec_splats((unsigned char)0x4); const vector unsigned char v6 = vec_splats((unsigned char)0x6); const vector signed char off = vec_splats((signed char)0x20); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; vector signed int vsumi4 = v0; vector signed int vsumi5 = v0; vector signed int vsumi6 = v0; vector signed int vsumi7 = v0; const uint8_t * GGML_RESTRICT q6 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT qs = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/128; ++j) { __builtin_prefetch(q6, 0, 0); __builtin_prefetch(qh, 0, 0); __builtin_prefetch(q8, 0, 0); vector signed char qxs0 = (vector signed char)vec_xl( 0, q6); vector signed char qxs1 = (vector signed char)vec_xl(16, q6); vector signed char qxs2 = (vector signed char)vec_xl(32, q6); 
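/* per 128 elements of q6_K: 64 bytes of ql carry the low 4 bits and 32 bytes
   of qh the high 2 bits; each 6-bit quant is rebuilt as (high2 << 4) | low4
   and re-centered by subtracting 0x20 (32) below */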
vector signed char qxs3 = (vector signed char)vec_xl(48, q6); q6 += 64; vector signed char qxs00 = vec_and(qxs0, lowMask); vector signed char qxs01 = vec_sr(qxs0, v4); vector signed char qxs10 = vec_and(qxs1, lowMask); vector signed char qxs11 = vec_sr(qxs1, v4); vector signed char qxs20 = vec_and(qxs2, lowMask); vector signed char qxs21 = vec_sr(qxs2, v4); vector signed char qxs30 = vec_and(qxs3, lowMask); vector signed char qxs31 = vec_sr(qxs3, v4); vector signed char qxhs0 = (vector signed char)vec_xl( 0, qh); vector signed char qxhs1 = (vector signed char)vec_xl(16, qh); qh += 32; vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4); vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4); vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, qxhs1), v4); vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v4)), v4); vector signed char qxh20 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4); vector signed char qxh21 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4); vector signed char qxh30 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v2)), v4); vector signed char qxh31 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v6)), v4); vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off); vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off); vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off); vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off); vector signed char q6x20 = vec_sub(vec_or(qxh20, qxs20), off); vector signed char q6x21 = vec_sub(vec_or(qxh21, qxs21), off); vector signed char q6x30 = vec_sub(vec_or(qxh30, qxs30), off); vector signed char q6x31 = vec_sub(vec_or(qxh31, qxs31), off); vector signed char q8y00 = vec_xl( 0, q8); vector signed char q8y10 = vec_xl( 16, q8); vector signed char q8y20 = vec_xl( 32, q8); vector signed char q8y30 = vec_xl( 48, q8); vector signed char q8y01 = vec_xl( 64, q8); vector signed char q8y11 = vec_xl( 80, q8); vector signed char q8y21 = vec_xl( 96, q8); vector signed char q8y31 = vec_xl(112, q8); q8 += 128; vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00)); vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10)); vector signed short qv20 = vec_add(vec_mule(q6x20, q8y20), vec_mulo(q6x20, q8y20)); vector signed short qv30 = vec_add(vec_mule(q6x30, q8y30), vec_mulo(q6x30, q8y30)); vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01)); vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11)); vector signed short qv21 = vec_add(vec_mule(q6x21, q8y21), vec_mulo(q6x21, q8y21)); vector signed short qv31 = vec_add(vec_mule(q6x31, q8y31), vec_mulo(q6x31, q8y31)); vector signed short vscales = vec_unpackh(vec_xl_len(qs, 8)); qs += 8; vector signed short vs0 = vec_splat(vscales, 0); vector signed short vs1 = vec_splat(vscales, 1); vector signed short vs2 = vec_splat(vscales, 2); vector signed short vs3 = vec_splat(vscales, 3); vector signed short vs4 = vec_splat(vscales, 4); vector signed short vs5 = vec_splat(vscales, 5); vector signed short vs6 = vec_splat(vscales, 6); vector signed short vs7 = vec_splat(vscales, 7); vsumi0 = vec_msum(qv00, vs0, vsumi0); vsumi1 = vec_msum(qv01, vs4, vsumi1); vsumi2 = vec_msum(qv10, vs1, vsumi2); vsumi3 = vec_msum(qv11, vs5, vsumi3); vsumi4 = vec_msum(qv20, vs2, vsumi4); vsumi5 = vec_msum(qv21, vs6, vsumi5); vsumi6 = vec_msum(qv30, vs3, 
vsumi6); vsumi7 = vec_msum(qv31, vs7, vsumi7); } vsumi0 = vec_add(vsumi0, vsumi4); vsumi1 = vec_add(vsumi1, vsumi5); vsumi2 = vec_add(vsumi2, vsumi6); vsumi3 = vec_add(vsumi3, vsumi7); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else int8_t aux8[QK_K]; int16_t aux16[8]; float sums [8]; int32_t aux32[8]; memset(sums, 0, 8*sizeof(float)); float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q4 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; memset(aux32, 0, 8*sizeof(int32_t)); int8_t * GGML_RESTRICT a = aux8; for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; } a += 128; q4 += 64; qh += 32; } a = aux8; int is = 0; for (int j = 0; j < QK_K/16; ++j) { int scale = x[i].scales[is++]; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; #endif } #if defined (__POWER9_VECTOR__) static const int8_t keven_signs_q2xs[1024] = { 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 
-1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, }; #endif void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/32; j += 2) { __builtin_prefetch(q2, 0, 1); __builtin_prefetch(q8, 0, 1); uint32_t aux32[4]; const uint8_t * aux8 = (const uint8_t *)aux32; memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1])}; vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3])}; vector 
signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9])}; vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])}; vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))}; vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))}; vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))}; vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))}; vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); const uint16_t ls0 = aux32[1] >> 28; const uint16_t ls1 = aux32[3] >> 28; vector signed short vscales01 = vec_splats((int16_t)(2*ls0+1)); vector signed short vscales23 = vec_splats((int16_t)(2*ls1+1)); vsumi0 = vec_msum(qv0, vscales01, vsumi0); vsumi1 = vec_msum(qv1, vscales01, vsumi1); vsumi2 = vec_msum(qv2, vscales23, vsumi2); vsumi3 = vec_msum(qv3, vscales23, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = 0.125f * vec_extract(vsumf0, 0); #else uint32_t aux32[2]; const uint8_t * aux8 = (const uint8_t *)aux32; float sumf = 0.f; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { memcpy(aux32, q2, 2*sizeof(uint32_t)); q2 += 4; const uint32_t ls = 2*(aux32[1] >> 28) + 1; int32_t sumi = 0; for (int l = 0; l < 4; ++l) { const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; for (int j = 0; j < 8; ++j) { sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
-1 : 1); } q8 += 8; } bsum += sumi * ls; } sumf += d * bsum; } *s = 0.125f * sumf; #endif } void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/64; ++j) { __builtin_prefetch(q2, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xs_grid + (q2[0] & 511)), *(const int64_t *)(iq2xs_grid + (q2[1] & 511))}; vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xs_grid + (q2[2] & 511)), *(const int64_t *)(iq2xs_grid + (q2[3] & 511))}; vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xs_grid + (q2[4] & 511)), *(const int64_t *)(iq2xs_grid + (q2[5] & 511))}; vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xs_grid + (q2[6] & 511)), *(const int64_t *)(iq2xs_grid + (q2[7] & 511))}; vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((q2[0] >> 9))), *(const int64_t *)(signs64 + ((q2[1] >> 9)))}; vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((q2[2] >> 9))), *(const int64_t *)(signs64 + ((q2[3] >> 9)))}; vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((q2[4] >> 9))), *(const int64_t *)(signs64 + ((q2[5] >> 9)))}; vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((q2[6] >> 9))), *(const int64_t *)(signs64 + ((q2[7] >> 9)))}; q2 += 8; vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); const uint16_t ls1 = (uint16_t)(sc[0] >> 4); const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); const uint16_t ls3 = (uint16_t)(sc[1] >> 4); sc += 2; vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); 
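/* the 4-bit block scales enter the integer accumulation as (2*ls + 1); the
   vec_msum calls below multiply-accumulate the int16 products against these
   splats into four int32 lanes, and the final 0.125f factor rescales the
   result, matching the scalar reference in the #else branch */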
vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); vsumi0 = vec_msum(qv0, vscales0, vsumi0); vsumi1 = vec_msum(qv1, vscales1, vsumi1); vsumi2 = vec_msum(qv2, vscales2, vsumi2); vsumi3 = vec_msum(qv3, vscales3, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = 0.125f * vec_extract(vsumf0, 0); #else float sumf = 0.f; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; int32_t sumi = 0; for (int l = 0; l < 2; ++l) { const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; for (int j = 0; j < 8; ++j) { sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); } q8 += 8; } bsum += sumi * ls1; sumi = 0; for (int l = 2; l < 4; ++l) { const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; for (int j = 0; j < 8; ++j) { sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); } q8 += 8; } bsum += sumi * ls2; q2 += 4; } sumf += d * bsum; } *s = 0.125f * sumf; #endif } void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; const vector int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); const vector unsigned char mask0 = vec_xl( 0, k_mask1); const vector unsigned char mask1 = vec_xl(16, k_mask1); const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint8_t * GGML_RESTRICT q2 = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int 
j = 0; j < QK_K/32; j += 2) { __builtin_prefetch(q2, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed long long aux64x2_0 = {*(const int64_t *)(iq2s_grid + (q2[0] | ((qh[0] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[1] | ((qh[0] << 6) & 0x300)))}; vector signed long long aux64x2_1 = {*(const int64_t *)(iq2s_grid + (q2[2] | ((qh[0] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[3] | ((qh[0] << 2) & 0x300)))}; vector signed long long aux64x2_2 = {*(const int64_t *)(iq2s_grid + (q2[4] | ((qh[1] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[5] | ((qh[1] << 6) & 0x300)))}; vector signed long long aux64x2_3 = {*(const int64_t *)(iq2s_grid + (q2[6] | ((qh[1] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[7] | ((qh[1] << 2) & 0x300)))}; q2 += 8; qh += 2; vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); vector signed char vsigns23 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); signs += 4; vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); vector signed char vsigns2 = vec_perm(vsigns23, vsigns23, mask0); vector signed char vsigns3 = vec_perm(vsigns23, vsigns23, mask1); vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); vector signed char q2x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux64x2_0), vsigns0); vector signed char q2x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux64x2_1), vsigns1); vector signed char q2x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux64x2_2), vsigns2); vector signed char q2x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux64x2_3), vsigns3); vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); const uint16_t ls1 = (uint16_t)(sc[0] >> 4); const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); const uint16_t ls3 = (uint16_t)(sc[1] >> 4); sc += 2; vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); vsumi0 = vec_msum(qv0, vscales0, vsumi0); vsumi1 = vec_msum(qv1, vscales1, vsumi1); vsumi2 = vec_msum(qv2, vscales2, vsumi2); vsumi3 = vec_msum(qv3, vscales3, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = 0.125f * vec_extract(vsumf0, 0); #else float sumf = 0; for (int i = 0; i < nb; i++) { const float d = 
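/* combined scale: the fp16 super-block scale of x times the float q8_K scale of y */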
GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; const uint8_t * signs = qs + QK_K/8; int bsum = 0; for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf); int ls2 = 1 + 2*(x[i].scales[ib32] >> 4); int sumi1 = 0, sumi2 = 0; for (int l = 0; l < 2; ++l) { const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); for (int j = 0; j < 8; ++j) { sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); } q8 += 8; } for (int l = 2; l < 4; ++l) { const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); for (int j = 0; j < 8; ++j) { sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); } q8 += 8; } bsum += ls1 * sumi1 + ls2 * sumi2; qs += 4; signs += 4; } sumf += d * bsum; } *s = 0.125f * sumf; #endif } void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; const vector int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint32_t * GGML_RESTRICT signs = (const uint32_t *)(x[i].qs + QK_K/4); const int8_t * GGML_RESTRICT q8 = y[i].qs; #pragma GCC unroll 1 for (int j = 0; j < QK_K/32; j += 2) { __builtin_prefetch(q3, 0, 1); __builtin_prefetch(q8, 0, 1); vector unsigned int aux32x4_0 = {iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]}; vector unsigned int aux32x4_1 = {iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]}; vector unsigned int aux32x4_2 = {iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]}; vector unsigned int aux32x4_3 = {iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]}; q3 += 16; vector unsigned long long aux64x2_0 = {(uint64_t)(signs64[(signs[0] >> 0) & 127]), (uint64_t)(signs64[(signs[0] >> 7) & 127])}; vector unsigned long long aux64x2_1 = {(uint64_t)(signs64[(signs[0] >> 14) & 127]), (uint64_t)(signs64[(signs[0] >> 21) & 127])}; vector unsigned long long aux64x2_2 = {(uint64_t)(signs64[(signs[1] >> 0) & 127]), (uint64_t)(signs64[(signs[1] >> 7) & 127])}; vector unsigned long long aux64x2_3 = {(uint64_t)(signs64[(signs[1] >> 14) & 127]), (uint64_t)(signs64[(signs[1] >> 21) & 127])}; vector signed char q3x0 = vec_mul((vector signed char)aux64x2_0, (vector signed char)aux32x4_0); vector signed char q3x1 = vec_mul((vector signed char)aux64x2_1, (vector signed char)aux32x4_1); vector signed char q3x2 = vec_mul((vector signed char)aux64x2_2, (vector signed char)aux32x4_2); vector signed char q3x3 = vec_mul((vector signed char)aux64x2_3, (vector signed char)aux32x4_3); vector signed 
char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); const uint16_t ls0 = (uint16_t)(signs[0] >> 28); const uint16_t ls1 = (uint16_t)(signs[1] >> 28); signs += 2; vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); vsumi0 = vec_msum(qv0, vscales01, vsumi0); vsumi1 = vec_msum(qv1, vscales01, vsumi1); vsumi2 = vec_msum(qv2, vscales23, vsumi2); vsumi3 = vec_msum(qv3, vscales23, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = 0.25f * vec_extract(vsumf0, 0); #else uint32_t aux32; float sumf = 0.f; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); const uint32_t ls = 2*(aux32 >> 28) + 1; int32_t sumi = 0; for (int l = 0; l < 4; ++l) { const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]); const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; for (int j = 0; j < 4; ++j) { sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? 
-1 : 1); } q8 += 8; } q3 += 8; bsum += sumi * ls; } sumf += d * bsum; } *s = 0.25f * sumf; #endif } void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; const vector int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); const vector unsigned char mask0 = vec_xl( 0, k_mask1); const vector unsigned char mask1 = vec_xl(16, k_mask1); const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].signs); const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; for (int j = 0; j < QK_K/32; j += 2) { __builtin_prefetch(q3, 0, 1); __builtin_prefetch(q8, 0, 1); vector unsigned int aux32x4_0 = {iq3s_grid[q3[ 0] | ((qh[0] << 8) & 256)], iq3s_grid[q3[ 1] | ((qh[0] << 7) & 256)], iq3s_grid[q3[ 2] | ((qh[0] << 6) & 256)], iq3s_grid[q3[ 3] | ((qh[0] << 5) & 256)]}; vector unsigned int aux32x4_1 = {iq3s_grid[q3[ 4] | ((qh[0] << 4) & 256)], iq3s_grid[q3[ 5] | ((qh[0] << 3) & 256)], iq3s_grid[q3[ 6] | ((qh[0] << 2) & 256)], iq3s_grid[q3[ 7] | ((qh[0] << 1) & 256)]}; vector unsigned int aux32x4_2 = {iq3s_grid[q3[ 8] | ((qh[1] << 8) & 256)], iq3s_grid[q3[ 9] | ((qh[1] << 7) & 256)], iq3s_grid[q3[10] | ((qh[1] << 6) & 256)], iq3s_grid[q3[11] | ((qh[1] << 5) & 256)]}; vector unsigned int aux32x4_3 = {iq3s_grid[q3[12] | ((qh[1] << 4) & 256)], iq3s_grid[q3[13] | ((qh[1] << 3) & 256)], iq3s_grid[q3[14] | ((qh[1] << 2) & 256)], iq3s_grid[q3[15] | ((qh[1] << 1) & 256)]}; q3 += 16; qh += 2; vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); vector signed char vsigns02 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); signs += 4; vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); vector signed char vsigns2 = vec_perm(vsigns02, vsigns02, mask0); vector signed char vsigns3 = vec_perm(vsigns02, vsigns02, mask1); vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); vector signed char q3x0 = vec_sub(vec_xor(vsigns0, (vector signed 
char)aux32x4_0), vsigns0); vector signed char q3x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux32x4_1), vsigns1); vector signed char q3x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux32x4_2), vsigns2); vector signed char q3x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux32x4_3), vsigns3); vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); const uint16_t ls1 = (uint16_t)(sc[0] >> 4); sc ++; vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); vsumi0 = vec_msum(qv0, vscales01, vsumi0); vsumi1 = vec_msum(qv1, vscales01, vsumi1); vsumi2 = vec_msum(qv2, vscales23, vsumi2); vsumi3 = vec_msum(qv3, vscales23, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else float sumf = 0.f; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint8_t * GGML_RESTRICT signs = x[i].signs; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; int32_t sumi = 0; for (int l = 0; l < 4; ++l) { const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); for (int j = 0; j < 4; ++j) { sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); } q8 += 8; } qs += 8; signs += 4; bsum += sumi * ls1; sumi = 0; for (int l = 0; l < 4; ++l) { const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); for (int j = 0; j < 4; ++j) { sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? 
-1 : 1); } q8 += 8; } qs += 8; signs += 4; bsum += sumi * ls2; } sumf += d * bsum; } *s = sumf; #endif } void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq1_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector unsigned char v0 = vec_splats((unsigned char)0x0); const vector unsigned short vsign = vec_splats((unsigned short)0x8000); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = vec_splats((int32_t)0); vector signed int vsumi1 = vec_splats((int32_t)0); vector signed int vsumi2 = vec_splats((int32_t)0); vector signed int vsumi3 = vec_splats((int32_t)0); vector signed int vsumi8 = vec_splats((int32_t)0); const uint8_t * GGML_RESTRICT q1 = x[i].qs; const uint16_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; const int16_t * GGML_RESTRICT qs = y[i].bsums; for (int j = 0; j < QK_K/32; j += 2) { __builtin_prefetch(q1, 0, 1); __builtin_prefetch(qh, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed long long aux64x2_0 = {*(const int64_t *)(iq1s_grid + (q1[0] | ((qh[0] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[1] | ((qh[0] << 5) & 0x700)))}; vector signed long long aux64x2_1 = {*(const int64_t *)(iq1s_grid + (q1[2] | ((qh[0] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[3] | ((qh[0] >> 1) & 0x700)))}; vector signed long long aux64x2_2 = {*(const int64_t *)(iq1s_grid + (q1[4] | ((qh[1] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[5] | ((qh[1] << 5) & 0x700)))}; vector signed long long aux64x2_3 = {*(const int64_t *)(iq1s_grid + (q1[6] | ((qh[1] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[7] | ((qh[1] >> 1) & 0x700)))}; q1 += 8; vector signed char q1x0 = (vector signed char)aux64x2_0; vector signed char q1x1 = (vector signed char)aux64x2_1; vector signed char q1x2 = (vector signed char)aux64x2_2; vector signed char q1x3 = (vector signed char)aux64x2_3; vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q1x0, q8y0), vec_mulo(q1x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q1x1, q8y1), vec_mulo(q1x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q1x2, q8y2), vec_mulo(q1x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q1x3, q8y3), vec_mulo(q1x3, q8y3)); const uint16_t ls0 = (uint16_t)((qh[0] >> 12) & 7); const uint16_t ls1 = (uint16_t)((qh[1] >> 12) & 7); vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); vector signed short vscales = vec_sld(vscales23, vscales01, 8); vsumi0 = vec_msum(qv0, vscales01, vsumi0); vsumi1 = vec_msum(qv1, vscales01, vsumi1); vsumi2 = vec_msum(qv2, vscales23, vsumi2); vsumi3 = vec_msum(qv3, vscales23, vsumi3); vector signed short q8ysums = vec_xl_len(qs, 8); qs += 4; q8ysums = vec_mergeh(q8ysums, (vector signed 
short)v0);

            // the sign for the IQ1S_DELTA correction comes from bit 15 of qh
            // (cf. the scalar path below): select between the q8 block sums
            // and their sign-flipped counterpart
            vector signed short qxh = (vector signed short)vec_sld(vec_splats(qh[1]), vec_splats(qh[0]), 8);
            qh += 2;

            vector __bool short vsel = vec_cmpge(qxh, (vector signed short)v0);

            vector signed short q8ysum = vec_sel((vector signed short)vec_xor((vector unsigned short)q8ysums, vsign), q8ysums, vsel);

            vsumi8 = vec_add(vec_mule(q8ysum, vscales), vsumi8);
        }

        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
        vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
        vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
        vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);

        vsumf0 = vec_madd(vec_ctf(vsumi8, 0), vec_mul(vd, vec_splats(IQ1S_DELTA)), vsumf0);
    }

    vsumf0 = vec_add(vsumf0, vsumf2);
    vsumf1 = vec_add(vsumf1, vsumf3);

    vsumf0 = vec_add(vsumf0, vsumf1);

    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));

    *s = vec_extract(vsumf0, 0);

#else
    float sumf = 0;
    for (int i = 0; i < nb; i++) {
        const int8_t   * q8 = y[i].qs;
        const uint8_t  * qs = x[i].qs;
        const uint16_t * qh = x[i].qh;

        int sumi = 0, sumi1 = 0;
        for (int ib = 0; ib < QK_K/32; ++ib) {
            const int ls = 2*((qh[ib] >> 12) & 7) + 1;
            const int delta = qh[ib] & 0x8000 ? -1 : 1;
            int lsum = 0;
            for (int l = 0; l < 4; ++l) {
                const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
                for (int j = 0; j < 8; ++j) {
                    lsum += q8[j] * grid[j];
                }
                q8 += 8;
            }
            sumi  += ls * lsum;
            sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]);
            qs += 4;
        }
        sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1);
    }

    *s = sumf;
#endif
}

void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    assert(n % QK4_NL == 0);
    static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same");

    const block_iq4_nl * GGML_RESTRICT x = vx;
    const block_q8_0   * GGML_RESTRICT y = vy;

    const int nb = n / QK4_NL;

    int ib = 0;
    float sumf = 0;

#if defined(__POWER9_VECTOR__)
    const vector signed char lowMask = vec_splats((signed char)0xF);
    const vector signed int v0 = vec_splats((int32_t)0);
    const vector unsigned char v4 = vec_splats((unsigned char)0x4);

    vector float vsumf0 = vec_splats(0.0f);
    vector float vsumf1 = vec_splats(0.0f);

    const vector signed char values = vec_xl( 0, kvalues_iq4nl);
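    // kvalues_iq4nl is a 16-entry signed lookup table; the vec_perm calls in
    // the loop below use each 4-bit quant as an index into it, so dequantizing
    // the non-linear iq4 grid becomes a single byte shuffle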
#pragma GCC unroll 4
    for (; ib < nb; ++ib) {
        __builtin_prefetch(x[ib].qs, 0, 1);
        __builtin_prefetch(y[ib].qs, 0, 1);

        vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d));
        vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d));
        vector float vd = vec_mul(vxd, vyd);

        vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs);
        vector signed char q4x0 = vec_and(qxs, lowMask);
        vector signed char q4x1 = vec_sr(qxs, v4);

        q4x0 = vec_perm(values, values, (vector unsigned char)q4x0);
        q4x1 = vec_perm(values, values, (vector unsigned char)q4x1);

        vector signed char q8y0 = vec_xl( 0, y[ib].qs);
        vector signed char q8y1 = vec_xl(16, y[ib].qs);

        vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
        vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));

        vector signed int vsumi0 = v0;
        vector signed int vsumi1 = v0;

        vsumi0 = vec_sum4s(qv0, vsumi0);
        vsumi1 = vec_sum4s(qv1, vsumi1);

        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
        vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
    }

    vsumf0 = vec_add(vsumf0, vsumf1);

    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));

    sumf = vec_extract(vsumf0, 0);

#endif
    for (; ib < nb; ++ib) {
        const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d);
        int sumi1 = 0, sumi2 = 0;
        for (int j = 0; j < QK4_NL/2; ++j) {
            sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf];
            sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4];
        }
        sumf += d * (sumi1 + sumi2);
    }
    *s = sumf;
}

void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    assert(n % QK_K == 0);

    const block_iq4_xs * GGML_RESTRICT x = vx;
    const block_q8_K   * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

#if defined(__POWER9_VECTOR__)
    const vector signed char lowMask = vec_splats((signed char)0xF);
    const vector int v0 = vec_splats((int32_t)0);
    const vector unsigned char v4 = vec_splats((unsigned char)0x4);

    vector float vsumf0 = vec_splats(0.0f);
    vector float vsumf1 = vec_splats(0.0f);
    vector float vsumf2 = vec_splats(0.0f);
    vector float vsumf3 = vec_splats(0.0f);

    const vector signed char values = vec_xl( 0, kvalues_iq4nl);

    for (int ibl = 0; ibl < nb; ++ibl) {
        vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ibl].d));
        vector float vyd = vec_splats(y[ibl].d);
        vector float vd = vec_mul(vxd, vyd);

        vector signed int vsumi0 = v0;
        vector signed int vsumi1 = v0;
        vector signed int vsumi2 = v0;
        vector signed int vsumi3 = v0;

        uint16_t h = x[ibl].scales_h;

        const uint8_t * GGML_RESTRICT q4 = x[ibl].qs;
        const uint8_t * GGML_RESTRICT sc = x[ibl].scales_l;
        const int8_t  * GGML_RESTRICT q8 = y[ibl].qs;

        for (int ib = 0; ib < QK_K/64; ib++) {
            __builtin_prefetch(q4, 0, 1);
            __builtin_prefetch(q8, 0, 1);

            vector signed char qxs0 = (vector signed char)vec_xl( 0, q4);
            vector signed char qxs1 = (vector signed char)vec_xl(16, q4);
            q4 += 32;

            vector signed char q4x00 = (vector signed char)vec_and(qxs0, lowMask);
            vector signed char q4x01 = (vector signed char)vec_sr(qxs0, v4);
            vector signed char q4x10 = (vector signed char)vec_and(qxs1, lowMask);
            vector signed char q4x11 = (vector signed char)vec_sr(qxs1, v4);

            q4x00 = vec_perm(values, values, (vector unsigned char)q4x00);
            q4x01 = vec_perm(values, values, (vector unsigned char)q4x01);
            q4x10 = vec_perm(values, values, (vector unsigned char)q4x10);
            q4x11 = vec_perm(values, values, (vector unsigned char)q4x11);

            vector signed char q8y0 = vec_xl( 0, q8);
            vector signed char q8y1 = vec_xl(16, q8);
            vector signed char q8y2 = vec_xl(32, q8);
            vector signed char q8y3 = vec_xl(48, q8);
            q8 += 64;

            vector signed short qv0 = vec_add(vec_mule(q4x00, q8y0), vec_mulo(q4x00, q8y0));
            vector signed short qv1 = vec_add(vec_mule(q4x01, q8y1), vec_mulo(q4x01, q8y1));
            vector signed short qv2 = vec_add(vec_mule(q4x10, q8y2), vec_mulo(q4x10, q8y2));
            vector signed short qv3 = vec_add(vec_mule(q4x11, q8y3), vec_mulo(q4x11, q8y3));

            // 6-bit block scales: low 4 bits from scales_l, high 2 bits from scales_h, bias of 32
            const uint16_t ls0 = (uint16_t)(((sc[0] & 0xf) | ((h << 4) & 0x30)) - 32);
            const uint16_t ls1 = (uint16_t)(((sc[0] >> 4) | ((h << 2) & 0x30)) - 32);
            h >>= 4;
            sc++;

            vector signed short vscales01 = vec_splats((int16_t)ls0);
            vector signed short vscales23 = vec_splats((int16_t)ls1);

            vsumi0 = vec_msum(qv0, vscales01, vsumi0);
            vsumi1 = vec_msum(qv1, vscales01, vsumi1);
            vsumi2 = vec_msum(qv2, vscales23, vsumi2);
            vsumi3 = vec_msum(qv3, vscales23, vsumi3);
        }

        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
        vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
        vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
        vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
    }
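    // horizontal reduction: fold the four per-lane accumulators pairwise, then
    // rotate by 4 and 8 bytes with vec_sld so lane 0 holds the full dot product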
    vsumf0 = vec_add(vsumf0, vsumf2);
    vsumf1 = vec_add(vsumf1, vsumf3);

    vsumf0 = vec_add(vsumf0, vsumf1);

    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));

    *s = vec_extract(vsumf0, 0);

#else
    float sumf = 0;
    for (int ibl = 0; ibl < nb; ++ibl) {
        const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
        uint16_t h = x[ibl].scales_h;
        const uint8_t * qs = x[ibl].qs;
        const int8_t  * q8 = y[ibl].qs;
        for (int ib = 0; ib < QK_K/32; ib += 2) {
            const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
            const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
            h >>= 4;
            const float d1 = d4d8*(ls1 - 32);
            const float d2 = d4d8*(ls2 - 32);
            int sumi1 = 0, sumi2 = 0;
            for (int j = 0; j < 16; ++j) {
                sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
                sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
            }
            sumf += d1 * (sumi1 + sumi2);
            qs += 16; q8 += 32;
            sumi1 = sumi2 = 0;
            for (int j = 0; j < 16; ++j) {
                sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
                sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
            }
            sumf += d2 * (sumi1 + sumi2);
            qs += 16; q8 += 32;
        }
    }
    *s = sumf;
#endif
}
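
// Editorial sketch: the iq2_s and iq3_s kernels above apply per-byte signs
// without multiplies by expanding packed sign bits into an all-ones mask
// (vec_cmpeq against k_mask2) and computing (q ^ mask) - mask, which is
// conditional two's-complement negation. Below is a scalar reference of the
// same identity; the helper name is illustrative only and not part of ggml.
static inline int8_t iq_apply_sign_ref(int8_t q, int sign_bit_set) {
    const int8_t m = sign_bit_set ? (int8_t)-1 : (int8_t)0; // all-ones mask selects negation
    return (int8_t)((q ^ m) - m);                           // (q ^ -1) - (-1) == -q
}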