#define GGML_COMMON_IMPL_C
#include "ggml-common.h"

#include "ggml-cpu-impl.h"
#include "simd-mappings.h"
#include "ggml-quants.h"
#include "quants.h"

#include "arch-fallback.h"

#include <string.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h> // for qsort
#include <stdio.h>  // for GGML_ASSERT

#define GROUP_MAX_EPS 1e-15f
#define GROUP_MAX_EPS_IQ3_XXS 1e-8f
#define GROUP_MAX_EPS_IQ2_S 1e-8f
#define GROUP_MAX_EPS_IQ1_M 1e-7f
#define GROUP_MAX_EPS_IQ1_S 1e-12f

#define UNUSED GGML_UNUSED
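// NOTE: everything in this file is the generic, pure-scalar path. The
// *_generic functions are used when no SIMD implementation exists for the
// target architecture (see arch-fallback.h), and they define the reference
// behavior the vectorized versions must match.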
void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
    quantize_row_q4_0_ref(x, y, k);
}

void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
    quantize_row_q4_1_ref(x, y, k);
}

void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
    quantize_row_q5_0_ref(x, y, k);
}

void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
    quantize_row_q5_1_ref(x, y, k);
}

void quantize_row_q8_0_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
    quantize_row_q8_0_ref(x, y, k);
}

void quantize_row_q8_1_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
    quantize_row_q8_1_ref(x, y, k);
}

//
// 2-6 bit quantization in super-blocks
//

//========================= 2-bit (de)-quantization

void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    quantize_row_q2_K_ref(x, vy, k);
}

//========================= 3-bit (de)-quantization

void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    quantize_row_q3_K_ref(x, vy, k);
}

// ====================== 4-bit (de)-quantization

void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    assert(k % QK_K == 0);
    block_q4_K * GGML_RESTRICT y = vy;
    quantize_row_q4_K_ref(x, y, k);
}

// ====================== 5-bit (de)-quantization

void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    assert(k % QK_K == 0);
    block_q5_K * GGML_RESTRICT y = vy;
    quantize_row_q5_K_ref(x, y, k);
}

// ====================== 6-bit (de)-quantization

void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    assert(k % QK_K == 0);
    block_q6_K * GGML_RESTRICT y = vy;
    quantize_row_q6_K_ref(x, y, k);
}

// ====================== Ternary (de)-quantization (BitNet b1.58 and TriLMs)

void quantize_row_tq1_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    assert(k % QK_K == 0);
    block_tq1_0 * GGML_RESTRICT y = vy;
    quantize_row_tq1_0_ref(x, y, k);
}

void quantize_row_tq2_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    assert(k % QK_K == 0);
    block_tq2_0 * GGML_RESTRICT y = vy;
    quantize_row_tq2_0_ref(x, y, k);
}

//===================================== Q8_K ==============================================

void quantize_row_q8_K_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
    quantize_row_q8_K_ref(x, y, k);
}
//===================================== Dot products =================================
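// All ggml_vec_dot_*_generic functions below share one calling convention:
// compute the dot product of n elements from the quantized rows vx and vy
// and store it in *s. The generic paths only handle a single row pair
// (nrc == 1), so the stride arguments bs/bx/by are unused.
//
// Q4_0 layout (see ggml-common.h): 32 weights per block as 16 bytes of
// packed nibbles plus one fp16 scale d; a nibble q decodes to (q - 8) * d,
// e.g. the byte 0xF0 yields (0 - 8) = -8 from its low nibble and
// (15 - 8) = 7 from its high nibble before scaling. A minimal
// dequantization sketch (illustration only; `b` and `out` are hypothetical):
//
//     for (int j = 0; j < QK4_0/2; ++j) {
//         out[j]           = ((b->qs[j] & 0x0F) - 8) * GGML_CPU_FP16_TO_FP32(b->d);
//         out[j + QK4_0/2] = ((b->qs[j] >>   4) - 8) * GGML_CPU_FP16_TO_FP32(b->d);
//     }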
void ggml_vec_dot_q4_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q4_0 * GGML_RESTRICT x = vx;
    const block_q8_0 * GGML_RESTRICT y = vy;

    int ib = 0;
    float sumf = 0;

    for (; ib < nb; ++ib) {
        int sumi0 = 0;
        int sumi1 = 0;

        for (int j = 0; j < qk/2; ++j) {
            const int v0 = (x[ib].qs[j] & 0x0F) - 8;
            const int v1 = (x[ib].qs[j] >>   4) - 8;

            sumi0 += (v0 * y[ib].qs[j]);
            sumi1 += (v1 * y[ib].qs[j + qk/2]);
        }

        int sumi = sumi0 + sumi1;
        sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d);
    }

    *s = sumf;
}
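// Q4_1 stores an fp16 offset m in addition to the scale, so a nibble q
// decodes to q * d + m. The offset term is folded in through y[ib].s, the
// precomputed d * sum(qs) of the Q8_1 activation block: expanding
// sum((q*d + m) * q8*d8) gives d*d8*sumi + m*(d8*sum(q8)) = d*d8*sumi + m*s.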
// TODO: add WASM SIMD
void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    const int qk = QK8_1;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q4_1 * GGML_RESTRICT x = vx;
    const block_q8_1 * GGML_RESTRICT y = vy;

    int ib = 0;
    float sumf = 0;

    for (; ib < nb; ++ib) {
        int sumi0 = 0;
        int sumi1 = 0;

        for (int j = 0; j < qk/2; ++j) {
            const int v0 = (x[ib].qs[j] & 0x0F);
            const int v1 = (x[ib].qs[j] >>   4);

            sumi0 += (v0 * y[ib].qs[j]);
            sumi1 += (v1 * y[ib].qs[j + qk/2]);
        }

        int sumi = sumi0 + sumi1;
        sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
    }

    *s = sumf;
}
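// Q5_0 packs the fifth bit of each of the 32 weights into the 4-byte qh
// field. For element j of the low-nibble half, bit j of qh is shifted into
// bit position 4 (xh_0), OR-ed with the nibble, and the result is
// re-centered by -16, giving the signed range [-16, 15]; e.g. nibble 0x0
// with its high bit set decodes to (0x00 | 0x10) - 16 = 0.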
void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    const int qk = QK8_0;
    const int nb = n / qk;

    int ib = 0;
    float sumf = 0;

    assert(n % qk == 0);
    assert(qk == QK5_0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q5_0 * GGML_RESTRICT x = vx;
    const block_q8_0 * GGML_RESTRICT y = vy;

    for (; ib < nb; ++ib) {
        uint32_t qh;
        memcpy(&qh, x[ib].qh, sizeof(qh));

        int sumi0 = 0;
        int sumi1 = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
            const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));

            const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16);
            const int32_t x1 = (int8_t)(((x[ib].qs[j] >>   4) | xh_1) - 16);

            sumi0 += (x0 * y[ib].qs[j]);
            sumi1 += (x1 * y[ib].qs[j + qk/2]);
        }

        int sumi = sumi0 + sumi1;
        sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi;
    }

    *s = sumf;
}
void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    const int qk = QK8_1;
    const int nb = n / qk;

    int ib = 0;
    float sumf = 0;

    assert(n % qk == 0);
    assert(qk == QK5_1);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q5_1 * GGML_RESTRICT x = vx;
    const block_q8_1 * GGML_RESTRICT y = vy;

    for (; ib < nb; ++ib) {
        uint32_t qh;
        memcpy(&qh, x[ib].qh, sizeof(qh));

        int sumi0 = 0;
        int sumi1 = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0;
            const int32_t x1 = (x[ib].qs[j] >>  4) | xh_1;

            sumi0 += (x0 * y[ib].qs[j]);
            sumi1 += (x1 * y[ib].qs[j + qk/2]);
        }

        int sumi = sumi0 + sumi1;
        sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
    }

    *s = sumf;
}

void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q8_0 * GGML_RESTRICT x = vx;
    const block_q8_0 * GGML_RESTRICT y = vy;

    int ib = 0;
    float sumf = 0;

    for (; ib < nb; ++ib) {
        int sumi = 0;

        for (int j = 0; j < qk; j++) {
            sumi += x[ib].qs[j]*y[ib].qs[j];
        }

        sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
    }

    *s = sumf;
}
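// TQ1_0 packs ternary weights {-1, 0, 1} five to a byte in base 3: each
// byte stores its five trits as a base-3 fraction scaled to 8 bits, so
// multiplying by pow3[l] (mod 256, via the uint8_t) discards the first l
// trits and ((uint16_t) q * 3) >> 8 reads the next trit as a value in
// {0, 1, 2}; subtracting 1 recovers the signed weight.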
void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_tq1_0 * GGML_RESTRICT x = vx;
    const block_q8_K  * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243};

    float sumf = 0.0f;

    for (int i = 0; i < nb; ++i) {
        int sum = 0;

        for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) {
            for (size_t l = 0; l < 5; ++l) {
                for (size_t m = 0; m < 32; ++m) {
                    uint8_t q = x[i].qs[j + m] * pow3[l];
                    uint16_t xi = ((uint16_t) q * 3) >> 8;
                    sum += (xi - 1) * y[i].qs[j*5 + l*32 + m];
                }
            }
        }
        for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) {
            for (size_t l = 0; l < 5; ++l) {
                for (size_t m = 0; m < 16; ++m) {
                    uint8_t q = x[i].qs[j + m] * pow3[l];
                    uint16_t xi = ((uint16_t) q * 3) >> 8;
                    sum += (xi - 1) * y[i].qs[j*5 + l*16 + m];
                }
            }
        }

        for (size_t l = 0; l < 4; ++l) {
            for (size_t j = 0; j < sizeof(x->qh); ++j) {
                uint8_t q = x[i].qh[j] * pow3[l];
                uint16_t xi = ((uint16_t) q * 3) >> 8;
                sum += (xi - 1) * y[i].qs[sizeof(x->qs)*5 + l*sizeof(x->qh) + j];
            }
        }

        sumf += (float) sum * (GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d);
    }

    *s = sumf;
}
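// TQ2_0 trades density for simplicity: four weights per byte in plain 2-bit
// fields, where each field q in {0, 1, 2} decodes to q - 1.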
void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_tq2_0 * GGML_RESTRICT x = vx;
    const block_q8_K  * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;
    float sumf = 0.0f;

    for (int i = 0; i < nb; ++i) {
        int32_t sumi = 0;

        for (size_t j = 0; j < sizeof(x->qs); j += 32) {
            for (size_t l = 0; l < 4; ++l) {
                for (size_t k = 0; k < 32; ++k) {
                    sumi += y[i].qs[j*4 + l*32 + k] * (((x[i].qs[j + k] >> (l*2)) & 3) - 1);
                }
            }
        }

        const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);

        sumf += (float) sumi * d;
    }

    *s = sumf;
}
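// Q2_K packs a 4-bit scale (low nibble) and a 4-bit min (high nibble) per
// 16-element sub-block into x[i].scales, scaled by the super-block fp16
// factors d and dmin. The min side is handled entirely through y->bsums
// (per-16 sums of the Q8_K quants), so the kernel reduces to
// sumf += dall * isum - dmin * summs.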
void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q2_K * GGML_RESTRICT x = vx;
    const block_q8_K * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {

        const uint8_t * q2 = x[i].qs;
        const  int8_t * q8 = y[i].qs;
        const uint8_t * sc = x[i].scales;

        int summs = 0;
        for (int j = 0; j < 16; ++j) {
            summs += y[i].bsums[j] * (sc[j] >> 4);
        }

        const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
        const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);

        int isum = 0;
        int is = 0;
        int d;
        for (int k = 0; k < QK_K/128; ++k) {
            int shift = 0;
            for (int j = 0; j < 4; ++j) {
                d = sc[is++] & 0xF;
                int isuml = 0;
                for (int l =  0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
                isum += d * isuml;
                d = sc[is++] & 0xF;
                isuml = 0;
                for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
                isum += d * isuml;
                shift += 2;
                q8 += 32;
            }
            q2 += 32;
        }
        sumf += dall * isum - dmin * summs;
    }
    *s = sumf;
}
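// Q3_K stores 16 6-bit sub-block scales packed into the 12-byte x[i].scales:
// the low 4 bits of each scale sit in the first 8 bytes and the high 2 bits
// in the last 4 bytes. The kmask shuffle below unpacks them into auxs[4],
// after which scales[j] - 32 yields the signed value. The high bit of each
// 3-bit weight comes from hmask, subtracting 4 when the bit is clear.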
void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const uint32_t kmask1 = 0x03030303;
    const uint32_t kmask2 = 0x0f0f0f0f;

    const block_q3_K * GGML_RESTRICT x = vx;
    const block_q8_K * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    // scalar version
    // This function is written like this so the compiler can manage to vectorize most of it.
    // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so of the
    // manually vectorized SIMD implementations. Every other version I tried would run at least 4 times slower.
    // The ideal situation would be if we could just write the code once, and the compiler would
    // automatically produce the best possible set of machine instructions, instead of us having to manually
    // write vectorized versions for AVX, ARM_NEON, etc.

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    uint32_t auxs[4];
    const int8_t * scales = (const int8_t*)auxs;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * GGML_RESTRICT q3 = x[i].qs;
        const uint8_t * GGML_RESTRICT hm = x[i].hmask;
        const  int8_t * GGML_RESTRICT q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * GGML_RESTRICT a = aux8;
        uint8_t m = 1;
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
            for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
            a += 32; m <<= 1;
            for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
            for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
            a += 32; m <<= 1;
            for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
            for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
            a += 32; m <<= 1;
            for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
            for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
            a += 32; m <<= 1;
            q3 += 32;
        }
        a = aux8;

        memcpy(auxs, x[i].scales, 12);
        uint32_t tmp = auxs[2];
        auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
        auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
        auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
        auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
        for (int j = 0; j < QK_K/16; ++j) {
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
}
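// Q4_K and Q5_K share a 12-byte scale encoding: 8 6-bit scales plus 8 6-bit
// mins for the 32-element sub-blocks. The utmp/kmask shuffle rearranges the
// packed bits so scales[] and mins[] can then be read as plain bytes; as in
// Q2_K, the mins are applied through y->bsums and the super-block dmin.
// Q5_K (further below) additionally restores the fifth weight bit from qh,
// adding 16 when it is set.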
void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q4_K * GGML_RESTRICT x = vx;
    const block_q8_K * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    static const uint32_t kmask1 = 0x3f3f3f3f;
    static const uint32_t kmask2 = 0x0f0f0f0f;
    static const uint32_t kmask3 = 0x03030303;

    uint32_t utmp[4];

    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins   = (const uint8_t*)&utmp[2];

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * GGML_RESTRICT q4 = x[i].qs;
        const  int8_t * GGML_RESTRICT q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * GGML_RESTRICT a = aux8;
        for (int j = 0; j < QK_K/64; ++j) {
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
            a += 32;
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
            a += 32; q4 += 32;
        }
        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        int sumi = 0;
        for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
        a = aux8;
        int is = 0;
        for (int j = 0; j < QK_K/32; ++j) {
            int32_t scale = scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
        const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
        sumf -= dmin * sumi;
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
}
void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q5_K * GGML_RESTRICT x = vx;
    const block_q8_K * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    static const uint32_t kmask1 = 0x3f3f3f3f;
    static const uint32_t kmask2 = 0x0f0f0f0f;
    static const uint32_t kmask3 = 0x03030303;

    uint32_t utmp[4];

    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins   = (const uint8_t*)&utmp[2];

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * GGML_RESTRICT q4 = x[i].qs;
        const uint8_t * GGML_RESTRICT hm = x[i].qh;
        const  int8_t * GGML_RESTRICT q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * GGML_RESTRICT a = aux8;
        uint8_t m = 1;
        for (int j = 0; j < QK_K/64; ++j) {
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
            for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
            a += 32; m <<= 1;
            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
            for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
            a += 32; m <<= 1;
            q4 += 32;
        }
        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        int sumi = 0;
        for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
        a = aux8;
        int is = 0;
        for (int j = 0; j < QK_K/32; ++j) {
            int32_t scale = scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
        const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
        sumf -= dmin * sumi;
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
}
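// Q6_K rebuilds each weight from 4 low bits in ql and 2 high bits in qh,
// re-centered by -32, with signed 8-bit scales per 16 elements. Example:
// ql nibble 0xF with qh bits 0b11 gives (0xF | (3 << 4)) - 32 = 31.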
void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_q6_K * GGML_RESTRICT x = vx;
    const block_q8_K * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    int8_t  aux8[QK_K];
    int16_t aux16[8];
    float   sums [8];
    int32_t aux32[8];
    memset(sums, 0, 8*sizeof(float));

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {
        const uint8_t * GGML_RESTRICT q4 = x[i].ql;
        const uint8_t * GGML_RESTRICT qh = x[i].qh;
        const  int8_t * GGML_RESTRICT q8 = y[i].qs;
        memset(aux32, 0, 8*sizeof(int32_t));
        int8_t * GGML_RESTRICT a = aux8;
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                a[l +  0] = (int8_t)((q4[l +  0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
                a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
                a[l + 64] = (int8_t)((q4[l +  0] >>  4) | (((qh[l] >> 4) & 3) << 4)) - 32;
                a[l + 96] = (int8_t)((q4[l + 32] >>  4) | (((qh[l] >> 6) & 3) << 4)) - 32;
            }
            a  += 128;
            q4 += 64;
            qh += 32;
        }
        a = aux8;
        int is = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            int scale = x[i].scales[is++];
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
            q8 += 8; a += 8;
        }
        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
    }
    for (int l = 0; l < 8; ++l) sumf += sums[l];
    *s = sumf;
}
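// The IQ2_* kernels below are codebook quants: every group of 8 weights is
// an entry of a fixed grid (iq2xxs_grid / iq2xs_grid / iq2s_grid) with a
// per-group sign pattern applied through ksigns_iq2xs/kmask_iq2xs, and odd
// sub-block scales of the form 2*s + 1. The grid magnitudes are stored
// premultiplied (fixed point), which the final *s = 0.125f * sumf undoes.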
void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq2_xxs * GGML_RESTRICT x = vx;
    const block_q8_K    * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    uint32_t aux32[2];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
        const  int8_t  * GGML_RESTRICT q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(aux32, q2, 2*sizeof(uint32_t));
            q2 += 4;
            const uint32_t ls = 2*(aux32[1] >> 28) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
                const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls;
        }
        sumf += d * bsum;
    }
    *s = 0.125f * sumf;
}

void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq2_xs * GGML_RESTRICT x = vx;
    const block_q8_K   * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
        const uint8_t  * GGML_RESTRICT sc = x[i].scales;
        const  int8_t  * GGML_RESTRICT q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
            const uint16_t ls2 = 2*(sc[ib32] >>  4) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 2; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
                const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls1;
            sumi = 0;
            for (int l = 2; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
                const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
                for (int j = 0; j < 8; ++j) {
                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += sumi * ls2;
            q2 += 4;
        }
        sumf += d * bsum;
    }
    *s = 0.125f * sumf;
}
void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq2_s * GGML_RESTRICT x = vx;
    const block_q8_K  * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    float sumf = 0;
    for (int i = 0; i < nb; i++) {

        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
        const  int8_t * q8 = y[i].qs;
        const uint8_t * qs = x[i].qs;
        const uint8_t * qh = x[i].qh;
        const uint8_t * signs = qs + QK_K/8;

        int bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf);
            int ls2 = 1 + 2*(x[i].scales[ib32] >>  4);
            int sumi1 = 0, sumi2 = 0;
            for (int l = 0; l < 2; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
                for (int j = 0; j < 8; ++j) {
                    sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            for (int l = 2; l < 4; ++l) {
                const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
                for (int j = 0; j < 8; ++j) {
                    sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
                }
                q8 += 8;
            }
            bsum += ls1 * sumi1 + ls2 * sumi2;
            qs += 4;
            signs += 4;
        }

        sumf += d * bsum;
    }

    *s = 0.125f * sumf;
}

void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq3_xxs * GGML_RESTRICT x = vx;
    const block_q8_K    * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    uint32_t aux32;

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint8_t * GGML_RESTRICT q3 = x[i].qs;
        const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4;
        const  int8_t * GGML_RESTRICT q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
            memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
            const uint32_t ls = 2*(aux32 >> 28) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
                const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
                const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
                for (int j = 0; j < 4; ++j) {
                    sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
                    sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
                }
                q8 += 8;
            }
            q3 += 8;
            bsum += sumi * ls;
        }
        sumf += d * bsum;
    }
    *s = 0.25f * sumf;
}

void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq3_s * GGML_RESTRICT x = vx;
    const block_q8_K  * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    float sumf = 0.f;
    for (int i = 0; i < nb; ++i) {
        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
        const uint8_t * GGML_RESTRICT qs = x[i].qs;
        const uint8_t * GGML_RESTRICT qh = x[i].qh;
        const uint8_t * GGML_RESTRICT signs = x[i].signs;
        const  int8_t * GGML_RESTRICT q8 = y[i].qs;
        int32_t bsum = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1;
            const uint32_t ls2 = 2*(x[i].scales[ib32/2] >>  4) + 1;
            int32_t sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256)));
                const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256)));
                for (int j = 0; j < 4; ++j) {
                    sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
                    sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
                }
                q8 += 8;
            }
            qs += 8;
            signs += 4;
            bsum += sumi * ls1;
            sumi = 0;
            for (int l = 0; l < 4; ++l) {
                const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256)));
                const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256)));
                for (int j = 0; j < 4; ++j) {
                    sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
                    sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
                }
                q8 += 8;
            }
            qs += 8;
            signs += 4;
            bsum += sumi * ls2;
        }
        sumf += d * bsum;
    }
    *s = sumf;
}
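// IQ1_S indexes 8-weight entries of iq1s_grid with 8 low bits from qs plus
// 3 high bits from qh, using per-32 scales of the form 2*s + 1. A per-32
// sign bit selects a constant shift of +/- IQ1S_DELTA applied to every
// weight; since that shift multiplies sum(q8), it is accumulated cheaply
// through y->bsums in sumi1.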
void ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq1_s * GGML_RESTRICT x = vx;
    const block_q8_K  * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    float sumf = 0;
    for (int i = 0; i < nb; i++) {

        const  int8_t  * q8 = y[i].qs;
        const uint8_t  * qs = x[i].qs;
        const uint16_t * qh = x[i].qh;

        int sumi = 0, sumi1 = 0;
        for (int ib = 0; ib < QK_K/32; ++ib) {
            const int ls = 2*((qh[ib] >> 12) & 7) + 1;
            const int delta = qh[ib] & 0x8000 ? -1 : 1;
            int lsum = 0;
            for (int l = 0; l < 4; ++l) {
                const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
                for (int j = 0; j < 8; ++j) {
                    lsum += q8[j] * grid[j];
                }
                q8 += 8;
            }
            sumi  += ls * lsum;
            sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]);
            qs += 4;
        }

        sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1);
    }

    *s = sumf;
}

void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(n % QK_K == 0);
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    const block_iq1_m * GGML_RESTRICT x = vx;
    const block_q8_K  * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    iq1m_scale_t scale;

    int sum1[2], sum2[2], delta[4];

    float sumf = 0;
    for (int i = 0; i < nb; i++) {

        const  int8_t  * q8 = y[i].qs;
        const uint8_t  * qs = x[i].qs;
        const uint8_t  * qh = x[i].qh;
        const uint16_t * sc = (const uint16_t *)x[i].scales;

        scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);

        int sumi1 = 0, sumi2 = 0;
        for (int ib = 0; ib < QK_K/32; ++ib) {
            delta[0] = qh[0] & 0x08 ? -1 : 1;
            delta[1] = qh[0] & 0x80 ? -1 : 1;
            delta[2] = qh[1] & 0x08 ? -1 : 1;
            delta[3] = qh[1] & 0x80 ? -1 : 1;
            sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0;
            for (int l = 0; l < 4; ++l) {
                const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700)));
                int lsum1 = 0, lsum2 = 0;
                for (int j = 0; j < 8; ++j) {
                    lsum1 += q8[j] * grid[j];
                    lsum2 += q8[j];
                }
                q8 += 8;
                sum1[l/2] += lsum1;
                sum2[l/2] += lsum2*delta[l];
            }

            const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1;
            const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1;

            sumi1 += sum1[0] * ls1 + sum1[1] * ls2;
            sumi2 += sum2[0] * ls1 + sum2[1] * ls2;
            qs += 4;
            qh += 2;
        }

        sumf += GGML_CPU_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2);
    }

    *s = sumf;
}
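// IQ4_NL and IQ4_XS are non-linear 4-bit formats: a nibble is not used
// directly but mapped through the 16-entry kvalues_iq4nl lookup table,
// whose values are spaced non-uniformly to better match typical weight
// distributions. IQ4_XS additionally carries 6-bit sub-block scales split
// between scales_l (low 4 bits) and scales_h (2 high bits), biased by -32.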
void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    assert(n % QK4_NL == 0);
    static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same");

    const block_iq4_nl * GGML_RESTRICT x = vx;
    const block_q8_0   * GGML_RESTRICT y = vy;

    const int nb = n / QK4_NL;

    int ib = 0;
    float sumf = 0;

    for (; ib < nb; ++ib) {
        const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d);
        int sumi1 = 0, sumi2 = 0;
        for (int j = 0; j < QK4_NL/2; ++j) {
            sumi1 += y[ib].qs[j+       0] * kvalues_iq4nl[x[ib].qs[j] & 0xf];
            sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >>  4];
        }
        sumf += d * (sumi1 + sumi2);
    }
    *s = sumf;
}

void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    assert(n % QK_K == 0);

    const block_iq4_xs * GGML_RESTRICT x = vx;
    const block_q8_K   * GGML_RESTRICT y = vy;

    const int nb = n / QK_K;

    float sumf = 0;
    for (int ibl = 0; ibl < nb; ++ibl) {
        const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
        uint16_t h = x[ibl].scales_h;
        const uint8_t * qs = x[ibl].qs;
        const  int8_t * q8 = y[ibl].qs;
        for (int ib = 0; ib < QK_K/32; ib += 2) {
            const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
            const uint8_t ls2 = (x[ibl].scales_l[ib/2] >>  4) | ((h << 2) & 0x30);
            h >>= 4;
            const float d1 = d4d8*(ls1 - 32);
            const float d2 = d4d8*(ls2 - 32);
            int sumi1 = 0, sumi2 = 0;
            for (int j = 0; j < 16; ++j) {
                sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
                sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >>  4];
            }
            sumf += d1 * (sumi1 + sumi2);
            qs += 16;
            q8 += 32;
            sumi1 = sumi2 = 0;
            for (int j = 0; j < 16; ++j) {
                sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
                sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >>  4];
            }
            sumf += d2 * (sumi1 + sumi2);
            qs += 16;
            q8 += 32;
        }
    }
    *s = sumf;
}
// ============================ 4-bit non-linear quants

void quantize_row_iq4_nl(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
    assert(k % QK4_NL == 0);
    quantize_row_iq4_nl_ref(x, y, k);
}

void quantize_row_iq4_xs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
    assert(k % QK_K == 0);
    quantize_iq4_xs(x, y, 1, k, NULL);
}