Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-11 09:34:37 +00:00)

Merge commit '9230dbe2c7' into concedo_experimental

# Conflicts:
#	ggml/src/ggml-cpu/CMakeLists.txt
#	src/llama-graph.cpp
#	tools/server/README.md

Commit c16d672ce4: 29 changed files with 2666 additions and 2457 deletions
@@ -3212,6 +3212,32 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.speculative.model.path = value;
         }
     ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL_DRAFT"));
+    add_opt(common_arg(
+        {"-ctkd", "--cache-type-k-draft"}, "TYPE",
+        string_format(
+            "KV cache data type for K for the draft model\n"
+            "allowed values: %s\n"
+            "(default: %s)",
+            get_all_kv_cache_types().c_str(),
+            ggml_type_name(params.speculative.cache_type_k)
+        ),
+        [](common_params & params, const std::string & value) {
+            params.speculative.cache_type_k = kv_cache_type_from_str(value);
+        }
+    ).set_env("LLAMA_ARG_CACHE_TYPE_K_DRAFT"));
+    add_opt(common_arg(
+        {"-ctvd", "--cache-type-v-draft"}, "TYPE",
+        string_format(
+            "KV cache data type for V for the draft model\n"
+            "allowed values: %s\n"
+            "(default: %s)",
+            get_all_kv_cache_types().c_str(),
+            ggml_type_name(params.speculative.cache_type_v)
+        ),
+        [](common_params & params, const std::string & value) {
+            params.speculative.cache_type_v = kv_cache_type_from_str(value);
+        }
+    ).set_env("LLAMA_ARG_CACHE_TYPE_V_DRAFT"));

     add_opt(common_arg(
         {"-mv", "--model-vocoder"}, "FNAME",
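The two options registered above mirror the existing -ctk/-ctv flags but apply to the draft model used for speculative decoding. A hedged usage sketch (the model file names are placeholders, not from this commit):

    # quantize the draft model's K and V caches to q8_0
    llama-server -m main-model.gguf -md draft-model.gguf -ctkd q8_0 -ctvd q8_0

    # equivalently, via the environment variables registered with set_env()
    LLAMA_ARG_CACHE_TYPE_K_DRAFT=q8_0 LLAMA_ARG_CACHE_TYPE_V_DRAFT=q8_0 llama-server -m main-model.gguf -md draft-model.gguf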
@@ -195,6 +195,9 @@ struct common_params_speculative {
     float p_split = 0.1f;  // speculative decoding split probability
     float p_min   = 0.75f; // minimum speculative decoding probability (greedy)

+    ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K
+    ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V
+
     struct cpu_params cpuparams;
     struct cpu_params cpuparams_batch;
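For programmatic callers, the same defaults can be overridden directly on the struct. A minimal hedged sketch (GGML_TYPE_Q8_0 stands in for any of the allowed cache types; the field names come from the hunk above):

    common_params params;
    // quantize only the draft model's KV cache; the target model keeps its own
    // cache_type_k/cache_type_v fields elsewhere in common_params
    params.speculative.cache_type_k = GGML_TYPE_Q8_0;
    params.speculative.cache_type_v = GGML_TYPE_Q8_0;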
@@ -256,7 +256,6 @@ void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo
     UNUSED(blocklen);

 #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
-    if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
     const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx;

     for (int c = 0; c < nc; c += ncols_interleaved) {
@@ -294,7 +293,6 @@ void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo
         s += ncols_interleaved;
     }
     return;
-    }
 #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
     float sumf[4];
     int sumi;
@@ -341,7 +339,6 @@ void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo
     UNUSED(blocklen);

 #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
-    if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
     const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx;

     for (int c = 0; c < nc; c += ncols_interleaved) {
@@ -384,7 +381,6 @@ void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo
         s += ncols_interleaved;
     }
     return;
-    }
 #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
     float sumf[4];
     int sumi;
@@ -432,7 +428,7 @@ void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo

 #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
 #if defined(__ARM_FEATURE_SVE)
-    if (ggml_cpu_has_sve() && ggml_cpu_get_sve_cnt() == QK8_0) {
+    if (ggml_cpu_get_sve_cnt() == QK8_0) {
     const void * b_ptr = vx;
     const void * a_ptr = vy;
     float * res_ptr = s;
@@ -547,7 +543,6 @@ void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const
     UNUSED(blocklen);

 #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
-    if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
     const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl);
     const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
     float * res_ptr = s;
@@ -594,7 +589,6 @@ void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const
         vst1q_f32(res_ptr + x * 4, sumf);
     }
     return;
-    }
 #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON)
     {
     float sumf[4];
@@ -643,8 +637,7 @@ void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo
     UNUSED(ncols_interleaved);
     UNUSED(blocklen);

-#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON)
-    if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
+#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
     const void * b_ptr = vx;
     const void * a_ptr = vy;
     float * res_ptr = s;
@@ -1101,7 +1094,6 @@ void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo
             : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
         );
     return;
-    }
 #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON)
     {
     float sumf[4][4];
@@ -1160,7 +1152,6 @@ void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo
     UNUSED(blocklen);

 #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
-    if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) {
     const void * b_ptr = vx;
     const void * a_ptr = vy;
     float * res_ptr = s;
@@ -1557,7 +1548,6 @@ void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo
             : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
         );
     return;
-    }
 #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
     float sumf[4][4];
     int sumi;
@@ -1615,7 +1605,7 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo

 #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
 #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8)
-    if (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0) {
+    if (ggml_cpu_get_sve_cnt() == QK8_0) {
     const void * b_ptr = vx;
     const void * a_ptr = vy;
     float * res_ptr = s;
@@ -2083,7 +2073,6 @@ void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const
     UNUSED(blocklen);

 #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
-    if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
     const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl);

     for (int y = 0; y < nr / 4; y++) {
@@ -2135,7 +2124,6 @@ void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const
         }
     }
     return;
-    }
 #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON)
     {
     float sumf[4][4];
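The common thread in these hunks: runtime feature checks that are already implied by the surrounding preprocessor guard are dropped, and only genuinely runtime-variable properties keep a check. A hedged sketch of the resulting dispatch idiom (the function name is mine; QK8_0 is the q8_0 block size used above):

    static void gemv_dispatch_sketch(void) {
    #if defined(__ARM_FEATURE_SVE)
        // the guard already guarantees SVE support, so ggml_cpu_has_sve() is
        // redundant here; the SVE vector length still varies per CPU, so it
        // keeps a runtime test
        if (ggml_cpu_get_sve_cnt() == QK8_0) {
            // specialized 256-bit SVE path
            return;
        }
    #endif
        // generic fallback path
    }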
@@ -78,13 +78,8 @@
 #endif
 #if defined(__ARM_ARCH)
 struct ggml_arm_arch_features_type {
-    int has_neon;
-    int has_dotprod;
-    int has_i8mm;
-    int has_sve;
     int sve_cnt;
-    int has_sme;
-} ggml_arm_arch_features = {-1, -1, -1, -1, 0, -1};
+} ggml_arm_arch_features = { 0 };
 #endif

@@ -683,87 +678,15 @@ bool ggml_is_numa(void) {

 #if defined(__linux__) && defined(__aarch64__)
 #include <sys/auxv.h>
-#elif defined(__APPLE__)
-#include <sys/sysctl.h>
-#endif
-
-#if !defined(HWCAP2_I8MM)
-#define HWCAP2_I8MM (1 << 13)
-#endif
-
-#if !defined(HWCAP2_SME)
-#define HWCAP2_SME (1 << 23)
 #endif

 static void ggml_init_arm_arch_features(void) {
-#if defined(__linux__) && defined(__aarch64__)
-    uint32_t hwcap = getauxval(AT_HWCAP);
-    uint32_t hwcap2 = getauxval(AT_HWCAP2);
-
-    ggml_arm_arch_features.has_neon    = !!(hwcap & HWCAP_ASIMD);
-    ggml_arm_arch_features.has_dotprod = !!(hwcap & HWCAP_ASIMDDP);
-    ggml_arm_arch_features.has_i8mm    = !!(hwcap2 & HWCAP2_I8MM);
-    ggml_arm_arch_features.has_sve     = !!(hwcap & HWCAP_SVE);
-    ggml_arm_arch_features.has_sme     = !!(hwcap2 & HWCAP2_SME);
-
-#if defined(__ARM_FEATURE_SVE)
+#if defined(__linux__) && defined(__aarch64__) && defined(__ARM_FEATURE_SVE)
     ggml_arm_arch_features.sve_cnt = PR_SVE_VL_LEN_MASK & prctl(PR_SVE_GET_VL);
 #endif
-#elif defined(__APPLE__)
-    int oldp = 0;
-    size_t size = sizeof(oldp);
-    if (sysctlbyname("hw.optional.AdvSIMD", &oldp, &size, NULL, 0) != 0) {
-        oldp = 0;
-    }
-    ggml_arm_arch_features.has_neon = oldp;
-
-    if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &oldp, &size, NULL, 0) != 0) {
-        oldp = 0;
-    }
-    ggml_arm_arch_features.has_dotprod = oldp;
-
-    if (sysctlbyname("hw.optional.arm.FEAT_I8MM", &oldp, &size, NULL, 0) != 0) {
-        oldp = 0;
-    }
-    ggml_arm_arch_features.has_i8mm = oldp;
-
-    if (sysctlbyname("hw.optional.arm.FEAT_SME", &oldp, &size, NULL, 0) != 0) {
-        oldp = 0;
-    }
-    ggml_arm_arch_features.has_sme = oldp;
-
-    ggml_arm_arch_features.has_sve = 0;
-    ggml_arm_arch_features.sve_cnt = 0;
-#else
-// Run-time CPU feature detection not implemented for this platform, fallback to compile time
-#if defined(__ARM_NEON)
-    ggml_arm_arch_features.has_neon = 1;
-#else
-    ggml_arm_arch_features.has_neon = 0;
-#endif
-
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-    ggml_arm_arch_features.has_i8mm = 1;
-#else
-    ggml_arm_arch_features.has_i8mm = 0;
-#endif
-
-#if defined(__ARM_FEATURE_SVE)
-    ggml_arm_arch_features.has_sve = 1;
-    ggml_arm_arch_features.sve_cnt = 16;
-#else
-    ggml_arm_arch_features.has_sve = 0;
-    ggml_arm_arch_features.sve_cnt = 0;
-#endif
-
-#if defined(__ARM_FEATURE_SME) || defined(__ARM_FEATURE_SME2)
-    ggml_arm_arch_features.has_sme = 1;
-#else
-    ggml_arm_arch_features.has_sme = 0;
-#endif
-#endif
 }
-#endif
+#endif // __ARM_ARCH

 struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
     GGML_ASSERT(!ggml_get_no_alloc(ctx));
@@ -3472,7 +3395,7 @@ int ggml_cpu_has_vxe(void) {

 int ggml_cpu_has_neon(void) {
 #if defined(__ARM_ARCH) && defined(__ARM_NEON)
-    return ggml_arm_arch_features.has_neon;
+    return 1;
 #else
     return 0;
 #endif
@@ -3480,7 +3403,7 @@ int ggml_cpu_has_neon(void) {

 int ggml_cpu_has_dotprod(void) {
 #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_DOTPROD)
-    return ggml_arm_arch_features.has_dotprod;
+    return 1;
 #else
     return 0;
 #endif
@@ -3488,7 +3411,7 @@ int ggml_cpu_has_dotprod(void) {

 int ggml_cpu_has_sve(void) {
 #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE)
-    return ggml_arm_arch_features.has_sve;
+    return 1;
 #else
     return 0;
 #endif
@@ -3496,7 +3419,7 @@ int ggml_cpu_has_sve(void) {

 int ggml_cpu_has_matmul_int8(void) {
 #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_MATMUL_INT8)
-    return ggml_arm_arch_features.has_i8mm;
+    return 1;
 #else
     return 0;
 #endif
@@ -3512,7 +3435,7 @@ int ggml_cpu_get_sve_cnt(void) {

 int ggml_cpu_has_sme(void) {
 #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SME)
-    return ggml_arm_arch_features.has_sme;
+    return 1;
 #else
     return 0;
 #endif
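With the feature flags gone, the only ARM property still probed at run time is the SVE vector length; everything else folds to a compile-time constant. A hedged, self-contained sketch of the Linux path this commit keeps (the guard now also requires __ARM_FEATURE_SVE, matching the hunk above):

    #include <sys/prctl.h>

    // Returns the current SVE vector length in bytes, as
    // ggml_init_arm_arch_features() now does; PR_SVE_GET_VL reports it in the
    // low bits of the prctl() result.
    static int detect_sve_vl_bytes(void) {
    #if defined(__linux__) && defined(__aarch64__) && defined(__ARM_FEATURE_SVE)
        return PR_SVE_VL_LEN_MASK & prctl(PR_SVE_GET_VL);
    #else
        return 0;
    #endif
    }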
ggml/src/ggml-cuda/conv2d-dw.cu (new file, 161 lines)
@@ -0,0 +1,161 @@
#include "conv2d-dw.cuh"

struct conv_params {
    int in_w, in_h;
    int out_w, out_h;
    int kernel_w, kernel_h;
    int stride_x, stride_y;
    int padding_x, padding_y;
    int dilation_x, dilation_y;
    int channels, batches;
};

struct kernel_bounds {
    int y_min, y_max;
    int x_min, x_max;
};

__device__ __forceinline__ kernel_bounds calculate_kernel_bounds(int out_x, int out_y, const conv_params & params) {
    kernel_bounds bounds;
    bounds.y_min = max(0, (params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y);
    bounds.y_max =
        min(params.kernel_h,
            (params.in_h + params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y);
    bounds.x_min = max(0, (params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x);
    bounds.x_max =
        min(params.kernel_w,
            (params.in_w + params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x);
    return bounds;
}

__device__ __forceinline__ int calculate_input_coord(int out_coord, int kern_coord, int stride, int dilation, int padding) {
    return out_coord * stride + kern_coord * dilation - padding;
}

struct whcn_layout {
    __device__ static int input_index(int n, int c, int y, int x, const conv_params & params) {
        return n * (params.channels * params.in_w * params.in_h) + c * params.in_w * params.in_h + y * params.in_w + x;
    }

    __device__ static int kernel_index(int c, int ky, int kx, const conv_params & params) {
        return c * params.kernel_h * params.kernel_w + ky * params.kernel_w + kx;
    }

    __device__ static int output_index(int n, int c, int y, int x, const conv_params & params) {
        return n * (params.channels * params.out_w * params.out_h) + c * params.out_w * params.out_h +
               y * params.out_w + x;
    }

    __device__ static void unpack_indices(int global_idx, const conv_params & params, int & n, int & c, int & out_y,
                                          int & out_x) {
        out_x = global_idx % params.out_w;
        out_y = (global_idx / params.out_w) % params.out_h;
        c     = (global_idx / (params.out_w * params.out_h)) % params.channels;
        n     = global_idx / (params.out_w * params.out_h * params.channels);
    }
};

struct cwhn_layout {
    __device__ static int input_index(int n, int c, int y, int x, const conv_params & params) {
        return n * (params.channels * params.in_w * params.in_h) + (y * params.in_w + x) * params.channels + c;
    }

    __device__ static int kernel_index(int c, int ky, int kx, const conv_params & params) {
        return (ky * params.kernel_w + kx) * params.channels + c;
    }

    __device__ static int output_index(int n, int c, int y, int x, const conv_params & params) {
        return n * (params.channels * params.out_w * params.out_h) + y * (params.out_w * params.channels) +
               x * params.channels + c;
    }

    __device__ static void unpack_indices(int global_idx, const conv_params & params, int & n, int & c, int & out_y,
                                          int & out_x) {
        c     = global_idx % params.channels;
        out_x = (global_idx / params.channels) % params.out_w;
        out_y = (global_idx / (params.channels * params.out_w)) % params.out_h;
        n     = global_idx / (params.channels * params.out_w * params.out_h);
    }
};

template <typename T, typename Layout>
__global__ void conv2d_dw_kernel(const T * __restrict__ input, const T * __restrict__ kernel, T * __restrict__ output,
                                 const int in_w, const int in_h, const int out_w, const int out_h,
                                 const int kernel_w, const int kernel_h, const int stride_x, const int stride_y,
                                 const int padding_x, const int padding_y, const int dilation_x, const int dilation_y,
                                 const int channels, const int batches) {
    const int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int total_elements = batches * channels * out_h * out_w;

    if (global_idx >= total_elements) {
        return;
    }

    conv_params params = { in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x,
                           stride_y, padding_x, padding_y, dilation_x, dilation_y, channels, batches };

    int batch_idx, channel_idx, out_y_idx, out_x_idx;
    Layout::unpack_indices(global_idx, params, batch_idx, channel_idx, out_y_idx, out_x_idx);

    T accumulator = 0;
    kernel_bounds bounds = calculate_kernel_bounds(out_x_idx, out_y_idx, params);

    for (int kern_y = bounds.y_min; kern_y < bounds.y_max; ++kern_y) {
        int in_y_idx = calculate_input_coord(out_y_idx, kern_y, params.stride_y, params.dilation_y, params.padding_y);

        for (int kern_x = bounds.x_min; kern_x < bounds.x_max; ++kern_x) {
            int in_x_idx = calculate_input_coord(out_x_idx, kern_x, params.stride_x, params.dilation_x, params.padding_x);

            const T input_val  = input[Layout::input_index(batch_idx, channel_idx, in_y_idx, in_x_idx, params)];
            const T kernel_val = kernel[Layout::kernel_index(channel_idx, kern_y, kern_x, params)];

            accumulator += input_val * kernel_val;
        }
    }

    output[Layout::output_index(batch_idx, channel_idx, out_y_idx, out_x_idx, params)] = accumulator;
}

void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * kernel = dst->src[0];
    const ggml_tensor * input  = dst->src[1];

    GGML_ASSERT(kernel->type == GGML_TYPE_F32 && input->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    const float * w_d = (const float *) kernel->data;
    const float * x_d = (const float *) input->data;
    float * y_d       = (float *) dst->data;

    const int32_t * p = (const int32_t *) dst->op_params;
    const int stride_x   = p[0];
    const int stride_y   = p[1];
    const int padding_x  = p[2];
    const int padding_y  = p[3];
    const int dilation_x = p[4];
    const int dilation_y = p[5];

    const int in_w     = input->ne[0];
    const int in_h     = input->ne[1];
    const int kernel_w = kernel->ne[0];
    const int kernel_h = kernel->ne[1];
    const int out_w    = dst->ne[0];
    const int out_h    = dst->ne[1];
    const int channels = dst->ne[2];
    const int batches  = dst->ne[3];

    cudaStream_t st = ctx.stream();

    const int total  = batches * channels * out_h * out_w;
    const int blocks = (total + CUDA_CONV2D_DW_BLOCK_SIZE - 1) / CUDA_CONV2D_DW_BLOCK_SIZE;

    if (ggml_is_contiguous(input)) {
        conv2d_dw_kernel<float, whcn_layout><<<blocks, CUDA_CONV2D_DW_BLOCK_SIZE, 0, st>>>(
            x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y,
            dilation_x, dilation_y, channels, batches);
    } else if (ggml_is_contiguous_channels(input)) {
        conv2d_dw_kernel<float, cwhn_layout><<<blocks, CUDA_CONV2D_DW_BLOCK_SIZE, 0, st>>>(
            x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y,
            dilation_x, dilation_y, channels, batches);
    } else {
        GGML_ABORT("Unsupported memory layout for conv_2d_dw");
    }
}
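calculate_kernel_bounds() clamps the kernel window so that every coordinate produced by calculate_input_coord() stays inside the input. A hedged host-side replica of the one-dimensional case, handy for sanity-checking the arithmetic without a GPU (the names are mine, not from the file):

    #include <algorithm>
    #include <cstdio>

    // One axis of calculate_kernel_bounds: restrict kern_x so that
    // in_x = out_x * stride + kern_x * dilation - padding stays in [0, in_w).
    static void kernel_bounds_1d(int out_x, int stride, int dilation, int padding,
                                 int in_w, int kernel_w, int & x_min, int & x_max) {
        x_min = std::max(0, (padding - out_x * stride + dilation - 1) / dilation);
        x_max = std::min(kernel_w,
                         (in_w + padding - out_x * stride + dilation - 1) / dilation);
    }

    int main() {
        int x_min, x_max;
        // out_x = 0 with padding 1: the kernel tap kern_x = 0 would read
        // in_x = -1, so the lower bound moves up to 1
        kernel_bounds_1d(/*out_x=*/0, /*stride=*/1, /*dilation=*/1, /*padding=*/1,
                         /*in_w=*/8, /*kernel_w=*/3, x_min, x_max);
        std::printf("kern_x in [%d, %d)\n", x_min, x_max); // prints: kern_x in [1, 3)
    }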
ggml/src/ggml-cuda/conv2d-dw.cuh (new file, 5 lines)
@@ -0,0 +1,5 @@
#pragma once
#include "common.cuh"

#define CUDA_CONV2D_DW_BLOCK_SIZE 256
void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
@@ -13,6 +13,7 @@ bool g_mul_mat_q = true;
 #include "ggml-cuda/clamp.cuh"
 #include "ggml-cuda/concat.cuh"
 #include "ggml-cuda/conv-transpose-1d.cuh"
+#include "ggml-cuda/conv2d-dw.cuh"
 #include "ggml-cuda/convert.cuh"
 #include "ggml-cuda/count-equal.cuh"
 #include "ggml-cuda/cpy.cuh"
@@ -2315,6 +2316,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_OP_IM2COL:
             ggml_cuda_op_im2col(ctx, dst);
             break;
+        case GGML_OP_CONV_2D_DW:
+            ggml_cuda_op_conv2d_dw(ctx, dst);
+            break;
         case GGML_OP_CONV_TRANSPOSE_1D:
             ggml_cuda_op_conv_transpose_1d(ctx,dst);
             break;
@@ -3214,6 +3218,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
             return op->src[0]->nb[0] == ggml_type_size(op->src[0]->type) && ggml_is_contiguous_2(op->src[0]);
         }
         case GGML_OP_IM2COL:
+        case GGML_OP_CONV_2D_DW:
         case GGML_OP_POOL_2D:
         case GGML_OP_SUM:
         case GGML_OP_SUM_ROWS:
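On the graph side nothing else changes: a depthwise convolution built with the existing ggml API is now routed to the new CUDA kernel instead of falling back. A hedged sketch, assuming the usual ggml_conv_2d_dw() signature (kernel first, then input, then per-axis stride/padding/dilation); treat the tensor shapes and call as illustrative only:

    // 3x3 depthwise convolution over 64 channels, batch of 1 (F32 throughout,
    // matching the GGML_ASSERT in ggml_cuda_op_conv2d_dw)
    ggml_tensor * kernel = ggml_new_tensor_4d(ctx, GGML_TYPE_F32,   3,   3,  1, 64);
    ggml_tensor * input  = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 224, 224, 64,  1);
    ggml_tensor * out    = ggml_conv_2d_dw(ctx, kernel, input,
                                           /*s0=*/1, /*s1=*/1, /*p0=*/1, /*p1=*/1, /*d0=*/1, /*d1=*/1);
    // with a CUDA backend, GGML_OP_CONV_2D_DW now dispatches to ggml_cuda_op_conv2d_dw()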
@@ -7,7 +7,10 @@ import os
 from pathlib import Path
 from typing import Any, Callable, Sequence, Mapping, Iterable, Protocol, ClassVar, runtime_checkable

+try:
     from sentencepiece import SentencePieceProcessor
+except ImportError:
+    SentencePieceProcessor = None

 import gguf

@@ -302,6 +305,9 @@ class SentencePieceVocab(Vocab):
     name = "spm"

     def __init__(self, base_path: Path):
+        if SentencePieceProcessor is None:
+            raise RuntimeError("sentencepiece is not installed")
+
         added_tokens: dict[str, int] = {}
         if (fname_tokenizer := base_path / 'tokenizer.model').exists():
             # normal location
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "gguf"
-version = "0.17.0"
+version = "0.17.1"
 description = "Read and write ML models in GGUF for GGML"
 authors = ["GGML <ggml@ggml.ai>"]
 packages = [
@@ -22,7 +22,7 @@ python = ">=3.8"
 numpy = ">=1.17"
 tqdm = ">=4.27"
 pyyaml = ">=5.1"
-sentencepiece = ">=0.1.98,<=0.2.0"
+sentencepiece = { version = ">=0.1.98,<=0.2.0", optional = true }
 PySide6 = { version = "^6.9", python = ">=3.9,<3.14", optional = true }

 [tool.poetry.dev-dependencies]
@@ -1,7 +1,6 @@
 #include "llama-batch.h"

 #include "llama-impl.h"
-#include "llama-cparams.h"
 #include "llama-vocab.h"
 #include "llama-memory.h"

@@ -10,282 +9,7 @@
 #include <algorithm>
 #include <sstream>

-llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) {
-    // clear empty sequences
-    // the previous ubatch is assumed to be gone,
-    // so nothing should refer to values in these sequences anymore.
-    for (size_t i = seq.size(); i-- > 0;) {
-        if (seq[i].length == 0) {
-            seq.pop_back();
-        } else {
-            break;
-        }
-    }
-
-    udatas.push_back({});
-
-    auto & udata = udatas.back();
-
-    udata.token.resize(!has_embd ? n_ubatch : 0);
-    udata.embd.resize(has_embd ? n_embd * n_ubatch : 0);
-    udata.pos.resize(n_ubatch);
-    udata.n_seq_id.resize(n_ubatch);
-    udata.seq_id.resize(n_ubatch);
-    udata.output.resize(n_ubatch);
-
-    llama_ubatch ubatch = {
-        /*equal_seqs   =*/ true,
-        /*n_tokens     =*/ 0,
-        /*n_seq_tokens =*/ 0,
-        /*n_seqs       =*/ 0,
-        /*token        =*/ !has_embd ? udata.token.data() : nullptr,
-        /*embd         =*/ has_embd ? udata.embd.data() : nullptr,
-        /*pos          =*/ udata.pos.data(),
-        /*n_seq_id     =*/ udata.n_seq_id.data(),
-        /*seq_id       =*/ udata.seq_id.data(),
-        /*output       =*/ udata.output.data(),
-    };
-
-    return ubatch;
-}
-
-void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) {
-    GGML_ASSERT(batch != nullptr);
-    GGML_ASSERT(length <= seq.length);
-    // Can only add sequences of equal lengths to a batch,
-    // otherwise it isn't clear to which sequence a token belongs
-    GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs);
-    GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs);
-    // NOTE: loops are separated for cache-friendliness
-    if (batch->token) {
-        if (ubatch.equal_seqs) {
-            for (size_t i = 0; i < length; ++i) {
-                ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]];
-            }
-        } else {
-            // simple split
-            ubatch.token = batch->token + seq.offset;
-        }
-    } else {
-        ubatch.token = nullptr;
-    }
-    if (batch->embd) {
-        if (ubatch.equal_seqs) {
-            for (size_t i = 0; i < length; ++i) {
-                memcpy(
-                    ubatch.embd + (n_embd * (ubatch.n_tokens + i)),
-                    batch->embd + (n_embd * ids[seq.offset + i]),
-                    n_embd * sizeof(float)
-                );
-            }
-        } else {
-            // simple split
-            ubatch.embd = batch->embd + (n_embd * seq.offset);
-        }
-    } else {
-        ubatch.embd = nullptr;
-    }
-    if (ubatch.equal_seqs) {
-        for (size_t i = 0; i < length; ++i) {
-            ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]];
-        }
-    } else {
-        // simple split
-        ubatch.pos = batch->pos + seq.offset;
-    }
-    if (ubatch.equal_seqs) {
-        ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id;
-        if (seq.seq_id) {
-            ubatch.seq_id[ubatch.n_seqs] = seq.seq_id;
-        }
-    } else {
-        // simple split
-        if (batch->n_seq_id) {
-            ubatch.n_seq_id = batch->n_seq_id + seq.offset;
-        } else {
-            for (size_t i = 0; i < length; ++i) {
-                ubatch.n_seq_id[ubatch.n_seqs + i] = 1;
-            }
-        }
-        if (batch->seq_id) {
-            ubatch.seq_id = batch->seq_id + seq.offset;
-        }
-    }
-    if (batch->logits) {
-        if (ubatch.equal_seqs) {
-            for (size_t i = 0; i < length; ++i) {
-                size_t id = ids[seq.offset + i];
-                int8_t is_output = batch->logits[id];
-                ubatch.output[ubatch.n_tokens + i] = is_output;
-                if (is_output) { out_ids.push_back(id); }
-            }
-        } else {
-            // simple split
-            ubatch.output = batch->logits + seq.offset;
-            for (size_t i = 0; i < length; ++i) {
-                if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); }
-            }
-        }
-    } else {
-        // only get last output
-        for (size_t i = 0; i < length; ++i) {
-            size_t id = ids[seq.offset + i];
-            int8_t is_last = id == ids.size() - 1;
-            ubatch.output[ubatch.n_tokens + i] = is_last;
-            if (is_last) { out_ids.push_back(id); }
-        }
-    }
-    if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) {
-        ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1;
-    }
-    ubatch.n_tokens += length;
-    ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits
-    seq.offset += length;
-    seq.length -= length;
-    n_tokens -= length;
-    GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs);
-}
-
-llama_ubatch llama_sbatch::split_simple(size_t n_ubatch) {
-    n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
-    llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
-    ubatch.equal_seqs = false;
-    if (!seq.empty()) {
-        llama_sbatch_seq & s = seq[0];
-        size_t length = s.length < n_ubatch ? s.length : n_ubatch;
-        GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits
-        add_seq_to_ubatch(ubatch, s, length);
-    }
-    return ubatch;
-}
-
-llama_ubatch llama_sbatch::split_equal(size_t n_ubatch) {
-    n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
-    llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
-    if (!seq.empty()) {
-        size_t length = 0;
-        size_t n_tokens_in_ubatch = 0;
-        GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits
-        // smallest first, because it's easier to split this way;
-        // starting from the end to pop in constant time.
-        for (size_t i = seq.size(); i-- > 0;) {
-            llama_sbatch_seq & s = seq[i];
-            GGML_ASSERT(s.length > 0);
-            if (length == 0) {
-                length = s.length < n_ubatch ? s.length : n_ubatch;
-            }
-            add_seq_to_ubatch(ubatch, s, length);
-            n_tokens_in_ubatch += length;
-            // shared prompts can't be mixed with any of their sequences,
-            // so it's safer to compute them in their own ubatch
-            if (s.n_seq_id > 1) { break; }
-            // stop when there isn't enough space for another sequence
-            if (length + n_tokens_in_ubatch > n_ubatch) { break; }
-        }
-    }
-    return ubatch;
-}
-
-llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) {
-    n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
-    llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
-    if (!seq.empty()) {
-        llama_sbatch_seq & s = seq[seq.size() - 1];
-        size_t length = s.length < n_ubatch ? s.length : n_ubatch;
-        GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits
-        add_seq_to_ubatch(ubatch, s, length);
-    }
-    return ubatch;
-}
-
-llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split) {
-    GGML_ASSERT(batch.n_tokens >= 0);
-    this->batch = &batch;
-    this->n_embd = n_embd;
-
-    n_tokens = batch.n_tokens;
-    ids.resize(n_tokens);
-    out_ids.clear();
-    // TODO: reserve out_ids and seq
-
-    for (size_t i = 0; i < n_tokens; ++i) {
-        ids[i] = i;
-    }
-
-    if (simple_split) {
-        seq.resize(1);
-        llama_sbatch_seq & s = seq[0];
-        s.n_seq_id = 0;
-        s.seq_id = nullptr;
-        s.offset = 0;
-        s.length = n_tokens;
-        return;
-    }
-
-    std::sort(ids.begin(), ids.end(),
-        [&batch](size_t a, size_t b) {
-            int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1;
-            int32_t n_seq_b = batch.n_seq_id ? batch.n_seq_id[b] : 1;
-            // sort by seq_id, then by pos
-            if (n_seq_a == n_seq_b) {
-                if (batch.seq_id) {
-                    for (int32_t i = 0; i < n_seq_a; ++i) {
-                        llama_seq_id seq_id_a = batch.seq_id[a][i];
-                        llama_seq_id seq_id_b = batch.seq_id[b][i];
-                        // smaller seq_ids go first
-                        if (seq_id_a != seq_id_b) {
-                            return seq_id_a < seq_id_b;
-                        }
-                    }
-                }
-                // when all else is equal, sort by pos
-                if (batch.pos) {
-                    return batch.pos[a] < batch.pos[b];
-                }
-                // no pos, sort by id
-                return a < b;
-            }
-            // shared prompts go first
-            return n_seq_a > n_seq_b;
-        }
-    );
-
-    // init seq
-    llama_sbatch_seq * last_seq = nullptr;
-
-    for (size_t i = 0; i < n_tokens; ++i) {
-        const size_t bi = ids[i];
-        const int32_t n_seqs = batch.n_seq_id[bi];
-        llama_seq_id * seq_ids = batch.seq_id[bi];
-        if (last_seq != nullptr) {
-            bool same = n_seqs == last_seq->n_seq_id;
-            for (int32_t j = 0; same && j < n_seqs; ++j) {
-                if (seq_ids[j] != last_seq->seq_id[j]) {
-                    same = false;
-                }
-            }
-            if (same) {
-                last_seq->length += 1;
-                continue;
-            }
-        }
-        llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1};
-        seq.push_back(new_seq);
-        last_seq = &seq.back();
-    }
-
-    // keep shared prompts first at the end, then sort by length descending.
-    std::sort(seq.begin(), seq.end(),
-        [](llama_sbatch_seq & a, llama_sbatch_seq & b) {
-            if (a.n_seq_id == b.n_seq_id) {
-                return a.length > b.length;
-            }
-            return a.n_seq_id < b.n_seq_id;
-        }
-    );
-}
-
-llama_batch_allocr::llama_batch_allocr() {
+llama_batch_allocr::llama_batch_allocr(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {
     const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG");
     debug = LLAMA_BATCH_DEBUG ? atoi(LLAMA_BATCH_DEBUG) : 0;

@@ -294,17 +18,22 @@ llama_batch_allocr::llama_batch_allocr() {
     for (auto & cur : seq_cpl) {
         cur.resize(LLAMA_MAX_SEQ);
     }

+    seq_idx.resize(LLAMA_MAX_SEQ, -1);
 }

 bool llama_batch_allocr::init(
         const llama_batch & batch_inp,
         const llama_vocab & vocab,
         const llama_memory_i * memory,
-        bool embd_all) {
+        uint32_t n_embd,
+        bool output_all) {
     clear();

     batch = batch_inp;

+    this->vocab = &vocab;
+
     GGML_ASSERT(batch.n_tokens > 0);

     //
@@ -359,6 +88,7 @@ bool llama_batch_allocr::init(
     llama_pos p0[LLAMA_MAX_SEQ];
     for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
         if (!memory) {
+            // if no memory -> start from 0
             p0[s] = 0;
         } else {
             p0[s] = memory->seq_pos_max(s) + 1;
@@ -370,8 +100,11 @@ bool llama_batch_allocr::init(

             pos[i] = p0[seq_id];

+            // update the starting position for all sequences that are assigned to the this token
             for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
-                p0[batch.seq_id[i][s]] = pos[i] + 1;
+                const llama_seq_id seq_id = batch.seq_id[i][s];
+
+                p0[seq_id] = pos[i] + 1;
             }
         }
@@ -379,7 +112,7 @@ bool llama_batch_allocr::init(
     }

     if (!batch.logits) {
-        if (embd_all) {
+        if (output_all) {
             // return the output for all tokens
             output.resize(batch.n_tokens, true);
         } else {
@@ -389,7 +122,7 @@ bool llama_batch_allocr::init(
         }

         batch.logits = output.data();
-    } else if (embd_all) {
+    } else if (output_all) {
         bool warn = false;

         for (int32_t i = 0; i < batch.n_tokens; ++i) {
@@ -410,6 +143,9 @@ bool llama_batch_allocr::init(
     // compute stats
     //

+    this->n_embd = n_embd;
+
+    // count the outputs in this batch
     for (int32_t i = 0; i < batch.n_tokens; ++i) {
         n_outputs += batch.logits[i] != 0;
     }
@@ -417,66 +153,68 @@ bool llama_batch_allocr::init(
     // determine coupled sequences
     // these are pairs of sequences that have at least one token in the input batch that is assigned to both of them
     for (int32_t i = 0; i < batch.n_tokens; ++i) {
-        for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
-            seq_pos[batch.seq_id[i][s]].insert(batch.pos[i]);
-
-            if (s > 0) {
         const llama_seq_id s0 = batch.seq_id[i][0];

+        for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
             const llama_seq_id s1 = batch.seq_id[i][s];

+            seq_pos[s1].insert(batch.pos[i]);
+
+            if (s > 0) {
                 // mark that sequence s1 is coupled to s0
                 seq_cpl[s1][s0] = true;

-                // note: the other way around is not necessary for now
+                // note: tracking the other way around is not necessary for now
                 //seq_cpl[s0][s1] = true;
             }
         }
     }

+    // precompute the sequence sets for each token and determine the unique sequence ids that participate in the batch
+    {
+        seq_set_t seq_set_unq;
+
+        for (int32_t i = 0; i < batch.n_tokens; ++i) {
+            seq_set_t cur;
+            for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
+                const llama_seq_id seq_id = batch.seq_id[i][s];
+
+                cur        .set(seq_id);
+                seq_set_unq.set(seq_id);
+            }
+
+            seq_set.push_back(cur);
+            seq_set_map[cur].push_back(i);
+        }
+
+        for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
+            if (seq_set_unq.test(s)) {
+                seq_idx[s] = seq_id_unq.size();
+                seq_id_unq.push_back(s);
+            }
+        }
+    }
+
     if (debug > 0) {
         LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__);
-        LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, batch.n_tokens);
-        LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) batch.token);
-        LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) batch.embd);
-        LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) batch.pos);
-        LLAMA_LOG_DEBUG("%s: n_seq_id = %p\n", __func__, (void *) batch.n_seq_id);
-        LLAMA_LOG_DEBUG("%s: seq_id = %p\n", __func__, (void *) batch.seq_id);
-        LLAMA_LOG_DEBUG("%s: logits = %p\n", __func__, (void *) batch.logits);
-        LLAMA_LOG_DEBUG("%s: n_outputs = %d\n", __func__, n_outputs);
-
-        if (debug > 1) {
-            int seq_id_max = 0;
-            for (int32_t i = 0; i < batch.n_tokens; ++i) {
-                for (int s = 0; s < batch.n_seq_id[i]; ++s) {
-                    seq_id_max = std::max(seq_id_max, batch.seq_id[i][s]);
-                }
-            }
-            ++seq_id_max;
-
-            LLAMA_LOG_DEBUG("%s: token = [\n", __func__);
-            for (int32_t i = 0; i < batch.n_tokens; ++i) {
-                std::vector<int8_t> seq_id(seq_id_max);
-
-                for (int s = 0; s < batch.n_seq_id[i]; ++s) {
-                    seq_id[batch.seq_id[i][s]] = 1;
-                }
-
-                std::stringstream ss;
-                for (int s = 0; s < seq_id_max; ++s) {
-                    if (seq_id[s]) {
-                        ss << s%10;
-                    } else {
-                        ss << ".";
-                    }
-                }
-
-                LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
-                        __func__, i, batch.token[i], vocab.token_to_piece(batch.token[i]).c_str(),
-                        batch.pos[i], batch.n_seq_id[i], ss.str().c_str(), batch.logits[i]);
-            }
-            LLAMA_LOG_DEBUG("%s: ]\n", __func__);
-
+        llama_ubatch ubatch {
+            /*.equal_seqs   =*/ false,
+            /*.n_tokens     =*/ (uint32_t) batch.n_tokens,
+            /*.n_seq_tokens =*/ (uint32_t) 1,
+            /*.n_seqs       =*/ (uint32_t) batch.n_tokens,
+            /*.n_seqs_unq   =*/ (uint32_t) this->seq_id_unq.size(),
+            /*.token        =*/ batch.token,
+            /*.embd         =*/ batch.embd,
+            /*.pos          =*/ batch.pos,
+            /*.n_seq_id     =*/ batch.n_seq_id,
+            /*.seq_id       =*/ batch.seq_id,
+            /*.seq_id_unq   =*/ this->seq_id_unq.data(),
+            /*.seq_idx      =*/ this->seq_idx.data(),
+            /*.output       =*/ batch.logits,
+        };
+
+        ubatch_print(ubatch, debug);
+
         LLAMA_LOG_DEBUG("%s: seq = [\n", __func__);
         for (int s0 = 0; s0 < (int) seq_pos.size(); ++s0) {
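The new bookkeeping above gives every token a set of the sequences it belongs to (seq_set_t is a fixed-size bitset declared in llama-batch.h) and groups tokens by identical sets so the split functions can later pull whole groups at once. A hedged, self-contained sketch of that grouping (container choices are mine; the real code keys the map on the bitset directly):

    #include <bitset>
    #include <map>
    #include <vector>

    int main() {
        constexpr int MAX_SEQ = 64;            // stand-in for LLAMA_MAX_SEQ
        using seq_set_t = std::bitset<MAX_SEQ>;

        // sequences assigned to each of four tokens
        const std::vector<std::vector<int>> token_seqs = {{0}, {0, 1}, {0, 1}, {2}};

        std::vector<seq_set_t> seq_set;                          // per-token set
        std::map<unsigned long long, std::vector<int>> seq_map;  // set -> token indices

        for (int i = 0; i < (int) token_seqs.size(); ++i) {
            seq_set_t cur;
            for (int s : token_seqs[i]) {
                cur.set(s);
            }
            seq_set.push_back(cur);
            seq_map[cur.to_ullong()].push_back(i); // keyed by bit pattern in this sketch
        }
        // seq_map now groups tokens: {0} -> [0], {0,1} -> [1, 2], {2} -> [3]
    }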
@@ -496,7 +234,6 @@ bool llama_batch_allocr::init(
         }
         LLAMA_LOG_DEBUG("%s: ]\n", __func__);
     }
-    }

 //
 // consistency checks
@@ -507,10 +244,23 @@ bool llama_batch_allocr::init(
             continue;
         }

-        if (memory && seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
+        if (memory) {
+            if (batch.token) {
+                if (seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
                     LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s);
                     return false;
                 }
+            } else {
+                assert(batch.embd);
+
+                // for embeddings (typically used as vision input), we allow them to have repeating positions
+                // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
+                if (seq_pos_min(s) != memory->seq_pos_max(s) && seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
+                    LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s);
+                    return false;
+                }
+            }
+        }

         if (seq_pos_max(s) - seq_pos_min(s) + 1 > (int) seq_pos[s].size()) {
             LLAMA_LOG_ERROR("%s: sequence %d positions are not continuous\n", __func__, s);
@@ -532,17 +282,120 @@ bool llama_batch_allocr::init(
         }
     }

+    // disallow partial sequence sub-sets:
+    //
+    //                              invalid:          x
+    //            i:           0   1   2   ...
+    //           ---------------------------------------
+    // seq_id[i][0]:           0   0   1
+    // seq_id[i][1]:           1   1   2
+    // seq_id[i][2]:           2
+    //
+    // disallow decreasing sequence positions:
+    //
+    //                                      invalid:                  x
+    //            i:           0   1   2   3   4   5   6   ...
+    //           ---------------------------------------
+    //       pos[i]:           4   5   0   1   6   2   3
+    // seq_id[i][0]:           0   0   1   1   0   1   0
+    //
+    {
+        seq_set_t cur_seq_set[LLAMA_MAX_SEQ];
+        for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
+            cur_seq_set[s].set();
+        }
+
+        llama_pos cur_seq_pos[LLAMA_MAX_SEQ];
+        for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
+            cur_seq_pos[s] = -1;
+        }
+
+        for (int32_t i = 0; i < batch.n_tokens; ++i) {
+            const llama_pos pos = batch.pos[i];
+
+            for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
+                const llama_seq_id seq_id = batch.seq_id[i][s];
+
+                cur_seq_set[seq_id] &= seq_set[i];
+
+                if (cur_seq_set[seq_id].none()) {
+                    LLAMA_LOG_ERROR("%s: sequence %d belongs to incompatible sequence sets (not allowed)\n", __func__, seq_id);
+                    return false;
+                }
+
+                if (pos < cur_seq_pos[seq_id]) {
+                    LLAMA_LOG_ERROR("%s: sequence %d positions are decreasing (not allowed)\n", __func__, seq_id);
+                    return false;
+                }
+            }
+        }
+    }
+
+    split_reset();
+
     return true;
 }

+llama_ubatch llama_batch_allocr::ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs) {
+    const uint32_t n_tokens = n_seq_tokens*n_seqs;
+
+    clear();
+    split_reset();
+
+    ubatches.emplace_back();
+
+    auto & ubatch = ubatches.back();
+
+    ubatch.token     .resize(n_tokens);
+    ubatch.embd      .clear();
+    ubatch.pos       .resize(n_tokens);
+    ubatch.n_seq_id  .resize(n_tokens);
+    ubatch.seq_id    .resize(n_tokens);
+    ubatch.seq_id_unq.resize(0);
+    ubatch.seq_idx   .resize(LLAMA_MAX_SEQ, -1);
+    ubatch.output    .resize(n_tokens);
+
+    for (uint32_t s = 0; s < n_seqs; ++s) {
+        ubatch.seq_idx[s] = s;
+        ubatch.seq_id_unq.push_back(s);
+    }
+
+    llama_ubatch res {
+        /*.equal_seqs   =*/ true,
+        /*.n_tokens     =*/ n_tokens,
+        /*.n_seq_tokens =*/ n_seq_tokens,
+        /*.n_seqs       =*/ n_seqs,
+        /*.n_seqs_unq   =*/ n_seqs,
+
+        /*.token        =*/ ubatch.token.data(),
+        /*.embd         =*/ nullptr,
+        /*.pos          =*/ ubatch.pos.data(),
+        /*.n_seq_id     =*/ ubatch.n_seq_id.data(),
+        /*.seq_id       =*/ ubatch.seq_id.data(),
+        /*.seq_id_unq   =*/ ubatch.seq_id_unq.data(),
+        /*.seq_idx      =*/ ubatch.seq_idx.data(),
+        /*.output       =*/ ubatch.output.data(),
+    };
+
+    return res;
+}
+
 const llama_batch & llama_batch_allocr::get_batch() const {
     return batch;
 }

+uint32_t llama_batch_allocr::get_n_tokens() const {
+    return batch.n_tokens;
+}
+
 uint32_t llama_batch_allocr::get_n_outputs() const {
     return n_outputs;
 }

+std::vector<int32_t> & llama_batch_allocr::get_out_ids() {
+    return out_ids;
+}
+
 llama_pos llama_batch_allocr::seq_pos_min(llama_seq_id seq_id) const {
     return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].begin();
 }
@ -551,13 +404,187 @@ llama_pos llama_batch_allocr::seq_pos_max(llama_seq_id seq_id) const {
    return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].rbegin();
}

void llama_batch_allocr::split_reset() {
    out_ids.clear();

    used.clear();
    used.resize(get_n_tokens(), false);

    ubatches.clear();
}

llama_ubatch llama_batch_allocr::split_simple(uint32_t n_ubatch) {
    // find the first unused token
    uint32_t cur_idx = 0;
    while (cur_idx < used.size() && used[cur_idx]) {
        ++cur_idx;
    }

    // we are done
    if (cur_idx >= used.size()) {
        return {};
    }

    std::vector<int32_t> idxs;

    while (true) {
        idxs.push_back(cur_idx);

        used[cur_idx] = true;

        ++cur_idx;

        if (cur_idx >= used.size()) {
            break;
        }

        if (idxs.size() >= n_ubatch) {
            break;
        }
    }

    return ubatch_add(idxs, idxs.size(), false);
}

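// Illustration (editor's sketch, not part of the patch): the intended consumption
// pattern for the splitters - reset once, then pull ubatches until an empty one
// (n_tokens == 0, see ubatch_add() below) signals that the batch is consumed.
static void example_split_loop(llama_batch_allocr & balloc, uint32_t n_ubatch) {
    balloc.split_reset();

    while (true) {
        llama_ubatch ub = balloc.split_simple(n_ubatch); // or split_equal / split_seq

        if (ub.n_tokens == 0) {
            break;
        }

        // ... process ub ...
    }
}
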
llama_ubatch llama_batch_allocr::split_equal(uint32_t n_ubatch) {
    std::vector<seq_set_t> cur_seq_set;

    // determine the non-overlapping sequence sets participating in this ubatch
    for (int32_t i = 0; i < batch.n_tokens; ++i) {
        if (used[i]) {
            continue;
        }

        bool add = true;

        for (uint32_t s = 0; s < cur_seq_set.size(); ++s) {
            // no overlap with existing sequence sets:
            if (!(cur_seq_set[s] & seq_set[i]).none()) {
                add = false;
                break;
            }
        }

        if (add) {
            cur_seq_set.push_back(seq_set[i]);

            if (cur_seq_set.size() > n_ubatch) {
                break;
            }
        }
    }

    const uint32_t n_seqs = cur_seq_set.size();

    // we are done
    if (n_seqs == 0) {
        return {};
    }

    // the current batch index of each sequence set
    std::vector<int32_t> cur_idx(n_seqs, 0);

    for (uint32_t s = 0; s < n_seqs; ++s) {
        while (used[seq_set_map[cur_seq_set[s]][cur_idx[s]]]) {
            ++cur_idx[s];
        }
    }

    // the list of batch indices for each sequence set
    // at the end we will concat these to get the final ubatch
    std::vector<idx_vec_t> idxs_per_seq(n_seqs);

    while (true) {
        // we can only add new n_seq_tokens tokens if all the sequence sets have at least one more unused token and
        // if we haven't reached n_ubatch
        bool can_expand = true;

        for (uint32_t s = 0; s < n_seqs; ++s) {
            if (cur_idx[s] >= (int32_t) seq_set_map[cur_seq_set[s]].size()) {
                can_expand = false;
                break;
            }
        }

        if (!can_expand) {
            break;
        }

        for (uint32_t s = 0; s < n_seqs; ++s) {
            const int32_t idx = seq_set_map[cur_seq_set[s]][cur_idx[s]];

            idxs_per_seq[s].push_back(idx);

            used[idx] = true;

            ++cur_idx[s];
        }

        if ((idxs_per_seq[0].size() + 1)*n_seqs > n_ubatch) {
            break;
        }
    }

    // concat the per-sequence-set lists
    std::vector<int32_t> idxs;

    for (uint32_t s = 0; s < n_seqs; ++s) {
        idxs.insert(idxs.end(), idxs_per_seq[s].begin(), idxs_per_seq[s].end());
    }

    return ubatch_add(idxs, n_seqs, true);
}

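// Illustration (editor's sketch, not part of the patch): the two bitset tests the
// splitters rely on. Disjointness: (a & b).none() is true iff the sets share no
// sequence. Subset: (a & b) == b is true iff b is contained in a (this is the
// admission test in split_seq() below).
static void example_seq_set_tests() {
    std::bitset<64> a, b, c; // stand-in for seq_set_t = std::bitset<LLAMA_MAX_SEQ>
    a.set(0); a.set(1);      // a = {0, 1}
    b.set(1);                // b = {1}
    c.set(2);                // c = {2}

    assert(!(a & b).none()); // a and b overlap -> cannot join the same equal-split ubatch
    assert( (a & c).none()); // a and c are disjoint -> compatible sequence sets
    assert( (a & b) == b);   // b is a subset of a
    assert( (a & c) != c);   // c is not a subset of a
}
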
llama_ubatch llama_batch_allocr::split_seq(uint32_t n_ubatch) {
    // find the first unused token
    uint32_t cur_idx = 0;
    while (cur_idx < used.size() && used[cur_idx]) {
        ++cur_idx;
    }

    // we are done
    if (cur_idx >= used.size()) {
        return {};
    }

    // this is the starting sequence set
    // we allow adding tokens only if their sequence set is a subset of the current sequence set
    auto cur_seq_set = seq_set[cur_idx];

    std::vector<int32_t> idxs;

    while (true) {
        idxs.push_back(cur_idx);

        used[cur_idx] = true;

        if (idxs.size() >= n_ubatch) {
            break;
        }

        do {
            ++cur_idx;
        } while (cur_idx < get_n_tokens() && (used[cur_idx] || ((cur_seq_set & seq_set[cur_idx]) != seq_set[cur_idx])));

        if (cur_idx == get_n_tokens()) {
            break;
        }

        cur_seq_set = seq_set[cur_idx];
    }

    return ubatch_add(idxs, 1, true);
}

void llama_batch_allocr::clear() {
    n_outputs = 0;

    batch = {};

    pos       .clear();
    n_seq_id  .clear();
    seq_id    .clear();
    seq_id_unq.clear();
    output    .clear();

    for (auto & cur : seq_pos) {
@ -567,6 +594,177 @@ void llama_batch_allocr::clear() {
    for (auto & cur : seq_cpl) {
        std::fill(cur.begin(), cur.end(), false);
    }

    seq_set.clear();

    seq_set_map.clear();

    std::fill(seq_idx.begin(), seq_idx.end(), -1);
}

llama_ubatch llama_batch_allocr::ubatch_add(const std::vector<int32_t> & idxs, uint32_t n_seqs, bool equal_seqs) {
    const uint32_t n_tokens = idxs.size();

    assert(n_tokens%n_seqs == 0);

    ubatches.emplace_back();

    auto & ubatch = ubatches.back();

    const int32_t n_pos_cur = batch.embd ? n_pos_per_embd : 1;

    const int64_t n_embd_all = batch.embd ? (int64_t) n_tokens*n_embd : 0;
    const int64_t n_pos_all  =              (int64_t) n_tokens*n_pos_cur;

    ubatch.token     .resize(n_tokens);
    ubatch.embd      .resize(n_embd_all);
    ubatch.pos       .resize(n_pos_all);
    ubatch.n_seq_id  .resize(n_tokens);
    ubatch.seq_id    .resize(n_tokens);
    ubatch.seq_id_unq.resize(0);
    ubatch.seq_idx   .resize(LLAMA_MAX_SEQ, -1);
    ubatch.output    .resize(n_tokens);

    seq_set_t seq_set_unq;

    for (size_t i = 0; i < idxs.size(); ++i) {
        if (batch.token) {
            ubatch.token[i] = batch.token[idxs[i]];
        }

        if (batch.embd) {
            memcpy(ubatch.embd.data() + i*n_embd, batch.embd + (int64_t) idxs[i]*n_embd, n_embd*sizeof(float));
        }

        for (int j = 0; j < n_pos_cur; ++j) {
            ubatch.pos[j*n_tokens + i] = batch.pos[j*batch.n_tokens + idxs[i]];
        }

        ubatch.n_seq_id[i] = batch.n_seq_id[idxs[i]];
        ubatch.seq_id[i]   = batch.seq_id[idxs[i]];
        ubatch.output[i]   = batch.logits[idxs[i]];

        for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
            seq_set_unq.set(ubatch.seq_id[i][s]);
        }

        if (ubatch.output[i]) {
            out_ids.push_back(idxs[i]);
        }
    }

    for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
        if (seq_set_unq.test(s)) {
            ubatch.seq_idx[s] = ubatch.seq_id_unq.size();
            ubatch.seq_id_unq.push_back(s);
        }
    }

    llama_ubatch res {
        /*.equal_seqs   =*/ equal_seqs,
        /*.n_tokens     =*/ n_tokens,
        /*.n_seq_tokens =*/ n_tokens/n_seqs,
        /*.n_seqs       =*/ n_seqs,
        /*.n_seqs_unq   =*/ (uint32_t) ubatch.seq_id_unq.size(),

        /*.token        =*/ batch.token ? ubatch.token.data() : nullptr,
        /*.embd         =*/ batch.embd ? ubatch.embd.data() : nullptr,
        /*.pos          =*/ ubatch.pos.data(),
        /*.n_seq_id     =*/ ubatch.n_seq_id.data(),
        /*.seq_id       =*/ ubatch.seq_id.data(),
        /*.seq_id_unq   =*/ ubatch.seq_id_unq.data(),
        /*.seq_idx      =*/ ubatch.seq_idx.data(),
        /*.output       =*/ ubatch.output.data(),
    };

    if (debug > 0) {
        LLAMA_LOG_DEBUG("%s: added ubatch %d to split:\n", __func__, (int) ubatches.size() - 1);

        ubatch_print(res, debug);
    }

    return res;
}

void llama_batch_allocr::ubatch_print(const llama_ubatch & ubatch, int debug) {
    if (debug > 0) {
        LLAMA_LOG_DEBUG("%s: equal_seqs   = %d\n", __func__, ubatch.equal_seqs);
        LLAMA_LOG_DEBUG("%s: n_tokens     = %d\n", __func__, ubatch.n_tokens);
        LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d\n", __func__, ubatch.n_seq_tokens);
        LLAMA_LOG_DEBUG("%s: n_seqs       = %d\n", __func__, ubatch.n_seqs);
        LLAMA_LOG_DEBUG("%s: n_seqs_unq   = %d\n", __func__, ubatch.n_seqs_unq);

        std::stringstream ss_seq_id_unq;
        std::stringstream ss_seq_idx;

        ss_seq_id_unq << "[ ";
        ss_seq_idx << "[";

        for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
            ss_seq_id_unq << ubatch.seq_id_unq[s] << " ";
        }

        for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
            if (ubatch.seq_idx[s] >= 0) {
                ss_seq_idx << ubatch.seq_idx[s]%10;
            } else {
                ss_seq_idx << ".";
            }
        }

        ss_seq_id_unq << "]";
        ss_seq_idx    << "]";

        LLAMA_LOG_DEBUG("%s: token      = %p\n", __func__, (void *) ubatch.token);
        LLAMA_LOG_DEBUG("%s: embd       = %p\n", __func__, (void *) ubatch.embd);
        LLAMA_LOG_DEBUG("%s: pos        = %p\n", __func__, (void *) ubatch.pos);
        LLAMA_LOG_DEBUG("%s: n_seq_id   = %p\n", __func__, (void *) ubatch.n_seq_id);
        LLAMA_LOG_DEBUG("%s: seq_id     = %p\n", __func__, (void *) ubatch.seq_id);
        LLAMA_LOG_DEBUG("%s: seq_id_unq = %s\n", __func__, ss_seq_id_unq.str().c_str());
        LLAMA_LOG_DEBUG("%s: seq_idx    = %s\n", __func__, ss_seq_idx.str().c_str());
        LLAMA_LOG_DEBUG("%s: output     = %p\n", __func__, (void *) ubatch.output);
        LLAMA_LOG_DEBUG("%s: n_outputs  = %d\n", __func__, n_outputs);

        if (debug > 1) {
            int seq_id_max = 0;
            for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
                for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
                    seq_id_max = std::max(seq_id_max, ubatch.seq_id[i][s]);
                }
            }
            ++seq_id_max;

            LLAMA_LOG_DEBUG("%s: token = [\n", __func__);
            for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
                std::vector<int8_t> seq_id(seq_id_max);

                for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
                    seq_id[ubatch.seq_id[i][s]] = 1;
                }

                std::stringstream ss;
                for (int s = 0; s < seq_id_max; ++s) {
                    if (seq_id[s]) {
                        ss << s%10;
                    } else {
                        ss << ".";
                    }
                }

                if (ubatch.token) {
                    LLAMA_LOG_DEBUG("%s:  %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
                            __func__, i, ubatch.token[i], vocab->token_to_piece(ubatch.token[i]).c_str(),
                            ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]);
                } else {
                    LLAMA_LOG_DEBUG("%s:  %4d: [embd], pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
                            __func__, i, ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]);
                }
            }
            LLAMA_LOG_DEBUG("%s: ]\n", __func__);
        }
    }
}

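// Note (editor's sketch, not part of the patch): the dumps above are gated by the
// allocator's `debug` member, driven by the LLAMA_BATCH_DEBUG environment variable
// (see the header below). Assuming a POSIX environment, it can also be set
// programmatically before the context is created (requires <cstdlib>):
static void example_enable_batch_debug() {
    setenv("LLAMA_BATCH_DEBUG", "2", /*overwrite =*/ 1); // 1 = ubatch summary, 2 = per-token dump
}
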
//

@ -2,86 +2,44 @@

#include "llama.h"

#include "llama-cparams.h"

#include <array>
#include <vector>
#include <set>
#include <bitset>
#include <unordered_map>

// keep this struct lightweight
// it points to data in `llama_batch_allocr`
struct llama_ubatch {
    bool equal_seqs;
    // TODO: whole_seqs for embeddings?

    uint32_t n_tokens;     // total tokens (n_seq_tokens * n_seqs)
    uint32_t n_seq_tokens; // tokens per sequence set
    uint32_t n_seqs;       // sequence sets in the ubatch
    uint32_t n_seqs_unq;   // unique sequence ids in the ubatch

    // seq_id_unq: unique sequence ids in the ubatch
    // seq_idx:    indices of the unique sequence ids in the ubatch in [0, n_seqs_unq)
    //             used for extracting sequence pooled embeddings

    //                          // size               | idx | val
    llama_token  *  token;      // [n_tokens]         | i   | id, token
    float        *  embd;       // [n_embd, n_tokens] | i   | embd
    llama_pos    *  pos;        // [n_tokens]         | i   | pos
    int32_t      *  n_seq_id;   // [n_tokens]         | i   | -
    llama_seq_id ** seq_id;     // [n_tokens]         | s   | s0, s1, seq_id
    llama_seq_id *  seq_id_unq; // [n_seqs_unq]       | s   | seq_id
    int32_t      *  seq_idx;    // [LLAMA_MAX_SEQ]    | -   | seq_idx
    int8_t       *  output;     // [n_tokens]         | i   | -
};
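// Illustration (editor's sketch, not part of the patch): seq_idx inverts seq_id_unq,
// mapping a sparse sequence id to its dense row in pooled-output tensors. A consumer
// walks the unique sequences of a ubatch like this (the pooling code later in this
// diff follows the same pattern):
static void example_walk_unique_seqs(const llama_ubatch & ubatch) {
    for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
        const llama_seq_id seq_id  = ubatch.seq_id_unq[s];   // sparse id in [0, LLAMA_MAX_SEQ)
        const int32_t      seq_idx = ubatch.seq_idx[seq_id]; // dense index, equals s here

        // row seq_idx of a pooled tensor belongs to sequence seq_id
        (void) seq_idx;
    }
}
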
// a helper for sanitizing, fulfilling and splitting a batch
class llama_batch_allocr {
public:
    llama_batch_allocr(uint32_t n_pos_per_embd);

    // sanitize and auto-gen missing data in the input batch
    // memory is optional. if provided will be used to check for sequence continuity and to determine the positions
@ -89,20 +47,57 @@ public:
            const llama_batch & batch_inp,
            const llama_vocab & vocab,
            const llama_memory_i * memory,
            uint32_t n_embd,
            bool output_all);

    const llama_batch & get_batch() const;

    uint32_t get_n_tokens()  const;
    uint32_t get_n_outputs() const;

    // the array of output indices in the order they were encountered during the ubatch splitting
    std::vector<int32_t> & get_out_ids();

    // min/max positions of each sequence in the current ubatch
    llama_pos seq_pos_min(llama_seq_id seq_id) const;
    llama_pos seq_pos_max(llama_seq_id seq_id) const;

    // call once before splitting the batch to reset the internal state
    void split_reset();

    // simple split, unknown number of sequence sets of unequal lengths
    llama_ubatch split_simple(uint32_t n_ubatch);

    // make ubatches of equal-length sequence sets
    llama_ubatch split_equal(uint32_t n_ubatch);

    // sequence-set-wise split - each ubatch contains a single sequence set
    llama_ubatch split_seq(uint32_t n_ubatch);

    // a helper method for creating a well-defined ubatch of tokens
    // TODO: support embeddings if needed in the future
    llama_ubatch ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs);

private:
    void clear();

    // create the next ubatch based on the provided batch indices (idxs) and the number of sequence sets (n_seqs)
    // return llama_ubatch.n_tokens == 0 if the entire batch was consumed
    llama_ubatch ubatch_add(const std::vector<int32_t> & idxs, uint32_t n_seqs, bool equal_seqs);

    // for debugging, start with LLAMA_BATCH_DEBUG=2
    void ubatch_print(const llama_ubatch & ubatch, int debug);

    llama_batch batch;

    // only for debugging purposes
    const llama_vocab * vocab;

    // TODO: this is more of a temporary solution until we have a better way to handle multiple positions per token/embd
    // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
    const uint32_t n_pos_per_embd;

    uint32_t n_embd;
    uint32_t n_outputs;

    std::array<llama_seq_id, 1> seq_id_0 = { 0 }; // default sequence id
@ -110,10 +105,43 @@ private:
    std::vector<llama_pos>      pos;
    std::vector<int32_t>        n_seq_id;
    std::vector<llama_seq_id *> seq_id;
    std::vector<llama_seq_id>   seq_id_unq;
    std::vector<int32_t>        seq_idx;
    std::vector<int8_t>         output;

    using pos_set_t = std::set<llama_pos>;
    using seq_cpl_t = std::vector<bool>;

    std::vector<pos_set_t> seq_pos; // seq_pos[s]: the set of positions in sequence s
    std::vector<seq_cpl_t> seq_cpl; // seq_cpl[s0][s1]: if sequence s0 is coupled to sequence s1

    using idx_vec_t = std::vector<int32_t>;
    using seq_set_t = std::bitset<LLAMA_MAX_SEQ>;

    std::vector<seq_set_t> seq_set; // seq_set[i]: the sequence set of token i

    std::unordered_map<seq_set_t, idx_vec_t> seq_set_map; // the indices at which the sequence set appears

    // batch indices of the output
    std::vector<int32_t> out_ids;

    // used[i] indicates if token i has already been used in a previous ubatch
    std::vector<bool> used;

    // llama_ubatch points to this data:
    struct ubatch {
        std::vector<llama_token>    token;
        std::vector<float>          embd;
        std::vector<llama_pos>      pos;
        std::vector<int32_t>        n_seq_id;
        std::vector<llama_seq_id *> seq_id;
        std::vector<llama_seq_id>   seq_id_unq;
        std::vector<int32_t>        seq_idx;
        std::vector<int8_t>         output;
    };

    // current splitting state:
    std::vector<ubatch> ubatches;

    int debug;
};
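// Illustration (editor's sketch, not part of the patch): seq_set_map can key on
// std::bitset because the standard library specializes std::hash for it (C++11).
// The grouping the allocator performs - token indices bucketed by their exact
// sequence set - boils down to:
static void example_group_by_seq_set() {
    using seq_set_t = std::bitset<LLAMA_MAX_SEQ>;

    std::vector<seq_set_t> seq_set(5);    // toy data: 5 tokens, 2 sequences
    seq_set[0].set(0);                    // token 0: {0}
    seq_set[1].set(0);                    // token 1: {0}
    seq_set[2].set(1);                    // token 2: {1}
    seq_set[3].set(0); seq_set[3].set(1); // token 3: {0, 1} - shared
    seq_set[4].set(1);                    // token 4: {1}

    std::unordered_map<seq_set_t, std::vector<int32_t>> seq_set_map;
    for (int32_t i = 0; i < (int32_t) seq_set.size(); ++i) {
        seq_set_map[seq_set[i]].push_back(i);
    }

    assert(seq_set_map.size() == 3); // the distinct sets: {0}, {1}, {0, 1}
}
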

@ -20,7 +20,7 @@ llama_context::llama_context(
        const llama_model & model,
              llama_context_params params) :
    model(model),
    balloc(std::make_unique<llama_batch_allocr>(model.hparams.n_pos_per_embd())) {
    LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__);

    t_start_us = model.t_start_us;

|
||||||
}
|
}
|
||||||
|
|
||||||
int llama_context::encode(const llama_batch & batch_inp) {
|
int llama_context::encode(const llama_batch & batch_inp) {
|
||||||
|
GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT
|
||||||
|
|
||||||
if (batch_inp.n_tokens == 0) {
|
if (batch_inp.n_tokens == 0) {
|
||||||
LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
|
LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const auto & hparams = model.hparams;
|
||||||
|
|
||||||
|
const int64_t n_embd = hparams.n_embd;
|
||||||
|
|
||||||
// note: during encode, we always pass the full sequence starting from pos = 0
|
// note: during encode, we always pass the full sequence starting from pos = 0
|
||||||
if (!batch_allocr->init(batch_inp, model.vocab, nullptr, true)) {
|
if (!balloc->init(batch_inp, model.vocab, nullptr, n_embd, true)) {
|
||||||
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
|
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
const llama_batch & batch = batch_allocr->get_batch();
|
const uint32_t n_tokens = balloc->get_n_tokens();
|
||||||
|
|
||||||
const uint32_t n_tokens = batch.n_tokens;
|
const llama_ubatch ubatch = balloc->split_simple(n_tokens);
|
||||||
|
|
||||||
GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
|
|
||||||
|
|
||||||
// micro-batching is not possible for non-causal encoding, so we process the batch in a single shot
|
// micro-batching is not possible for non-causal encoding, so we process the batch in a single shot
|
||||||
GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens");
|
GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens");
|
||||||
|
@ -751,14 +755,6 @@ int llama_context::encode(const llama_batch & batch_inp) {

    n_queued_tokens += n_tokens;

    // reserve output buffer
    if (output_reserve(n_tokens) < n_tokens) {
        LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens);

@ -817,34 +813,28 @@ int llama_context::encode(const llama_batch & batch_inp) {
                {
                    // extract sequence embeddings
                    auto & embd_seq_out = embd_seq;

                    for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
                        const llama_seq_id seq_id  = ubatch.seq_id_unq[s];
                        const int32_t      seq_idx = ubatch.seq_idx[seq_id];

                        embd_seq_out[seq_id].resize(n_embd);
                        ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
                    }
                } break;
            case LLAMA_POOLING_TYPE_RANK:
                {
                    // extract the rerank score - n_cls_out floats per sequence
                    auto & embd_seq_out = embd_seq;

                    const uint32_t n_cls_out = hparams.n_cls_out;

                    for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
                        const llama_seq_id seq_id  = ubatch.seq_id_unq[s];
                        const int32_t      seq_idx = ubatch.seq_idx[seq_id];

                        embd_seq_out[seq_id].resize(n_cls_out);
                        ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
                    }
                } break;
            case LLAMA_POOLING_TYPE_UNSPECIFIED:

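// Illustration (editor's sketch, not part of the patch): from the public API side
// the pooled per-sequence embeddings extracted above are read back with
// llama_get_embeddings_seq(); the seq_idx bookkeeping stays internal.
static void example_read_pooled_embd(llama_context * ctx, const llama_model * model) {
    const int     n_embd = llama_model_n_embd(model);
    const float * embd   = llama_get_embeddings_seq(ctx, 0); // pooled embedding of sequence 0

    (void) n_embd;
    (void) embd;
}
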
@ -869,12 +859,16 @@ int llama_context::encode(const llama_batch & batch_inp) {
            cross.v_embd.resize(cross.n_embd*cross.n_enc);
            memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd));

            const auto & batch = balloc->get_batch();

            // remember the sequence ids used during the encoding - needed for cross attention later
            cross.seq_ids_enc.resize(n_tokens);
            for (uint32_t i = 0; i < n_tokens; i++) {
                cross.seq_ids_enc[i].clear();

                for (int s = 0; s < batch.n_seq_id[i]; s++) {
                    const llama_seq_id seq_id = batch.seq_id[i][s];

                    cross.seq_ids_enc[i].insert(seq_id);
                }
            }

@ -884,6 +878,8 @@ int llama_context::encode(const llama_batch & batch_inp) {
}

int llama_context::decode(const llama_batch & batch_inp) {
    GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT

    if (!memory) {
        //LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__);
        return encode(batch_inp);

@ -894,29 +890,24 @@ int llama_context::decode(const llama_batch & batch_inp) {
        return -1;
    }

    const auto & vocab   = model.vocab;
    const auto & hparams = model.hparams;

    const int32_t n_vocab = vocab.n_tokens();
    const int64_t n_embd  = hparams.n_embd;

    // when computing embeddings, all tokens are output
    const bool output_all = cparams.embeddings;

    if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, output_all)) {
        LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
        return -1;
    }

    const uint32_t n_tokens_all  = balloc->get_n_tokens();
    const uint32_t n_outputs_all = balloc->get_n_outputs();

    if (output_all) {
        // require that all tokens are output
        if (n_outputs_all != n_tokens_all) {
            LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n",

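// Illustration (editor's sketch, not part of the patch): the output_all path above
// requires that, with embeddings enabled, every token of the submitted batch is
// marked as an output before calling llama_decode():
static void example_mark_all_outputs(llama_batch & batch) {
    for (int32_t i = 0; i < batch.n_tokens; ++i) {
        batch.logits[i] = true; // required when pooled embeddings are requested
    }
}
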
@ -945,7 +936,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
    llama_memory_state_ptr mstate;

    while (true) {
        mstate = memory->init_batch(*balloc, cparams.n_ubatch, output_all);
        if (!mstate) {
            return -2;
        }

@ -966,19 +957,19 @@ int llama_context::decode(const llama_batch & batch_inp) {
                    did_optimize = true;

                    if (kv_self_update(true)) {
                        LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, balloc->get_n_tokens());

                        continue;
                    }
                }

                LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, balloc->get_n_tokens());

                return 1;
            }
        case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
            {
                LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, balloc->get_n_tokens());

                return -2;
            }

@ -1005,7 +996,6 @@ int llama_context::decode(const llama_batch & batch_inp) {
        if (n_outputs_all == n_tokens_all) {
            n_outputs_new = ubatch.n_tokens;
        } else {
            for (uint32_t i = 0; i < ubatch.n_tokens; i++) {
                n_outputs_new += (int32_t) (ubatch.output[i] != 0);
            }

|
||||||
// extract sequence embeddings (cleared before processing each batch)
|
// extract sequence embeddings (cleared before processing each batch)
|
||||||
auto & embd_seq_out = embd_seq;
|
auto & embd_seq_out = embd_seq;
|
||||||
|
|
||||||
for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
|
for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
|
||||||
const llama_seq_id seq_id = ubatch.seq_id[s][0];
|
const llama_seq_id seq_id = ubatch.seq_id_unq[s];
|
||||||
if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
|
const int32_t seq_idx = ubatch.seq_idx[seq_id];
|
||||||
continue;
|
|
||||||
}
|
|
||||||
embd_seq_out[seq_id].resize(n_embd);
|
embd_seq_out[seq_id].resize(n_embd);
|
||||||
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
|
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
|
||||||
}
|
}
|
||||||
} break;
|
} break;
|
||||||
case LLAMA_POOLING_TYPE_RANK:
|
case LLAMA_POOLING_TYPE_RANK:
|
||||||
{
|
{
|
||||||
// extract the rerank score - a single float per sequence
|
// extract the rerank score - n_cls_out floats per sequence
|
||||||
auto & embd_seq_out = embd_seq;
|
auto & embd_seq_out = embd_seq;
|
||||||
|
|
||||||
for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
|
const uint32_t n_cls_out = hparams.n_cls_out;
|
||||||
const llama_seq_id seq_id = ubatch.seq_id[s][0];
|
|
||||||
if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
|
for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
|
||||||
continue;
|
const llama_seq_id seq_id = ubatch.seq_id_unq[s];
|
||||||
}
|
const int32_t seq_idx = ubatch.seq_idx[seq_id];
|
||||||
embd_seq_out[seq_id].resize(1);
|
|
||||||
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float));
|
embd_seq_out[seq_id].resize(n_cls_out);
|
||||||
|
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
|
||||||
}
|
}
|
||||||
} break;
|
} break;
|
||||||
case LLAMA_POOLING_TYPE_UNSPECIFIED:
|
case LLAMA_POOLING_TYPE_UNSPECIFIED:
|
||||||
|
@ -1145,7 +1135,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
    if (n_outputs > 0) {
        bool sorted_output = true;

        auto & out_ids = balloc->get_out_ids();

        GGML_ASSERT(out_ids.size() == (size_t) n_outputs);

@ -1318,8 +1308,8 @@ ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, u

    this->n_outputs = n_outputs;

    llama_batch_allocr balloc(model.hparams.n_pos_per_embd());
    llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs);

    auto * gf = graph_init();
    auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mstate);

@ -2039,7 +2029,12 @@ void llama_context::opt_epoch_iter(
            batch.logits  [pos_batch] = true;
        }

        if (!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd, true)) {
            LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
            return;
        }

        const uint32_t n_tokens_all = balloc->get_n_tokens();

        n_queued_tokens += n_tokens_all;

@ -2047,7 +2042,7 @@ void llama_context::opt_epoch_iter(

        uint32_t n_outputs_all = n_tokens_all;

        auto mstate = memory->init_batch(*balloc, cparams.n_ubatch, true);
        if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
            LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__);
            break;

@ -247,7 +247,7 @@ private:
    std::map<llama_seq_id, std::vector<float>> embd_seq;

    // reuse the batch_allocr to avoid unnecessary memory allocations
    std::unique_ptr<llama_batch_allocr> balloc;

    uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch

@ -92,12 +92,8 @@ void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
}

void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
    GGML_ASSERT(out_ids);

    const int64_t n_tokens = ubatch->n_tokens;

    GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer));
@ -107,133 +103,116 @@ void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
        for (int i = 0; i < n_tokens; ++i) {
            data[i] = i;
        }

        return;
    }

    GGML_ASSERT(ubatch->output);

    int n_outputs = 0;

    for (int i = 0; i < n_tokens; ++i) {
        if (ubatch->output[i]) {
            data[n_outputs++] = i;
        }
    }
}

void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) {
    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
        const int64_t n_tokens     = ubatch->n_tokens;
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
        const int64_t n_seqs_unq   = ubatch->n_seqs_unq;

        GGML_ASSERT(mean);
        GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer));

        float * data = (float *) mean->data;
        memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean));

        std::vector<uint64_t> sums(n_seqs_unq, 0);
        for (int i = 0; i < n_tokens; i += n_seq_tokens) {
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];

                sums[seq_idx] += ubatch->n_seq_tokens;
            }
        }

        std::vector<float> div(n_seqs_unq, 0.0f);
        for (int s = 0; s < n_seqs_unq; ++s) {
            const uint64_t sum = sums[s];
            if (sum > 0) {
                div[s] = 1.0f/float(sum);
            }
        }

        for (int i = 0; i < n_tokens; i += n_seq_tokens) {
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];

                for (int j = 0; j < n_seq_tokens; ++j) {
                    data[seq_idx*n_tokens + i + j] = div[seq_idx];
                }
            }
        }
    }
}

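// Worked toy example (editor's addition, not part of the patch): a ubatch with
// n_seq_tokens = 2 and two sequence sets gives n_tokens = 4, n_seqs_unq = 2.
// With div[s] = 1/2 for both sequences, the [n_tokens x n_seqs_unq] mean matrix
// written above is:
//
//                 seq_idx 0   seq_idx 1
//     token 0        0.5          0
//     token 1        0.5          0
//     token 2         0          0.5
//     token 3         0          0.5
//
// so multiplying it against the token embeddings averages each sequence's rows.
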
void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) {
    const int64_t n_tokens     = ubatch->n_tokens;
    const int64_t n_seq_tokens = ubatch->n_seq_tokens;
    const int64_t n_seqs_unq   = ubatch->n_seqs_unq;

    if (cparams.embeddings && (
                cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
                cparams.pooling_type == LLAMA_POOLING_TYPE_RANK
                )) {
        GGML_ASSERT(cls);
        GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));

        uint32_t * data = (uint32_t *) cls->data;
        memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls));

        for (int i = 0; i < n_tokens; i += n_seq_tokens) {
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];

                data[seq_idx] = i;
            }
        }
    }

    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) {
        GGML_ASSERT(cls);
        GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));

        uint32_t * data = (uint32_t *) cls->data;
        memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls));

        std::vector<int> last_pos(n_seqs_unq, -1);
        std::vector<int> last_row(n_seqs_unq, -1);

        for (int i = 0; i < n_tokens; ++i) {
            const llama_pos pos = ubatch->pos[i];

            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];

                if (pos >= last_pos[seq_idx]) {
                    last_pos[seq_idx] = pos;
                    last_row[seq_idx] = i;
                }
            }
        }

        for (int s = 0; s < n_seqs_unq; ++s) {
            if (last_row[s] >= 0) {
                data[s] = last_row[s];
            }
        }
    }
}

@ -266,33 +245,28 @@ void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) {
}

void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
    const int64_t n_kv     = ubatch->n_tokens;
    const int64_t n_tokens = ubatch->n_tokens;

    GGML_ASSERT(kq_mask);
    GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));

    float * data = (float *) kq_mask->data;

    for (int h = 0; h < 1; ++h) {
        for (int i1 = 0; i1 < n_tokens; ++i1) {
            const llama_seq_id s1 = ubatch->seq_id[i1][0];

            for (int i0 = 0; i0 < n_tokens; ++i0) {
                float f = -INFINITY;

                for (int s = 0; s < ubatch->n_seq_id[i0]; ++s) {
                    const llama_seq_id s0 = ubatch->seq_id[i0][0];

                    // TODO: reimplement this like in llama_kv_cache_unified
                    if (s0 == s1 && (!cparams.causal_attn || ubatch->pos[i0] <= ubatch->pos[i1])) {
                        if (hparams.use_alibi) {
                            f = -std::abs(ubatch->pos[i0] - ubatch->pos[i1]);
                        } else {
                            f = 0.0f;
                        }
@ -300,55 +274,7 @@ void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
                    }
                }

                data[h*(n_kv*n_tokens) + i1*n_kv + i0] = f;
            }
        }
    }
}

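// Worked toy example (editor's addition, not part of the patch): three tokens with
// seq ids [0, 1, 0] at positions [0, 0, 1], causal_attn = true, use_alibi = false.
// The mask row of token i1 gets 0.0f where attention is allowed and -INFINITY
// elsewhere:
//
//                i0=0 (s0,p0)  i0=1 (s1,p0)  i0=2 (s0,p1)
//     i1=0:          0.0          -inf          -inf
//     i1=1:         -inf           0.0          -inf
//     i1=2:          0.0          -inf           0.0
//
// same sequence and pos[i0] <= pos[i1] -> visible; everything else stays masked.
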
@ -371,7 +297,8 @@ void llm_graph_input_attn_kv_unified_iswa::set_input(const llama_ubatch * ubatch
}

void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
    GGML_ASSERT(cross_kq_mask);

    const int64_t n_enc    = cross_kq_mask->ne[0];
    const int64_t n_tokens = ubatch->n_tokens;

@ -381,17 +308,19 @@ void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
    float * data = (float *) cross_kq_mask->data;

    for (int h = 0; h < 1; ++h) {
        for (int i = 0; i < n_tokens; ++i) {
            for (int j = 0; j < n_enc; ++j) {
                float f = -INFINITY;

                for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
                    const llama_seq_id seq_id = ubatch->seq_id[i][s];

                    if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) {
                        f = 0.0f;
                    }
                }

                data[h*(n_enc*n_tokens) + i*n_enc + j] = f;
            }
        }

@ -402,7 +331,6 @@ void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
        }
    }
}

void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) {
    if (self_kq_mask) {

@ -467,10 +395,6 @@ llm_graph_context::llm_graph_context(const llm_graph_params & params) :
    res (std::make_unique<llm_graph_result>()) {
}

void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const {
    if (cb_func) {
        cb_func(ubatch, cur, name, il);

@ -915,11 +839,11 @@ ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const {
}

ggml_tensor * llm_graph_context::build_inp_pos() const {
    auto inp = std::make_unique<llm_graph_input_pos>(hparams.n_pos_per_embd());

    auto & cur = inp->pos;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd());
    ggml_set_input(cur);

    res->add_input(std::move(inp));

@ -942,6 +866,14 @@ ggml_tensor * llm_graph_context::build_inp_attn_scale() const {
|
||||||
}
|
}
|
||||||
|
|
||||||
ggml_tensor * llm_graph_context::build_inp_out_ids() const {
|
ggml_tensor * llm_graph_context::build_inp_out_ids() const {
|
||||||
|
// note: when all tokens are output, we could skip this optimization to spare the ggml_get_rows() calls,
|
||||||
|
// but this would make the graph topology depend on the number of output tokens, which can interere with
|
||||||
|
// features that require constant topology such as pipline parallelism
|
||||||
|
// ref: https://github.com/ggml-org/llama.cpp/pull/14275#issuecomment-2987424471
|
||||||
|
//if (n_outputs < n_tokens) {
|
||||||
|
// return nullptr;
|
||||||
|
//}
|
||||||
|
|
||||||
auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs);
|
auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs);
|
||||||
|
|
||||||
auto & cur = inp->out_ids;
|
auto & cur = inp->out_ids;
|
||||||
|
@ -959,7 +891,7 @@ ggml_tensor * llm_graph_context::build_inp_mean() const {
|
||||||
|
|
||||||
auto & cur = inp->mean;
|
auto & cur = inp->mean;
|
||||||
|
|
||||||
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
|
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq);
|
||||||
ggml_set_input(cur);
|
ggml_set_input(cur);
|
||||||
|
|
||||||
res->add_input(std::move(inp));
|
res->add_input(std::move(inp));
|
||||||
|
@ -972,7 +904,7 @@ ggml_tensor * llm_graph_context::build_inp_cls() const {
|
||||||
|
|
||||||
auto & cur = inp->cls;
|
auto & cur = inp->cls;
|
||||||
|
|
||||||
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
|
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq);
|
||||||
ggml_set_input(cur);
|
ggml_set_input(cur);
|
||||||
|
|
||||||
res->add_input(std::move(inp));
|
res->add_input(std::move(inp));
|
||||||
|
|
|
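The last two hunks above shrink the pooling inputs from one row/entry per token to one per unique sequence in the ubatch (ubatch.n_seqs_unq). As a standalone illustration of why an [n_tokens x n_seqs_unq] matrix suffices for mean pooling — plain C++, all names hypothetical and not taken from the tree:

    #include <cstdio>
    #include <vector>

    int main() {
        const int n_tokens = 4, n_seqs_unq = 2;
        const int seq_of_token[4] = {0, 0, 1, 1};   // token -> unique sequence index

        // row r holds weight 1/len(r) at the columns of its tokens, 0 elsewhere;
        // multiplying this matrix with the token embeddings averages each sequence
        std::vector<float> mean(n_seqs_unq*n_tokens, 0.0f);
        std::vector<int>   len(n_seqs_unq, 0);
        for (int t = 0; t < n_tokens; ++t) {
            len[seq_of_token[t]]++;
        }
        for (int t = 0; t < n_tokens; ++t) {
            const int r = seq_of_token[t];
            mean[r*n_tokens + t] = 1.0f/len[r];
        }
        std::printf("row 0: %.2f %.2f %.2f %.2f\n", mean[0], mean[1], mean[2], mean[3]);
        return 0;
    }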
@@ -95,14 +95,14 @@ public:

 class llm_graph_input_pos : public llm_graph_input_i {
 public:
-    llm_graph_input_pos(int64_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {}
+    llm_graph_input_pos(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {}
     virtual ~llm_graph_input_pos() = default;

     void set_input(const llama_ubatch * ubatch) override;

     ggml_tensor * pos = nullptr; // I32 [n_batch]

-    const int64_t n_pos_per_embd = 1;
+    const uint32_t n_pos_per_embd = 1;
 };

 // temperature tuning, used by llama4

@@ -464,8 +464,6 @@ struct llm_graph_context {

     llm_graph_context(const llm_graph_params & params);

-    int64_t n_pos_per_embd() const;
-
     void cb(ggml_tensor * cur, const char * name, int il) const;

     //

@@ -90,6 +90,10 @@ bool llama_hparams::is_recurrent(uint32_t il) const {
     return recurrent_layer_arr[il];
 }

+uint32_t llama_hparams::n_pos_per_embd() const {
+    return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
+}
+
 bool llama_hparams::is_swa(uint32_t il) const {
     if (il < n_layer) {
         return swa_layers[il];

@@ -192,6 +192,8 @@ struct llama_hparams {
     // whether or not the given layer is recurrent (for hybrid models)
     bool is_recurrent(uint32_t il) const;

+    uint32_t n_pos_per_embd() const;
+
     bool is_swa(uint32_t il) const;
 };

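n_pos_per_embd() moves here from the graph context and returns 4 for M-RoPE because each token carries multiple rotary position components; build_inp_pos (earlier in this diff) sizes its flat I32 buffer accordingly. A size-only sketch of that relationship, with illustrative numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t  n_tokens       = 512;
        const uint32_t n_pos_per_embd = 4;   // the LLAMA_ROPE_TYPE_MROPE case; 1 otherwise

        // mirrors cur = ggml_new_tensor_1d(..., (int64_t)n_tokens*hparams.n_pos_per_embd())
        std::printf("pos entries: %lld\n", (long long) (n_tokens*(int64_t) n_pos_per_embd));
        return 0;
    }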
@@ -95,19 +95,22 @@ llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const {
     return kv_swa->seq_pos_max(seq_id);
 }

-llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) {
+llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
     GGML_UNUSED(embd_all);

     // first try simple split
     do {
-        auto sbatch = llama_sbatch(batch, hparams.n_embd, true);
+        balloc.split_reset();

         std::vector<llama_ubatch> ubatches;
+        while (true) {
+            auto ubatch = balloc.split_simple(n_ubatch);

-        while (sbatch.n_tokens > 0) {
-            auto ubatch = sbatch.split_simple(n_ubatch);
+            if (ubatch.n_tokens == 0) {
+                break;
+            }

-            ubatches.push_back(ubatch);
+            ubatches.push_back(std::move(ubatch)); // NOLINT
         }

         auto heads_base = kv_base->prepare(ubatches);

@@ -123,19 +126,22 @@ llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch
         assert(heads_base.size() == heads_swa.size());

         return std::make_unique<llama_kv_cache_unified_iswa_state>(
-            this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches));
+            this, std::move(heads_base), std::move(heads_swa), std::move(ubatches));
     } while (false);

     // if it fails, try equal split
     do {
-        auto sbatch = llama_sbatch(batch, hparams.n_embd, false);
+        balloc.split_reset();

         std::vector<llama_ubatch> ubatches;
+        while (true) {
+            auto ubatch = balloc.split_equal(n_ubatch);

-        while (sbatch.n_tokens > 0) {
-            auto ubatch = sbatch.split_equal(n_ubatch);
+            if (ubatch.n_tokens == 0) {
+                break;
+            }

-            ubatches.push_back(ubatch);
+            ubatches.push_back(std::move(ubatch)); // NOLINT
         }

         auto heads_base = kv_base->prepare(ubatches);

@@ -151,7 +157,7 @@ llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch
         assert(heads_base.size() == heads_swa.size());

         return std::make_unique<llama_kv_cache_unified_iswa_state>(
-            this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches));
+            this, std::move(heads_base), std::move(heads_swa), std::move(ubatches));
     } while (false);

     // TODO: if we fail again, we should attempt different splitting strategies

@@ -214,15 +220,13 @@ llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(

 llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(
         llama_kv_cache_unified_iswa * kv,
-        llama_sbatch sbatch,
         std::vector<uint32_t> heads_base,
         std::vector<uint32_t> heads_swa,
         std::vector<llama_ubatch> ubatches) :
-    sbatch(std::move(sbatch)),
     ubatches(std::move(ubatches)),
     // note: here we copy the ubatches. not sure if this is ideal
-    state_base(new llama_kv_cache_unified_state(kv->get_base(), {}, std::move(heads_base), this->ubatches)),
-    state_swa (new llama_kv_cache_unified_state(kv->get_swa (), {}, std::move(heads_swa), this->ubatches)),
+    state_base(new llama_kv_cache_unified_state(kv->get_base(), std::move(heads_base), this->ubatches)),
+    state_swa (new llama_kv_cache_unified_state(kv->get_swa (), std::move(heads_swa), this->ubatches)),
     status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) {
 }

@@ -252,12 +256,6 @@ bool llama_kv_cache_unified_iswa_state::apply() {
     return res;
 }

-std::vector<int64_t> & llama_kv_cache_unified_iswa_state::out_ids() {
-    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
-
-    return sbatch.out_ids;
-}
-
 llama_memory_status llama_kv_cache_unified_iswa_state::get_status() const {
     return status;
 }

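The init_batch() rewrites above all share one control-flow change: instead of constructing a local llama_sbatch and looping on sbatch.n_tokens, the caller-owned llama_batch_allocr is reset and drained until it returns an empty ubatch. Extracted as a sketch — the types and the split_reset()/split_simple() calls are exactly those in the hunks, so this compiles only against llama.cpp's internal headers:

    static std::vector<llama_ubatch> collect_ubatches(llama_batch_allocr & balloc, uint32_t n_ubatch) {
        balloc.split_reset();                  // rewind the allocator for a fresh pass

        std::vector<llama_ubatch> ubatches;
        while (true) {
            auto ubatch = balloc.split_simple(n_ubatch);
            if (ubatch.n_tokens == 0) {        // an empty ubatch signals "no tokens left"
                break;
            }
            ubatches.push_back(std::move(ubatch));
        }
        return ubatches;
    }

The same shape recurs with split_equal() and split_seq() in the hunks below; only the splitting strategy differs.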
@@ -32,7 +32,7 @@ public:
     //

     llama_memory_state_ptr init_batch(
-            const llama_batch & batch,
+            llama_batch_allocr & balloc,
             uint32_t n_ubatch,
             bool embd_all) override;

@@ -90,7 +90,6 @@ public:
     // used to create a state from a batch
     llama_kv_cache_unified_iswa_state(
             llama_kv_cache_unified_iswa * kv,
-            llama_sbatch sbatch,
             std::vector<uint32_t> heads_base,
             std::vector<uint32_t> heads_swa,
             std::vector<llama_ubatch> ubatches);

@@ -104,8 +103,6 @@ public:
     bool next()  override;
     bool apply() override;

-    std::vector<int64_t> & out_ids() override;
-
     llama_memory_status  get_status() const override;
     const llama_ubatch & get_ubatch() const override;

@@ -119,8 +116,6 @@ public:
 private:
     //llama_kv_cache_unified_iswa * kv;

-    llama_sbatch sbatch;
-
     // the index of the next ubatch to process
     size_t i_next = 0;

@@ -308,17 +308,23 @@ llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const {
 }

 llama_memory_state_ptr llama_kv_cache_unified::init_batch(
-        const llama_batch & batch,
+        llama_batch_allocr & balloc,
         uint32_t n_ubatch,
         bool embd_all) {
     GGML_UNUSED(embd_all);

     do {
-        auto sbatch = llama_sbatch(batch, hparams.n_embd, true);
+        balloc.split_reset();

         std::vector<llama_ubatch> ubatches;
-        while (sbatch.n_tokens > 0) {
-            ubatches.push_back(sbatch.split_simple(n_ubatch));
+        while (true) {
+            auto ubatch = balloc.split_simple(n_ubatch);
+
+            if (ubatch.n_tokens == 0) {
+                break;
+            }
+
+            ubatches.push_back(std::move(ubatch)); // NOLINT
         }

         auto heads = prepare(ubatches);

@@ -327,7 +333,7 @@ llama_memory_state_ptr llama_kv_cache_unified::init_batch(
         }

         return std::make_unique<llama_kv_cache_unified_state>(
-            this, std::move(sbatch), std::move(heads), std::move(ubatches));
+            this, std::move(heads), std::move(ubatches));
     } while (false);

     return std::make_unique<llama_kv_cache_unified_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);

@@ -644,12 +650,6 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const {
 }

 void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch) {
-    if (debug > 0) {
-        LLAMA_LOG_DEBUG("%s: ubatch info:\n", __func__);
-        LLAMA_LOG_DEBUG("%s:   n_tokens = %d, equal_seqs = %d\n", __func__, ubatch.n_tokens, ubatch.equal_seqs);
-        LLAMA_LOG_DEBUG("%s:   n_seq_tokens = %d, n_seqs = %d\n", __func__, ubatch.n_seq_tokens, ubatch.n_seqs);
-    }
-
     // keep track of the max sequence position that we would overwrite with this ubatch
     // for non-SWA cache, this would be always empty
     llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ];

@@ -657,27 +657,22 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch
         seq_pos_max_rm[s] = -1;
     }

-    for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
-        for (uint32_t j = 0; j < ubatch.n_seq_tokens; ++j) {
-            const uint32_t idx = s*ubatch.n_seq_tokens + j;
+    for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
+        if (!cells.is_empty(head_cur + i)) {
+            assert(cells.seq_count(head_cur + i) == 1);

-            if (!cells.is_empty(head_cur + idx)) {
-                assert(cells.seq_count(head_cur + idx) == 1);
+            const llama_seq_id seq_id = cells.seq_get(head_cur + i);
+            const llama_pos    pos    = cells.pos_get(head_cur + i);

-                const llama_seq_id seq_id = cells.seq_get(head_cur + idx);
-                const llama_pos    pos    = cells.pos_get(head_cur + idx);
-
             seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos);

-                cells.rm(head_cur + idx);
+            cells.rm(head_cur + i);
         }

-            cells.pos_set(head_cur + idx, ubatch.pos[idx]);
+        cells.pos_set(head_cur + i, ubatch.pos[i]);

-            // TODO: fix indexing [UBATCH_IDX]
-            for (int32_t i = 0; i < ubatch.n_seq_id[s]; i++) {
-                cells.seq_add(head_cur + idx, ubatch.seq_id[s][i]);
-            }
+        for (int32_t s = 0; s < ubatch.n_seq_id[i]; s++) {
+            cells.seq_add(head_cur + i, ubatch.seq_id[i][s]);
         }
     }

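The apply_ubatch() rewrite above replaces the (s, j) double loop with a single flat token index i. For the equal-length sequence sets the old code assumed, the two enumerations visit the same indices, since idx = s*n_seq_tokens + j with s = i / n_seq_tokens and j = i % n_seq_tokens. A minimal standalone check of that bijection:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t n_seqs = 3, n_seq_tokens = 4;
        for (uint32_t i = 0; i < n_seqs*n_seq_tokens; ++i) {
            const uint32_t s = i / n_seq_tokens;
            const uint32_t j = i % n_seq_tokens;
            assert(s < n_seqs && s*n_seq_tokens + j == i);
        }
        return 0;
    }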
@@ -696,6 +691,7 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch
             seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1);
         }
     }

     // move the head at the end of the slot
     head = head_cur + ubatch.n_tokens;
 }

@@ -793,8 +789,6 @@ ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_

 void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
     const uint32_t n_tokens = ubatch->n_tokens;
-    const uint32_t n_seq_tokens = ubatch->n_seq_tokens;
-    const uint32_t n_seqs = ubatch->n_seqs;

     GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
     float * data = (float *) dst->data;

@@ -814,26 +808,23 @@ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ub
     //   xxxxx-----
     // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615
     for (uint32_t h = 0; h < 1; ++h) {
-        for (uint32_t s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = ubatch->seq_id[s][0];
+        for (uint32_t i = 0; i < n_tokens; ++i) {
+            const llama_seq_id seq_id = ubatch->seq_id[i][0];

-            for (uint32_t j = 0; j < n_seq_tokens; ++j) {
-                const uint32_t idx = s*n_seq_tokens + j;
-
-                const llama_pos p1 = ubatch->pos[idx];
+            const llama_pos p1 = ubatch->pos[i];

-                for (uint32_t i = 0; i < n_kv; ++i) {
+            for (uint32_t j = 0; j < n_kv; ++j) {
                 float f = 0.0f;

                 bool masked = false;

-                if (cells.is_empty(i)) {
+                if (cells.is_empty(j)) {
                     masked = true;
                 } else {
-                    const llama_pos p0 = cells.pos_get(i);
+                    const llama_pos p0 = cells.pos_get(j);

                     // mask the token if not the same sequence
-                    masked = masked || (!cells.seq_has(i, seq_id));
+                    masked = masked || (!cells.seq_has(j, seq_id));

                     // mask future tokens
                     masked = masked || (causal_attn && p0 > p1);

@@ -850,16 +841,15 @@ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ub
                     f = -INFINITY;
                 }

-                data[h*(n_kv*n_tokens) + idx*n_kv + i] = f;
-            }
+                data[h*(n_kv*n_tokens) + i*n_kv + j] = f;
             }
         }

         // mask padded tokens
         if (data) {
-            for (uint32_t j = n_tokens; j < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++j) {
-                for (uint32_t i = 0; i < n_kv; ++i) {
-                    data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY;
+            for (uint32_t i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
+                for (uint32_t j = 0; j < n_kv; ++j) {
+                    data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
                 }
             }
         }

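The loop-variable swap in set_input_kq_mask() settles on a single convention: i indexes batch tokens (rows), j indexes KV cells (columns), so the element for (token i, cell j) sits at data[h*(n_kv*n_tokens) + i*n_kv + j]. A standalone illustration of writing one masked row in that layout — plain C++, values invented:

    #include <cmath>
    #include <cstdint>
    #include <vector>

    int main() {
        const uint32_t n_kv = 8, n_tokens = 4, h = 0;
        std::vector<float> data(n_kv*n_tokens, 0.0f);

        // mask cells 6..7 for token 1, e.g. future positions under causal attention
        const uint32_t i = 1;
        for (uint32_t j = 6; j < n_kv; ++j) {
            data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
        }
        return 0;
    }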
@@ -887,12 +877,12 @@ void llama_kv_cache_unified::set_input_pos_bucket(ggml_tensor * dst, const llama
     const int32_t n_kv = dst->ne[0];

     for (int h = 0; h < 1; ++h) {
-        for (int j = 0; j < n_tokens; ++j) {
-            for (int i = 0; i < n_kv; ++i) {
+        for (int i = 0; i < n_tokens; ++i) {
+            for (int j = 0; j < n_kv; ++j) {
                 // the position when the cell is empty is irrelevant - it will be masked out later in the attention
-                const llama_pos p0 = cells.is_empty(i) ? -1 : cells.pos_get(i);
+                const llama_pos p0 = cells.is_empty(j) ? -1 : cells.pos_get(j);

-                data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(p0, ubatch->pos[j], hparams.n_rel_attn_bkts, false);
+                data[h*(n_kv*n_tokens) + i*n_kv + j] = llama_relative_position_bucket(p0, ubatch->pos[i], hparams.n_rel_attn_bkts, false);
             }
         }
     }

@@ -1509,12 +1499,9 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell

         seq_rm(dest_seq_id, -1, -1);

-        llama_sbatch sbatch;
-        llama_ubatch ubatch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
+        llama_batch_allocr balloc(hparams.n_pos_per_embd());

-        ubatch.n_tokens = cell_count;
-        ubatch.n_seq_tokens = cell_count;
-        ubatch.n_seqs = 1;
+        llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);

         for (uint32_t i = 0; i < cell_count; ++i) {
             llama_pos pos;

@@ -1746,9 +1733,8 @@ llama_kv_cache_unified_state::llama_kv_cache_unified_state(

 llama_kv_cache_unified_state::llama_kv_cache_unified_state(
         llama_kv_cache_unified * kv,
-        llama_sbatch sbatch,
         llama_kv_cache_unified::ubatch_heads heads,
-        std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), sbatch(std::move(sbatch)), heads(std::move(heads)), ubatches(std::move(ubatches)) {
+        std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), heads(std::move(heads)), ubatches(std::move(ubatches)) {
 }

 llama_kv_cache_unified_state::~llama_kv_cache_unified_state() = default;

@@ -1781,12 +1767,6 @@ bool llama_kv_cache_unified_state::apply() {
     return true;
 }

-std::vector<int64_t> & llama_kv_cache_unified_state::out_ids() {
-    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
-
-    return sbatch.out_ids;
-}
-
 llama_memory_status llama_kv_cache_unified_state::get_status() const {
     return status;
 }

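state_read_meta() above now rebuilds its scratch ubatch through the allocator instead of poking llama_ubatch fields directly. Condensed from the hunk as a sketch — the argument roles (cell_count tokens, a single sequence) are inferred from how the surrounding code fills pos and seq_id, not from a documented signature:

    // restore path, internal llama.cpp types
    llama_batch_allocr balloc(hparams.n_pos_per_embd());
    llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);  // cell_count tokens, 1 sequence (assumed roles)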
@@ -57,7 +57,7 @@ public:
     //

     llama_memory_state_ptr init_batch(
-            const llama_batch & batch,
+            llama_batch_allocr & balloc,
             uint32_t n_ubatch,
             bool embd_all) override;

@@ -231,7 +231,6 @@ public:
     // used to create a decode state from a batch
     llama_kv_cache_unified_state(
             llama_kv_cache_unified * kv,
-            llama_sbatch sbatch,
             ubatch_heads heads,
             std::vector<llama_ubatch> ubatches);

@@ -244,8 +243,6 @@ public:
     bool next()  override;
     bool apply() override;

-    std::vector<int64_t> & out_ids() override;
-
     llama_memory_status  get_status() const override;
     const llama_ubatch & get_ubatch() const override;

@@ -286,8 +283,6 @@ private:
     // batch processing state
     //

-    llama_sbatch sbatch;
-
     // the index of the next ubatch to process
     size_t i_next = 0;

@@ -384,10 +384,10 @@ private:
     //
     std::vector<llama_pos> shift;

-    using bits_t = std::bitset<LLAMA_MAX_SEQ>;
+    using seq_set_t = std::bitset<LLAMA_MAX_SEQ>;

     // the bitset seq[i] tells us which sequences are currently occupying the i-th cell
-    std::vector<bits_t> seq;
+    std::vector<seq_set_t> seq;

     // the set seq_pos[s] tells us which positions are currently present for sequence s
     // this way seq_pos[s].begin() and seq_pos[s].rbegin() give us the min/max positions currently in the cache
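The bits_t -> seq_set_t rename is cosmetic, but it names the semantics: each KV cell carries a fixed-capacity set of sequence ids backed by std::bitset. A self-contained example of the operations such a set supports (the LLAMA_MAX_SEQ value here is illustrative):

    #include <bitset>
    #include <cstdio>

    int main() {
        constexpr int LLAMA_MAX_SEQ = 64;              // illustrative capacity
        using seq_set_t = std::bitset<LLAMA_MAX_SEQ>;

        seq_set_t cell;
        cell.set(0);                                   // cell is used by sequence 0
        cell.set(3);                                   // ... and by sequence 3
        std::printf("seq count: %zu, has seq 3: %d\n", cell.count(), (int) cell.test(3));
        return 0;
    }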
@@ -32,7 +32,7 @@ llama_memory_hybrid::llama_memory_hybrid(
     mem_attn(new llama_kv_cache_unified(
         model,
         filter_attn == nullptr ?
-            [&](int32_t il) { return !model.hparams.is_recurrent(il); }
+            [&](int32_t il) { return !hparams.is_recurrent(il); }
             : filter_attn,
         type_k,
         type_v,

@@ -47,7 +47,7 @@ llama_memory_hybrid::llama_memory_hybrid(
     mem_recr(new llama_memory_recurrent(
         model,
         filter_recr == nullptr ?
-            [&](int32_t il) { return model.hparams.is_recurrent(il); }
+            [&](int32_t il) { return hparams.is_recurrent(il); }
             : filter_recr,
         type_r,
         type_s,

@@ -56,24 +56,28 @@ llama_memory_hybrid::llama_memory_hybrid(
         n_seq_max
     )) {}

-llama_memory_state_ptr llama_memory_hybrid::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled) {
+llama_memory_state_ptr llama_memory_hybrid::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
+    do {
-    // since this includes a recurrent cache, we cannot use split_simple
-    auto sbatch = llama_sbatch(batch, hparams.n_embd, false);
+        balloc.split_reset();

         // follow the recurrent pattern for creating the ubatch splits
         std::vector<llama_ubatch> ubatches;
-    while (sbatch.n_tokens > 0) {
+        while (true) {
             llama_ubatch ubatch;

-        if (embd_pooled) {
-            // Pooled embeddings cannot be split across ubatches (yet)
-            ubatch = sbatch.split_seq(n_ubatch);
+            if (embd_all) {
+                // if all tokens are output, split by sequence
+                ubatch = balloc.split_seq(n_ubatch);
             } else {
-            ubatch = sbatch.split_equal(n_ubatch);
+                ubatch = balloc.split_equal(n_ubatch);
             }

-        ubatches.push_back(ubatch);
+            if (ubatch.n_tokens == 0) {
+                break;
+            }
+
+            ubatches.push_back(std::move(ubatch)); // NOLINT
         }

         // prepare the recurrent batches first

@@ -91,7 +95,10 @@ llama_memory_state_ptr llama_memory_hybrid::init_batch(const llama_batch & batch
         }

         return std::make_unique<llama_memory_hybrid_state>(
-        this, std::move(sbatch), std::move(heads_attn), std::move(ubatches));
+            this, std::move(heads_attn), std::move(ubatches));
+    } while(false);
+
+    return std::make_unique<llama_memory_hybrid_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
 }

 llama_memory_state_ptr llama_memory_hybrid::init_full() {
@@ -188,15 +195,13 @@ llama_memory_hybrid_state::llama_memory_hybrid_state(

 llama_memory_hybrid_state::llama_memory_hybrid_state(
         llama_memory_hybrid * mem,
-        llama_sbatch sbatch,
         std::vector<uint32_t> heads_attn,
         std::vector<llama_ubatch> ubatches) :
-    sbatch(std::move(sbatch)),
     ubatches(std::move(ubatches)),
     // note: here we copy the ubatches. not sure if this is ideal
-    state_attn(new llama_kv_cache_unified_state(mem->get_mem_attn(), {}, std::move(heads_attn), this->ubatches)),
-    state_recr(new llama_memory_recurrent_state(mem->get_mem_recr(), {}, this->ubatches)),
-    status(LLAMA_MEMORY_STATUS_SUCCESS) {
+    state_attn(new llama_kv_cache_unified_state(mem->get_mem_attn(), std::move(heads_attn), this->ubatches)),
+    state_recr(new llama_memory_recurrent_state(mem->get_mem_recr(), this->ubatches)),
+    status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) {
 }

 bool llama_memory_hybrid_state::next() {

@@ -223,12 +228,6 @@ bool llama_memory_hybrid_state::apply() {
     return res;
 }

-std::vector<int64_t> & llama_memory_hybrid_state::out_ids() {
-    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
-
-    return sbatch.out_ids;
-}
-
 llama_memory_status llama_memory_hybrid_state::get_status() const {
     return status;
 }

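The hybrid state above stops hard-coding LLAMA_MEMORY_STATUS_SUCCESS and instead combines the statuses of its two children, matching what the iSWA cache already did earlier in this diff. The combining function is not shown in these hunks; a plausible reading — an assumption, not the tree's implementation — is that any non-success child status wins:

    // sketch only: assumed semantics of llama_memory_status_combine
    llama_memory_status combine(llama_memory_status a, llama_memory_status b) {
        if (a != LLAMA_MEMORY_STATUS_SUCCESS) {
            return a;  // assumption: first failure propagates
        }
        return b;
    }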
@@ -50,9 +50,9 @@ public:
     //

     llama_memory_state_ptr init_batch(
-            const llama_batch & batch,
+            llama_batch_allocr & balloc,
             uint32_t n_ubatch,
-            bool embd_pooled) override;
+            bool embd_all) override;

     llama_memory_state_ptr init_full() override;

@@ -107,7 +107,6 @@ public:
     // init success
     llama_memory_hybrid_state(
             llama_memory_hybrid * mem,
-            llama_sbatch sbatch,
             std::vector<uint32_t> heads_attn,
             std::vector<llama_ubatch> ubatches);

@@ -116,8 +115,6 @@ public:
     bool next()  override;
     bool apply() override;

-    std::vector<int64_t> & out_ids() override;
-
     llama_memory_status  get_status() const override;
     const llama_ubatch & get_ubatch() const override;

@@ -129,8 +126,6 @@ public:
     const llama_memory_recurrent_state * get_state_recr() const;

 private:
-    llama_sbatch sbatch;
-
     // the index of the next ubatch to process
     size_t i_next = 0;

@@ -362,29 +362,31 @@ llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const {
     return result;
 }

-llama_memory_state_ptr llama_memory_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) {
-    auto sbatch = llama_sbatch(batch, hparams.n_embd, false);
-
+llama_memory_state_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
     std::vector<llama_ubatch> ubatches;

-    while (sbatch.n_tokens > 0) {
+    while (true) {
         llama_ubatch ubatch;

         if (embd_all) {
             // if all tokens are output, split by sequence
-            ubatch = sbatch.split_seq(n_ubatch);
+            ubatch = balloc.split_seq(n_ubatch);
         } else {
-            ubatch = sbatch.split_equal(n_ubatch);
+            ubatch = balloc.split_equal(n_ubatch);
         }

-        ubatches.push_back(ubatch);
+        if (ubatch.n_tokens == 0) {
+            break;
+        }
+
+        ubatches.push_back(std::move(ubatch)); // NOLINT
     }

     if (!prepare(ubatches)) {
         return std::make_unique<llama_memory_recurrent_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
     }

-    return std::make_unique<llama_memory_recurrent_state>(this, std::move(sbatch), std::move(ubatches));
+    return std::make_unique<llama_memory_recurrent_state>(this, std::move(ubatches));
 }

 llama_memory_state_ptr llama_memory_recurrent::init_full() {

@@ -423,9 +425,8 @@ bool llama_memory_recurrent::prepare(const std::vector<llama_ubatch> & ubatches)
 }

 bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {
-    const uint32_t n_seqs = ubatch.n_seqs;
-
     const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
+    const uint32_t n_seqs       = ubatch.n_seqs;

     // if we have enough unused cells before the current head ->
     //   better to start searching from the beginning of the cache, hoping to fill it

@@ -445,9 +446,11 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {

     // everything should fit if all seq_ids are smaller than the max
     for (uint32_t s = 0; s < n_seqs; ++s) {
-        const uint32_t n_seq_id = ubatch.n_seq_id[s];
+        const uint32_t i = s*n_seq_tokens; // first token of sequence set s
+        const uint32_t n_seq_id = ubatch.n_seq_id[i];

         for (uint32_t j = 0; j < n_seq_id; ++j) {
-            const llama_seq_id seq_id = ubatch.seq_id[s][j];
+            const llama_seq_id seq_id = ubatch.seq_id[i][j];

             if (seq_id < 0 || (uint32_t) seq_id >= size) {
                 // too big seq_id

@@ -506,7 +509,8 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {

     // find usable cell range
     for (uint32_t s = 0; s < n_seqs; ++s) {
-        const llama_seq_id seq_id = ubatch.seq_id[s][0];
+        const uint32_t i = s*n_seq_tokens;
+        const llama_seq_id seq_id = ubatch.seq_id[i][0];
         auto & seq_meta = cells[seq_id];
         bool has_cell = false;
         if (seq_meta.tail >= 0) {

@@ -530,7 +534,7 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {
             seq_meta.tail = next_empty_cell;
             // find next empty cell
             if (s + 1 < n_seqs) {
-                for (uint32_t i = 0; i < size; ++i) {
+                for (uint32_t j = 0; j < size; ++j) {
                     next_empty_cell += 1;
                     if (next_empty_cell >= size) { next_empty_cell -= size; }
                     auto & cell = cells[next_empty_cell];

@@ -544,8 +548,9 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {

     // gather and re-order
     for (uint32_t s = 0; s < n_seqs; ++s) {
+        const uint32_t i = s*n_seq_tokens;
         const int32_t dst_id = s + min;
-        const int32_t src_id = cells[ubatch.seq_id[s][0]].tail;
+        const int32_t src_id = cells[ubatch.seq_id[i][0]].tail;
         if (dst_id != src_id) {
             auto & dst_cell = cells[dst_id];
             auto & src_cell = cells[src_id];

@@ -555,8 +560,8 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {
             std::swap(dst_cell.seq_id, src_cell.seq_id);

             // swap tails
-            for (uint32_t i = 0; i < size; ++i) {
-                int32_t & tail = cells[i].tail;
+            for (uint32_t j = 0; j < size; ++j) {
+                int32_t & tail = cells[j].tail;
                 if (tail == src_id) {
                     tail = dst_id;
                 } else if (tail == dst_id) {

@@ -568,7 +573,8 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {

     // update the pos of the used seqs
     for (uint32_t s = 0; s < n_seqs; ++s) {
-        const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1];
+        const uint32_t i = s*n_seq_tokens;
+        const llama_pos last_pos = ubatch.pos[i + n_seq_tokens - 1];
         const int32_t cell_id = s + min;
         auto & cell = cells[cell_id];

@@ -576,12 +582,12 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {
             // What should happen when the pos backtracks or skips a value?
             // Clearing the state mid-batch would require special-casing which isn't done.
             LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
-                __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens);
+                __func__, last_pos, cell.pos, ubatch.seq_id[i][0], n_seq_tokens);
         }
         cell.pos = last_pos;
         cell.seq_id.clear();
-        for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) {
-            const llama_seq_id seq_id = ubatch.seq_id[s][j];
+        for (int32_t j = 0; j < ubatch.n_seq_id[i]; ++j) {
+            const llama_seq_id seq_id = ubatch.seq_id[i][j];
             cell.seq_id.insert(seq_id);
             cells[seq_id].tail = cell_id;
         }

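Every find_slot() hunk above introduces the same helper index: with equal-sized splits, sequence set s occupies tokens [s*n_seq_tokens, (s+1)*n_seq_tokens), so per-set metadata (n_seq_id, seq_id, the final position) is now read through the set's first token i = s*n_seq_tokens. A tiny standalone check of the ranges involved:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t n_seqs = 4, n_seq_tokens = 5;
        for (uint32_t s = 0; s < n_seqs; ++s) {
            const uint32_t i    = s*n_seq_tokens;         // first token of set s
            const uint32_t last = i + n_seq_tokens - 1;   // index used for last_pos
            assert(last < (s + 1)*n_seq_tokens);
        }
        return 0;
    }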
@@ -827,12 +833,9 @@ bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell

         seq_rm(dest_seq_id, -1, -1);

-        llama_sbatch sbatch;
-        llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
+        llama_batch_allocr balloc(hparams.n_pos_per_embd());

-        batch.n_tokens = cell_count;
-        batch.n_seq_tokens = cell_count;
-        batch.n_seqs = 1;
+        llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);

         for (uint32_t i = 0; i < cell_count; ++i) {
             llama_pos pos;

@@ -846,12 +849,12 @@ bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell
                 return false;
             }

-            batch.pos[i] = pos;
+            ubatch.pos[i] = pos;
         }
-        batch.n_seq_id[0] = 1;
-        batch.seq_id[0] = &dest_seq_id;
+        ubatch.n_seq_id[0] = 1;
+        ubatch.seq_id[0] = &dest_seq_id;

-        if (!find_slot(batch)) {
+        if (!find_slot(ubatch)) {
             LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
             return false;
         }

@@ -859,8 +862,8 @@ bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell
         // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
         // Assume that this is one contiguous block of cells
         GGML_ASSERT(head + cell_count <= size);
-        GGML_ASSERT(cells[head].pos == batch.pos[0]);
-        GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]);
+        GGML_ASSERT(cells[head].pos == ubatch.pos[0]);
+        GGML_ASSERT(cells[head + cell_count - 1].pos == ubatch.pos[cell_count - 1]);
         GGML_ASSERT(cells[head].has_seq_id(dest_seq_id));
         GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id));
     } else {

@@ -1048,8 +1051,7 @@ llama_memory_recurrent_state::llama_memory_recurrent_state(

 llama_memory_recurrent_state::llama_memory_recurrent_state(
         llama_memory_recurrent * mem,
-        llama_sbatch sbatch,
-        std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), sbatch(std::move(sbatch)), ubatches(std::move(ubatches)) {}
+        std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), ubatches(std::move(ubatches)) {}

 llama_memory_recurrent_state::~llama_memory_recurrent_state() = default;

@@ -1071,12 +1073,6 @@ bool llama_memory_recurrent_state::apply() {
     return true;
 }

-std::vector<int64_t> & llama_memory_recurrent_state::out_ids() {
-    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
-
-    return sbatch.out_ids;
-}
-
 llama_memory_status llama_memory_recurrent_state::get_status() const {
     return status;
 }

@@ -35,7 +35,7 @@ public:
     //

     llama_memory_state_ptr init_batch(
-            const llama_batch & batch,
+            llama_batch_allocr & balloc,
             uint32_t n_ubatch,
             bool embd_all) override;

@@ -137,7 +137,6 @@ public:
     // used to create a state from a batch
     llama_memory_recurrent_state(
             llama_memory_recurrent * mem,
-            llama_sbatch sbatch,
             std::vector<llama_ubatch> ubatches);

     virtual ~llama_memory_recurrent_state();

@@ -149,8 +148,6 @@ public:
     bool next()  override;
     bool apply() override;

-    std::vector<int64_t> & out_ids() override;
-
     llama_memory_status  get_status() const override;
     const llama_ubatch & get_ubatch() const override;

@@ -173,8 +170,6 @@ private:

     llama_memory_recurrent * mem;

-    llama_sbatch sbatch;
-
     size_t i_next = 0;

     std::vector<llama_ubatch> ubatches;

@@ -7,6 +7,8 @@

 struct llama_ubatch;

+class llama_batch_allocr;
+
 class llama_io_write_i;
 class llama_io_read_i;

@@ -50,9 +52,6 @@ struct llama_memory_state_i {
     // return false on failure
     virtual bool apply() = 0;

-    // TODO: this might get reworked in the future when refactoring llama_batch
-    virtual std::vector<int64_t> & out_ids() = 0;
-
     // get the current ubatch
     virtual const llama_ubatch & get_ubatch() const = 0;

@@ -71,7 +70,7 @@ struct llama_memory_i {
     // return a state object containing the ubatches and KV cache state required to process them
     // check the llama_memory_state_i::get_status() for the result
     virtual llama_memory_state_ptr init_batch(
-        const llama_batch & batch,
+        llama_batch_allocr & balloc,
         uint32_t n_ubatch,
         bool embd_all) = 0;

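With out_ids() gone from llama_memory_state_i, a caller only needs the remaining members. How a decode loop might drive the revised interface — a hedged sketch: the apply()/next() ordering is inferred from the method names and comments above, not copied from the (suppressed) implementation file:

    // sketch, internal llama.cpp types assumed in scope
    llama_memory_state_ptr state = mem->init_batch(balloc, n_ubatch, /*embd_all=*/false);
    if (state->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
        // bail out, e.g. on LLAMA_MEMORY_STATUS_FAILED_PREPARE
    }

    do {
        state->apply();                              // write KV-cache bookkeeping for this ubatch
        const llama_ubatch & ub = state->get_ubatch();
        // ... build and evaluate the graph for ub ...
    } while (state->next());                         // advance to the next ubatch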
[File diff suppressed because it is too large]
@@ -1969,10 +1969,8 @@ struct server_context {
             params_dft.n_ctx        = params_base.speculative.n_ctx == 0 ? params_base.n_ctx / params_base.n_parallel : params_base.speculative.n_ctx;
             params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers;
             params_dft.n_parallel   = 1;
-
-            // force F16 KV cache for the draft model for extra performance
-            params_dft.cache_type_k = GGML_TYPE_F16;
-            params_dft.cache_type_v = GGML_TYPE_F16;
+            params_dft.cache_type_k = params_base.speculative.cache_type_k;
+            params_dft.cache_type_v = params_base.speculative.cache_type_v;

             llama_init_dft = common_init_from_params(params_dft);

@@ -3387,38 +3385,6 @@ struct server_context {
             llama_set_embeddings(ctx, slot_batched->need_embd());
         }

-        // pad the batch so that batch.n_tokens >= n_slots
-        // TODO: temporary workaround for https://github.com/ggml-org/llama.cpp/issues/13689
-        if (slot_batched->need_embd()) {
-            const int n_slots = slots.size();
-
-            if (batch.n_tokens < n_slots) {
-                std::set<llama_seq_id> seq_ids;
-                for (int j = 0; j < batch.n_tokens; ++j) {
-                    seq_ids.insert(batch.seq_id[j][0]);
-                }
-
-                // find unused sequence id
-                llama_seq_id seq_id = -1;
-                for (int i = 0; i < n_slots; ++i) {
-                    if (seq_ids.find(i) == seq_ids.end()) {
-                        seq_id = i;
-                    }
-                }
-
-                const int n_add = n_slots - batch.n_tokens;
-
-                SRV_WRN("adding %d dummy tokens to the batch, seq_id = %d\n", n_add, seq_id);
-
-                for (int j = 0; j < n_add; ++j) {
-                    common_batch_add(batch, 0, j, { seq_id }, true);
-                }
-
-                slots[seq_id].cache_tokens.clear();
-                llama_memory_seq_rm(llama_get_memory(ctx), seq_id, -1, -1);
-            }
-        }
-
         int32_t i_next = 0;

         // process the created batch of tokens

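Net effect of the two server hunks: the draft model's KV cache now follows whatever populates params_base.speculative upstream instead of being pinned to F16, and the embedding-batch padding workaround for ggml-org/llama.cpp#13689 is dropped. The former reduces to:

    // field names taken from the hunk; surrounding initialization elided
    params_dft.cache_type_k = params_base.speculative.cache_type_k;  // previously forced to GGML_TYPE_F16
    params_dft.cache_type_v = params_base.speculative.cache_type_v;  // previously forced to GGML_TYPE_F16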