From 53ae30640e131082d8d19bd80485b47c4553d551 Mon Sep 17 00:00:00 2001 From: Beinsezii <39478211+Beinsezii@users.noreply.github.com> Date: Wed, 28 May 2025 14:50:20 -0700 Subject: [PATCH 01/21] gguf-py : fix SafetensorRemote return on undefined size (< 0) (#13841) --- gguf-py/gguf/utility.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gguf-py/gguf/utility.py b/gguf-py/gguf/utility.py index e5251aef8..00adcbc93 100644 --- a/gguf-py/gguf/utility.py +++ b/gguf-py/gguf/utility.py @@ -231,7 +231,7 @@ class SafetensorRemote: response.raise_for_status() # Get raw byte data - return response.content[:size] + return response.content[slice(size if size > -1 else None)] @classmethod def check_file_exist(cls, url: str) -> bool: From 1b8fb8152d0f3357a9c1d9b4c02f6cc5b9cb7232 Mon Sep 17 00:00:00 2001 From: Vineel Abhinav <131174187+vineelabhinav@users.noreply.github.com> Date: Thu, 29 May 2025 11:31:33 +0530 Subject: [PATCH 02/21] ggml: aarch64: Implement SVE F32 kernels for vector functions (#13843) * F32-Mamba-SVE * F32-Mamba-SVE * Resolve test errors-1 * Resolve test errors-2 * F32-vec-SVE * F32-vec-SVE * F32-vec-SVE --- ggml/src/ggml-cpu/ops.cpp | 219 ++++++++++++++++++---------- ggml/src/ggml-cpu/simd-mappings.h | 118 ++++++++++++++- ggml/src/ggml-cpu/vec.cpp | 101 ++++++++++--- ggml/src/ggml-cpu/vec.h | 231 ++++++++++++++++++++++-------- 4 files changed, 522 insertions(+), 147 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 26501b711..f4692f3cd 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -7641,8 +7641,8 @@ static void ggml_compute_forward_ssm_scan_f32( const float * A = (const float *) ((const char *) src3->data + ir0*(src3->nb[1])); // {d_state, d_inner} const float * B = (const float *) ((const char *) src4->data + i2*(src4->nb[1]) + i3*(src4->nb[2])); // {d_state, n_t, n_s} const float * C = (const float *) ((const char *) src5->data + i2*(src5->nb[1]) + i3*(src5->nb[2])); // {d_state, n_t, n_s} - float * y = ( float *) (( char *) dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} - float * s = ( float *) (( char *) dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[3]); // {d_state, d_inner, n_s} + float * y = ( float *) (( char *) dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} + float * s = ( float *) (( char *) dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[3]); // {d_state, d_inner, n_s} // use the output as the source for the next token-wise iterations if (i2 > 0) { s0 = s; } @@ -8070,6 +8070,14 @@ static void ggml_compute_forward_rwkv_wkv6_f32( #define GGML_F32X_MUL GGML_F32x16_MUL #define GGML_F32X_FMA GGML_F32x16_FMA #define WKV_VECTOR_SIZE 16 + #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) + #define GGML_F32X GGML_F32xt + #define GGML_F32X_SET1 GGML_F32xt_SET1 + #define GGML_F32X_LOAD GGML_F32xt_LOAD + #define GGML_F32X_STORE GGML_F32xt_STORE + #define GGML_F32X_MUL GGML_F32xt_MUL + #define GGML_F32X_FMA GGML_F32xt_FMA + #define WKV_VECTOR_SIZE 8 #elif defined(__ARM_NEON) && defined(__aarch64__) #define GGML_F32X GGML_F32x4 #define GGML_F32X_SET1 GGML_F32x4_SET1 @@ -8080,8 +8088,14 @@ static void ggml_compute_forward_rwkv_wkv6_f32( #define WKV_VECTOR_SIZE 4 #endif + int wkv_vector_size; #ifdef WKV_VECTOR_SIZE - const int64_t vec_count = head_size / WKV_VECTOR_SIZE; + #if defined(__ARM_FEATURE_SVE) + wkv_vector_size = svcntw(); + #else + wkv_vector_size = WKV_VECTOR_SIZE; + #endif 
+ const int64_t vec_count = head_size / wkv_vector_size; for (int64_t t = 0; t < T; t++) { size_t t_offset = t * t_stride; @@ -8111,7 +8125,7 @@ static void ggml_compute_forward_rwkv_wkv6_f32( GGML_F32X time_decay_vec = GGML_F32X_SET1(time_decay_val); for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * WKV_VECTOR_SIZE; + size_t base_j = j * wkv_vector_size; size_t t_h_j_offset = t_h_offset + base_j; size_t h_2d_i_j_offset = h_2d_i_offset + base_j; @@ -8136,7 +8150,7 @@ static void ggml_compute_forward_rwkv_wkv6_f32( } // Handle remaining elements, this will not be used. - for (int64_t j = vec_count * WKV_VECTOR_SIZE; j < head_size; j++) { + for (int64_t j = vec_count * wkv_vector_size; j < head_size; j++) { size_t t_h_j_offset = t_h_offset + j; size_t h_2d_i_j_offset = h_2d_i_offset + j; float v_val = v[t_h_j_offset]; @@ -8272,6 +8286,14 @@ static void ggml_compute_forward_gla_f32( #define GGML_F32X_MUL GGML_F32x16_MUL #define GGML_F32X_FMA GGML_F32x16_FMA #define GLA_VECTOR_SIZE 16 + #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) + #define GGML_F32X GGML_F32xt + #define GGML_F32X_SET1 GGML_F32xt_SET1 + #define GGML_F32X_LOAD GGML_F32xt_LOAD + #define GGML_F32X_STORE GGML_F32xt_STORE + #define GGML_F32X_MUL GGML_F32xt_MUL + #define GGML_F32X_FMA GGML_F32xt_FMA + #define GLA_VECTOR_SIZE 8 #elif defined(__ARM_NEON) && defined(__aarch64__) #define GGML_F32X GGML_F32x4 #define GGML_F32X_SET1 GGML_F32x4_SET1 @@ -8282,8 +8304,14 @@ static void ggml_compute_forward_gla_f32( #define GLA_VECTOR_SIZE 4 #endif + int gla_vector_size; #ifdef GLA_VECTOR_SIZE - const int64_t vec_count = head_size / GLA_VECTOR_SIZE; + #if defined(__ARM_FEATURE_SVE) + gla_vector_size = svcntw(); + #else + gla_vector_size = GLA_VECTOR_SIZE; + #endif + const int64_t vec_count = head_size / gla_vector_size; for (int64_t t = 0; t < T; t++) { size_t t_offset = t * t_stride; @@ -8310,7 +8338,7 @@ static void ggml_compute_forward_gla_f32( GGML_F32X g_vec = GGML_F32X_SET1(g_val); for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * GLA_VECTOR_SIZE; + size_t base_j = j * gla_vector_size; size_t t_h_j_offset = t_h_offset + base_j; size_t h_2d_i_j_offset = h_2d_i_offset + base_j; @@ -8334,7 +8362,7 @@ static void ggml_compute_forward_gla_f32( } // Handle remaining elements, this will not be used. - for (int64_t j = vec_count * GLA_VECTOR_SIZE; j < head_size; j++) { + for (int64_t j = vec_count * gla_vector_size; j < head_size; j++) { size_t t_h_j_offset = t_h_offset + j; size_t h_2d_i_j_offset = h_2d_i_offset + j; float v_val = v[t_h_j_offset]; @@ -8443,83 +8471,126 @@ static void ggml_compute_forward_rwkv_wkv7_f32( int64_t h_stride_2d = head_size * head_size; #if defined(GGML_SIMD) - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[6]->data + state_offset; + #if defined(__ARM_FEATURE_SVE) + // scalar Route to scalar implementation //TODO: Write SVE code + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float*)dst->src[6]->data + state_offset; - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; - for (int64_t ii = 0; ii < head_size; ii++) { - int64_t t_h_i_offset = t_h_offset + ii; - int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; + for (int64_t i = 0; i < head_size; i++) { + int64_t t_h_i_offset = t_h_offset + i; + int64_t h_2d_i_offset = h_2d_offset + i * h_stride; - GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); + float v_val = v[t_h_i_offset]; - float sa = 0; - { - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); - ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); - sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); - } + float sa = 0, result = 0; + for (int64_t j = 0; j < head_size; j++) { + sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; } - GGML_F32_VEC_REDUCE(sa, sum); - } - GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); + for (int64_t j = 0; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; - int64_t j = 0; - GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - for (; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; - int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; - - GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); - GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); - GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); - GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); - - k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); - - GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); - // kv + s * decay + sa * b - state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); - state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); - GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); - - result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + result += state_cur[h_2d_i_j_offset] * r_val; } - } - GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); - - // There shouldn't be left-overs though. 
- for (; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v[t_h_i_offset] * k_val; - - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; + dst_data[t_h_i_offset] = result; } } } - } + #else + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t ii = 0; ii < head_size; ii++) { + int64_t t_h_i_offset = t_h_offset + ii; + int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; + + GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); + + float sa = 0; + { + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); + ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); + sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); + } + } + GGML_F32_VEC_REDUCE(sa, sum); + } + + GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); + + int64_t j = 0; + GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + for (; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; + int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; + + GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); + GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); + GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); + GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); + + k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); + + GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); + // kv + s * decay + sa * b + state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); + state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); + GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); + + result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); + } + } + GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); + + // There shouldn't be left-overs though. 
+ for (; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v[t_h_i_offset] * k_val; + + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; + } + } + } + } + #endif #else for (int64_t t = 0; t < T; t++) { int64_t t_offset = t * t_stride; diff --git a/ggml/src/ggml-cpu/simd-mappings.h b/ggml/src/ggml-cpu/simd-mappings.h index 45c31cf1f..2e3669c01 100644 --- a/ggml/src/ggml-cpu/simd-mappings.h +++ b/ggml/src/ggml-cpu/simd-mappings.h @@ -17,7 +17,123 @@ // number of elements to fit in a single register // -#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) +#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_FMA) + +#define GGML_SIMD + +// F32 SVE +#define GGML_F32_EPR 8 +#define DEFAULT_PG svptrue_b32() + +#define GGML_F32xt svfloat32_t +#define GGML_F32xt_ZERO svdup_n_f32(0.0f) +#define GGML_F32xt_SET1(x) svdup_n_f32(x) +#define GGML_F32xt_LOAD_IMPL(pg, a, ...) svld1_f32(pg, a) +#define GGML_F32xt_LOAD(...) GGML_F32xt_LOAD_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_STORE_IMPL(pg,a,b) svst1_f32(pg, a, b) +#define GGML_F32xt_STORE(...) GGML_F32xt_STORE_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_FMA_IMPL(pg, a, b, c) svmad_f32_m(pg, a, b, c) +#define GGML_F32xt_FMA(...) GGML_F32xt_FMA_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_ADD_IMPL(pg, a, b) svadd_f32_m(pg, a, b) +#define GGML_F32xt_ADD(...) GGML_F32xt_ADD_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_MUL_IMPL(pg, a, b) svmul_f32_m(pg, a, b) +#define GGML_F32xt_MUL(...) GGML_F32xt_MUL_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_REDUCE_ONE_IMPL(pg, a) svaddv(pg, a) +#define GGML_F32xt_REDUCE_ONE(...) GGML_F32xt_REDUCE_ONE_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_REDUCE_IMPL(pg, res, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8) \ +{ \ + sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum2); \ + sum3 = svadd_f32_m(DEFAULT_PG, sum3, sum4); \ + sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum6); \ + sum7 = svadd_f32_m(DEFAULT_PG, sum7, sum8); \ + sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum3); \ + sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum7); \ + sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum5); \ + (res) = (ggml_float) GGML_F32xt_REDUCE_ONE(sum1); \ +} +#define GGML_F32xt_REDUCE(...) 
GGML_F32xt_REDUCE_IMPL(DEFAULT_PG, __VA_ARGS__) + +#define GGML_F32_VEC GGML_F32xt +#define GGML_F32_VEC_ZERO GGML_F32xt_ZERO +#define GGML_F32_VEC_SET1 GGML_F32xt_SET1 +#define GGML_F32_VEC_LOAD GGML_F32xt_LOAD +#define GGML_F32_VEC_STORE GGML_F32xt_STORE +#define GGML_F32_VEC_FMA GGML_F32xt_FMA +#define GGML_F32_VEC_ADD GGML_F32xt_ADD +#define GGML_F32_VEC_MUL GGML_F32xt_MUL +#define GGML_F32_VEC_REDUCE GGML_F32xt_REDUCE + +// F16 NEON + +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + #define GGML_F16_STEP 32 + #define GGML_F16_EPR 8 + + #define GGML_F16x8 float16x8_t + #define GGML_F16x8_ZERO vdupq_n_f16(0.0f) + #define GGML_F16x8_SET1(x) vdupq_n_f16(x) + #define GGML_F16x8_LOAD(x) vld1q_f16((const __fp16 *)(x)) + #define GGML_F16x8_STORE vst1q_f16 + #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) + #define GGML_F16x8_ADD vaddq_f16 + #define GGML_F16x8_MUL vmulq_f16 + #define GGML_F16x8_REDUCE(res, x) \ + do { \ + int offset = GGML_F16_ARR >> 1; \ + for (int i = 0; i < offset; ++i) { \ + (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ + } \ + const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 ((x)[0])); \ + const float32x4_t t1 = vcvt_f32_f16(vget_high_f16((x)[0])); \ + (res) = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ + } while (0) + + #define GGML_F16_VEC GGML_F16x8 + #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO + #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 + #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) + #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE((__fp16 *)(p), (r)[i]) + #define GGML_F16_VEC_FMA GGML_F16x8_FMA + #define GGML_F16_VEC_ADD GGML_F16x8_ADD + #define GGML_F16_VEC_MUL GGML_F16x8_MUL + #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE +#else + // if FP16 vector arithmetic is not supported, we use FP32 instead + // and take advantage of the vcvt_ functions to convert to/from FP16 + + #define GGML_F16_STEP 16 + #define GGML_F16_EPR 4 + + #define GGML_F32Cx4 float32x4_t + #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f) + #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x) + #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16((const __fp16 *)(x))) + #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) + #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) + #define GGML_F32Cx4_ADD vaddq_f32 + #define GGML_F32Cx4_MUL vmulq_f32 + #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE + + #define GGML_F16_VEC GGML_F32Cx4 + #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO + #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 + #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) + #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE((__fp16 *)(p), r[i]) + #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA + #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD + #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL + #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE +#endif + +#elif defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) #define GGML_SIMD diff --git a/ggml/src/ggml-cpu/vec.cpp b/ggml/src/ggml-cpu/vec.cpp index 02d406182..f7614568e 100644 --- a/ggml/src/ggml-cpu/vec.cpp +++ b/ggml/src/ggml-cpu/vec.cpp @@ -17,29 +17,98 @@ void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * G #if defined(GGML_SIMD) float sumf = 0.0f; - const int np = (n & ~(GGML_F32_STEP - 1)); - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + #if defined(__ARM_FEATURE_SVE) + const int sve_register_length = 
ggml_cpu_get_sve_cnt() * 8; + const int ggml_f32_epr = sve_register_length / 32;//8;//svcntw(); // SVE128:4, SVE256:8, SVE512:16 + const int ggml_f32_step = 8 * ggml_f32_epr; // choose 8 SVE registers - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; + const int np = (n & ~(ggml_f32_step - 1)); + svfloat32_t sum1 = svdup_n_f32(0.0f); + svfloat32_t sum2 = svdup_n_f32(0.0f); + svfloat32_t sum3 = svdup_n_f32(0.0f); + svfloat32_t sum4 = svdup_n_f32(0.0f); + svfloat32_t sum5 = svdup_n_f32(0.0f); + svfloat32_t sum6 = svdup_n_f32(0.0f); + svfloat32_t sum7 = svdup_n_f32(0.0f); + svfloat32_t sum8 = svdup_n_f32(0.0f); + svfloat32_t ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8; + svfloat32_t ay1,ay2,ay3,ay4,ay5,ay6,ay7,ay8; + for (int i = 0; i < np; i += ggml_f32_step) { + ax1 = GGML_F32_VEC_LOAD(x + i); + ay1 = GGML_F32_VEC_LOAD(y + i); + sum1 = GGML_F32_VEC_FMA(ax1, ay1, sum1); - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr); + ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr); + sum2 = GGML_F32_VEC_FMA(ax2, ay2, sum2); - sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); + ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr); + ay3 = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr); + sum3 = GGML_F32_VEC_FMA(ax3, ay3, sum3); + + ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr); + ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr); + sum4 = GGML_F32_VEC_FMA(ax4, ay4, sum4); + + ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr); + ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr); + sum5 = GGML_F32_VEC_FMA(ax5, ay5, sum5); + + ax6 = GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr); + ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr); + sum6 = GGML_F32_VEC_FMA(ax6, ay6, sum6); + + ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr); + ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr); + sum7 = GGML_F32_VEC_FMA(ax7, ay7, sum7); + + ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr); + ay8 = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr); + sum8 = GGML_F32_VEC_FMA(ax8, ay8, sum8); } - } + // leftovers + // Since 8 unrolls are done in above loop, leftovers lie in range [0, ggml_f32_step] which is handled in below loop + const int np2 = (n & ~(ggml_f32_epr - 1)); + for (int i = np; i < np2; i += ggml_f32_epr) { + ax1 = GGML_F32_VEC_LOAD(x + i); + ay1 = GGML_F32_VEC_LOAD(y + i); + sum1 = GGML_F32_VEC_FMA(ax1, ay1, sum1); + } + // maximum number of leftover elements will be less that ggml_f32_epr. 
Apply predicated svmad on available elements only + if (np2 < n) { + svbool_t pg = svwhilelt_b32(np2, n); + ax1 = svld1_f32(pg, x + np2); + ay1 = svld1_f32(pg, y + np2); + sum1 = svmad_f32_m(pg, ax1, ay1, sum1); + } + // reduce sum1,sum2 to sum1 + GGML_F32_VEC_REDUCE(sumf, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8); + #else + const int np = (n & ~(GGML_F32_STEP - 1)); - // reduce sum0..sum3 to sum0 - GGML_F32_VEC_REDUCE(sumf, sum); + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - // leftovers - for (int i = np; i < n; ++i) { - sumf += x[i]*y[i]; - } + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + + sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); + } + } + + // reduce sum0..sum3 to sum0 + GGML_F32_VEC_REDUCE(sumf, sum); + + // leftovers + for (int i = np; i < n; ++i) { + sumf += x[i]*y[i]; + } + #endif #else // scalar ggml_float sumf = 0.0; diff --git a/ggml/src/ggml-cpu/vec.h b/ggml/src/ggml-cpu/vec.h index c77349ebe..163a27435 100644 --- a/ggml/src/ggml-cpu/vec.h +++ b/ggml/src/ggml-cpu/vec.h @@ -5,6 +5,7 @@ #include "ggml-impl.h" #include "simd-mappings.h" #include "ggml.h" +#include "ggml-cpu.h" #if defined(GGML_USE_ACCELERATE) #include @@ -148,27 +149,108 @@ inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * GG inline static void ggml_vec_mad_f32(const int n, float * GGML_RESTRICT y, const float * GGML_RESTRICT x, const float v) { #if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); + #if defined(__ARM_FEATURE_SVE) - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + const int sve_register_length = ggml_cpu_get_sve_cnt() * 8; + const int ggml_f32_epr = sve_register_length / 32;//8;//svcntw(); // SVE128:4, SVE256:8, SVE512:16 + const int ggml_f32_step = 8 * ggml_f32_epr; // choose 8 SVE registers + GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; + const int np = (n & ~(ggml_f32_step - 1)); + svfloat32_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8; + svfloat32_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8; + for (int i = 0; i < np; i += ggml_f32_step) { - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx); + ax1 = GGML_F32_VEC_LOAD(x + i); + ay1 = GGML_F32_VEC_LOAD(y + i); + ay1 = GGML_F32_VEC_FMA(ax1, vx, ay1); - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + GGML_F32_VEC_STORE(y + i, ay1); + + ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr); + ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr); + ay2 = GGML_F32_VEC_FMA(ax2, vx, ay2); + + GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2); + + ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr); + ay3 = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr); + ay3 = GGML_F32_VEC_FMA(ax3, vx, ay3); + + GGML_F32_VEC_STORE(y + i + 2*ggml_f32_epr, ay3); + + ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr); + ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr); + ay4 = GGML_F32_VEC_FMA(ax4, vx, ay4); + + GGML_F32_VEC_STORE(y + i + 3*ggml_f32_epr, ay4); + + ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr); + ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr); + ay5 = GGML_F32_VEC_FMA(ax5, vx, ay5); + + GGML_F32_VEC_STORE(y + i + 4*ggml_f32_epr, ay5); + + ax6 = 
GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr); + ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr); + ay6 = GGML_F32_VEC_FMA(ax6, vx, ay6); + + GGML_F32_VEC_STORE(y + i + 5*ggml_f32_epr, ay6); + + ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr); + ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr); + ay7 = GGML_F32_VEC_FMA(ax7, vx, ay7); + + GGML_F32_VEC_STORE(y + i + 6*ggml_f32_epr, ay7); + + ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr); + ay8 = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr); + ay8 = GGML_F32_VEC_FMA(ax8, vx, ay8); + + GGML_F32_VEC_STORE(y + i + 7*ggml_f32_epr, ay8); } - } + // leftovers + // Since 8 unrolls are done in above loop, leftovers lie in range [0, ggml_f32_step] which is handled in below loop + const int np2 = (n & ~(ggml_f32_epr - 1)); + for (int i = np; i < np2; i += ggml_f32_epr) { + ax1 = GGML_F32_VEC_LOAD(x + i); + ay1 = GGML_F32_VEC_LOAD(y + i); + ay1 = GGML_F32_VEC_FMA(ax1, vx, ay1); - // leftovers - for (int i = np; i < n; ++i) { - y[i] += x[i]*v; - } + GGML_F32_VEC_STORE(y + i, ay1); + } + // maximum number of leftover elements will be less that ggml_f32_epr. Apply predicated svmad on available elements only + if (np2 < n) { + svbool_t pg =svwhilelt_b32(np2, n); + ax1 = svld1_f32(pg, x + np2); + ay1 = svld1_f32(pg, y + np2); + ay1 = svmad_f32_m(pg, ax1, vx, ay1); + + svst1_f32(pg, y + np2, ay1); + } + #else + const int np = (n & ~(GGML_F32_STEP - 1)); + + GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx); + + GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + } + } + + // leftovers + for (int i = np; i < n; ++i) { + y[i] += x[i]*v; + } + #endif #else // scalar for (int i = 0; i < n; ++i) { @@ -220,36 +302,45 @@ inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int } #if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); - - GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL]; - - for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { - vx[k] = GGML_F32_VEC_SET1(v[k][0]); - } - - GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - - for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { - ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]); + #if defined(__ARM_FEATURE_SVE) + // scalar Route to scalar implementation //TODO: Write SVE code + for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { + for (int i = 0; i < n; ++i) { + y[i] += x[k][i]*v[k][0]; } - - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); } - } + #else + const int np = (n & ~(GGML_F32_STEP - 1)); - // leftovers - for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { - for (int i = np; i < n; ++i) { - y[i] += x[k][i]*v[k][0]; + GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL]; + + for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { + vx[k] = GGML_F32_VEC_SET1(v[k][0]); } - } + + GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + + for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { + ax[k][j] = 
GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]); + } + + GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + } + } + + // leftovers + for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { + for (int i = np; i < n; ++i) { + y[i] += x[k][i]*v[k][0]; + } + } + #endif #else // scalar for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { @@ -265,25 +356,53 @@ inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { #if defined(GGML_USE_ACCELERATE) vDSP_vsmul(y, 1, &v, y, 1, n); #elif defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); + #if defined(__ARM_FEATURE_SVE) + const int sve_register_length = ggml_cpu_get_sve_cnt() * 8; + const int ggml_f32_epr = sve_register_length / 32;//8;//svcntw(); // SVE128:4, SVE256:8, SVE512:16 + const int ggml_f32_step = 2 * ggml_f32_epr; - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + const int np = (n & ~(ggml_f32_step - 1)); + svfloat32_t ay1; + svfloat32_t ay2; + for (int i = 0; i < np; i += ggml_f32_step) { + ay1 = GGML_F32_VEC_LOAD(y + i); + ay1 = GGML_F32_VEC_MUL(ay1, vx); + GGML_F32_VEC_STORE(y + i, ay1); - GGML_F32_VEC ay[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_MUL(ay[j], vx); - - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr); + ay2 = GGML_F32_VEC_MUL(ay2, vx); + GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2); } - } + // leftovers + // maximum number of leftover elements will be less that ggml_f32_epr. Apply predicated svmad on available elements only + if (np < n) { + svbool_t pg = svwhilelt_b32(np, n); + ay1 = svld1_f32(pg, y + np); + ay1 = svmul_f32_m(pg, ay1, vx); + svst1_f32(pg, y + np, ay1); + } + #else + const int np = (n & ~(GGML_F32_STEP - 1)); - // leftovers - for (int i = np; i < n; ++i) { - y[i] *= v; - } + GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + + GGML_F32_VEC ay[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_MUL(ay[j], vx); + + GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + } + } + + // leftovers + for (int i = np; i < n; ++i) { + y[i] *= v; + } + #endif #else // scalar for (int i = 0; i < n; ++i) { From 6385b843a8dc8e15b8362196039720c58dd79fa2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Thu, 29 May 2025 08:15:01 +0200 Subject: [PATCH 03/21] llama : add RobertaForSequenceClassification reranker support (#13875) --- convert_hf_to_gguf.py | 17 +++++++++++------ gguf-py/gguf/constants.py | 3 +++ src/llama-arch.cpp | 2 ++ src/llama-arch.h | 2 ++ src/llama-hparams.h | 3 +++ src/llama-model.cpp | 5 +++-- 6 files changed, 24 insertions(+), 8 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 227ae7bc2..797773193 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -3695,6 +3695,10 @@ class BertModel(TextModel): self.gguf_writer.add_causal_attention(False) self._try_set_pooling_type() + if cls_out_labels := self.hparams.get("id2label"): + key_name = gguf.Keys.Classifier.OUTPUT_LABELS.format(arch = gguf.MODEL_ARCH_NAMES[self.model_arch]) + self.gguf_writer.add_array(key_name, [v for k, v in sorted(cls_out_labels.items())]) + def set_vocab(self): tokens, toktypes, tokpre = self.get_vocab_base() self.vocab_size = len(tokens) @@ 
-3745,12 +3749,13 @@ class BertModel(TextModel): if name.startswith("cls.seq_relationship"): return [] - # For BertForSequenceClassification (direct projection layer) - if name == "classifier.weight": - name = "classifier.out_proj.weight" + if self.hparams.get("id2label"): + # For BertForSequenceClassification (direct projection layer) + if name == "classifier.weight": + name = "classifier.out_proj.weight" - if name == "classifier.bias": - name = "classifier.out_proj.bias" + if name == "classifier.bias": + name = "classifier.out_proj.bias" return [(self.map_tensor_name(name), data_torch)] @@ -3846,7 +3851,7 @@ class BertModel(TextModel): self.gguf_writer.add_add_eos_token(True) -@ModelBase.register("RobertaModel") +@ModelBase.register("RobertaModel", "RobertaForSequenceClassification") class RobertaModel(BertModel): model_arch = gguf.MODEL_ARCH.BERT diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 31163effa..635b61f22 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -177,6 +177,9 @@ class Keys: EMBEDDING_LENGTH = "{arch}.convnext.embedding_length" BLOCK_COUNT = "{arch}.convnext.block_count" + class Classifier: + OUTPUT_LABELS = "{arch}.classifier.output_labels" + class Tokenizer: MODEL = "tokenizer.ggml.model" PRE = "tokenizer.ggml.pre" diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index abf436ada..2bb18c85f 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -174,6 +174,8 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, "%s.convnext.embedding_length" }, { LLM_KV_CONVNEXT_BLOCK_COUNT, "%s.convnext.block_count" }, + { LLM_KV_CLASSIFIER_OUTPUT_LABELS, "%s.classifier.output_labels" }, + { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" }, { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" }, { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" }, diff --git a/src/llama-arch.h b/src/llama-arch.h index 41a023da3..930cb4eca 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -213,6 +213,8 @@ enum llm_kv { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, LLM_KV_CONVNEXT_BLOCK_COUNT, + LLM_KV_CLASSIFIER_OUTPUT_LABELS, + // deprecated: LLM_KV_TOKENIZER_PREFIX_ID, LLM_KV_TOKENIZER_SUFFIX_ID, diff --git a/src/llama-hparams.h b/src/llama-hparams.h index 2d72eab18..b2bcb8b01 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -131,6 +131,9 @@ struct llama_hparams { bool attn_soft_cap = false; bool use_kq_norm = true; + // for Classifiers + uint32_t n_cls_out = 1; + // llama4 uint32_t n_moe_layer_step = 0; uint32_t n_no_rope_layer_step = 4; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index e99f5309f..4a4618a2b 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -683,6 +683,7 @@ void llama_model::load_hparams(llama_model_loader & ml) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false); + ml.get_arr_n(LLM_KV_CLASSIFIER_OUTPUT_LABELS, hparams.n_cls_out, false); switch (hparams.n_layer) { case 3: @@ -2121,8 +2122,8 @@ bool llama_model::load_tensors(llama_model_loader & ml) { cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED); cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {n_embd}, TENSOR_NOT_REQUIRED); - cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, 1}, TENSOR_NOT_REQUIRED); - cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"), {1}, TENSOR_NOT_REQUIRED); + cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, 
"weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED); + cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"), {hparams.n_cls_out}, TENSOR_NOT_REQUIRED); } tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); From 5ca82fc1d7f8cb27f3cbb3019e944a80d5219828 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Thu, 29 May 2025 10:00:57 +0200 Subject: [PATCH 04/21] convert : workaround for AutoConfig dummy labels (#13881) --- convert_hf_to_gguf.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 797773193..8fcff0de7 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -3690,14 +3690,20 @@ class BertModel(TextModel): super().__init__(*args, **kwargs) self.vocab_size = None + if cls_out_labels := self.hparams.get("id2label"): + if len(cls_out_labels) == 2 and cls_out_labels[0] == "LABEL_0": + # Remove dummy labels added by AutoConfig + cls_out_labels = None + self.cls_out_labels = cls_out_labels + def set_gguf_parameters(self): super().set_gguf_parameters() self.gguf_writer.add_causal_attention(False) self._try_set_pooling_type() - if cls_out_labels := self.hparams.get("id2label"): + if self.cls_out_labels: key_name = gguf.Keys.Classifier.OUTPUT_LABELS.format(arch = gguf.MODEL_ARCH_NAMES[self.model_arch]) - self.gguf_writer.add_array(key_name, [v for k, v in sorted(cls_out_labels.items())]) + self.gguf_writer.add_array(key_name, [v for k, v in sorted(self.cls_out_labels.items())]) def set_vocab(self): tokens, toktypes, tokpre = self.get_vocab_base() @@ -3749,7 +3755,7 @@ class BertModel(TextModel): if name.startswith("cls.seq_relationship"): return [] - if self.hparams.get("id2label"): + if self.cls_out_labels: # For BertForSequenceClassification (direct projection layer) if name == "classifier.weight": name = "classifier.out_proj.weight" From 66c92061f5c34d2f9a630b7b50cca5ce3ebb4b16 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 29 May 2025 12:17:16 +0300 Subject: [PATCH 05/21] tests : remove json.hpp from a test (#13880) ggml-ci --- tests/test-chat-parser.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/test-chat-parser.cpp b/tests/test-chat-parser.cpp index 2113a1284..59e44e07d 100644 --- a/tests/test-chat-parser.cpp +++ b/tests/test-chat-parser.cpp @@ -7,7 +7,6 @@ // #include #include -#include #include #include "chat-parser.h" @@ -15,8 +14,6 @@ #include "log.h" #include "regex-partial.h" -using json = nlohmann::ordered_json; - template static void assert_equals(const T & expected, const T & actual) { if (expected != actual) { From dd8ba93416f492c168f7e20f0b1434c08ebeaeb5 Mon Sep 17 00:00:00 2001 From: Vineel Abhinav <131174187+vineelabhinav@users.noreply.github.com> Date: Thu, 29 May 2025 14:48:43 +0530 Subject: [PATCH 06/21] ggml: aarch64: Implement SVE F32 kernels for Mamba Sequential Scan Algorithm (#13882) * F32-Mamba-Seq_Scan-SVE * Fix formatting * ggml : missing space --------- Co-authored-by: Georgi Gerganov --- ggml/src/ggml-cpu/ops.cpp | 100 +++++++++++++++++++++++++++----------- ggml/src/ggml-cpu/vec.h | 36 ++++++++++++++ 2 files changed, 108 insertions(+), 28 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index f4692f3cd..d8de7531b 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -7633,39 +7633,83 @@ static void ggml_compute_forward_ssm_scan_f32( const int ir1 = MIN(ir0 + dr, nr); const int ir = ir1 - ir0; - for (int i3 = 0; i3 < n_s; ++i3) { - 
for (int i2 = 0; i2 < n_t; ++i2) { - const float * s0 = (const float *) ((const char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2])); // {d_state, d_inner, n_s} - const float * x = (const float *) ((const char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} - const float * dt = (const float *) ((const char *) src2->data + ir0*(src2->nb[0]) + i2*(src2->nb[1]) + i3*(src2->nb[2])); // {d_inner, n_t, n_s} - const float * A = (const float *) ((const char *) src3->data + ir0*(src3->nb[1])); // {d_state, d_inner} - const float * B = (const float *) ((const char *) src4->data + i2*(src4->nb[1]) + i3*(src4->nb[2])); // {d_state, n_t, n_s} - const float * C = (const float *) ((const char *) src5->data + i2*(src5->nb[1]) + i3*(src5->nb[2])); // {d_state, n_t, n_s} - float * y = ( float *) (( char *) dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} - float * s = ( float *) (( char *) dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[3]); // {d_state, d_inner, n_s} + #ifdef __ARM_FEATURE_SVE + for (int i3 = 0; i3 < n_s; ++i3) { + for (int i2 = 0; i2 < n_t; ++i2) { + const float * s0 = (const float *) ((const char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2])); // {d_state, d_inner, n_s} + const float * x = (const float *) ((const char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} + const float * dt = (const float *) ((const char *) src2->data + ir0*(src2->nb[0]) + i2*(src2->nb[1]) + i3*(src2->nb[2])); // {d_inner, n_t, n_s} + const float * A = (const float *) ((const char *) src3->data + ir0*(src3->nb[1])); // {d_state, d_inner} + const float * B = (const float *) ((const char *) src4->data + i2*(src4->nb[1]) + i3*(src4->nb[2])); // {d_state, n_t, n_s} + const float * C = (const float *) ((const char *) src5->data + i2*(src5->nb[1]) + i3*(src5->nb[2])); // {d_state, n_t, n_s} + float * y = ( float *) (( char *) dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} + float * s = ( float *) (( char *) dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[3]); // {d_state, d_inner, n_s} - // use the output as the source for the next token-wise iterations - if (i2 > 0) { s0 = s; } + // use the output as the source for the next token-wise iterations + if (i2 > 0) { s0 = s; } - // d_inner - for (int i1 = 0; i1 < ir; ++i1) { - // ref: https://github.com/state-spaces/mamba/blob/34076d664838588a3c97727b263478ab9f621a07/mamba_ssm/ops/triton/selective_state_update.py#L78 - float dt_soft_plus = dt[i1] <= 20.0f ? log1pf(expf(dt[i1])) : dt[i1]; - float x_dt = x[i1] * dt_soft_plus; - float sumf = 0.0f; - // d_state - for (int i0 = 0; i0 < nc; ++i0) { - int i = i0 + i1*nc; - // state = prev_state * dA + dB * x - float state = (s0[i] * expf(dt_soft_plus * A[i])) + (B[i0] * x_dt); - // y = rowwise_dotprod(state, C) - sumf += state * C[i0]; - s[i] = state; + // d_inner + for (int i1 = 0; i1 < ir; ++i1) { + float dt_soft_plus = dt[i1] <= 20.0f ? 
log1pf(expf(dt[i1])) : dt[i1]; + float x_dt = x[i1] * dt_soft_plus; + svfloat32_t vx_dt = GGML_F32_VEC_SET1(x_dt); + svfloat32_t vdt_soft_plus = GGML_F32_VEC_SET1(dt_soft_plus); + svfloat32_t r1_vector = GGML_F32_VEC_ZERO; + + for (int64_t k = 0; k < nc; k += svcntw()) { + svfloat32_t vA = GGML_F32_VEC_LOAD(&A[i1*nc + k]); + svfloat32_t vB = GGML_F32_VEC_LOAD(&B[k]); + svfloat32_t vC = GGML_F32_VEC_LOAD(&C[k]); + svfloat32_t vs0 = GGML_F32_VEC_LOAD(&s0[i1*nc + k]); + + svfloat32_t t1 = GGML_F32_VEC_MUL(vdt_soft_plus, vA); + t1 = exp_ps_sve(svptrue_b32(), t1); + svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB); + + vs0 = GGML_F32_VEC_FMA(vs0, t1, t2); + r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector); + + GGML_F32_VEC_STORE(&s[i1*nc + k], vs0); + } + y[i1] = GGML_F32xt_REDUCE_ONE(r1_vector); } - y[i1] = sumf; } } - } + #else + for (int i3 = 0; i3 < n_s; ++i3) { + for (int i2 = 0; i2 < n_t; ++i2) { + const float * s0 = (const float *) ((const char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2])); // {d_state, d_inner, n_s} + const float * x = (const float *) ((const char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} + const float * dt = (const float *) ((const char *) src2->data + ir0*(src2->nb[0]) + i2*(src2->nb[1]) + i3*(src2->nb[2])); // {d_inner, n_t, n_s} + const float * A = (const float *) ((const char *) src3->data + ir0*(src3->nb[1])); // {d_state, d_inner} + const float * B = (const float *) ((const char *) src4->data + i2*(src4->nb[1]) + i3*(src4->nb[2])); // {d_state, n_t, n_s} + const float * C = (const float *) ((const char *) src5->data + i2*(src5->nb[1]) + i3*(src5->nb[2])); // {d_state, n_t, n_s} + float * y = ( float *) (( char *) dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} + float * s = ( float *) (( char *) dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[3]); // {d_state, d_inner, n_s} + + // use the output as the source for the next token-wise iterations + if (i2 > 0) { s0 = s; } + + // d_inner + for (int i1 = 0; i1 < ir; ++i1) { + // ref: https://github.com/state-spaces/mamba/blob/34076d664838588a3c97727b263478ab9f621a07/mamba_ssm/ops/triton/selective_state_update.py#L78 + float dt_soft_plus = dt[i1] <= 20.0f ? 
log1pf(expf(dt[i1])) : dt[i1]; + float x_dt = x[i1] * dt_soft_plus; + float sumf = 0.0f; + // d_state + for (int i0 = 0; i0 < nc; ++i0) { + int i = i0 + i1*nc; + // state = prev_state * dA + dB * x + float state = (s0[i] * expf(dt_soft_plus * A[i])) + (B[i0] * x_dt); + // y = rowwise_dotprod(state, C) + sumf += state * C[i0]; + s[i] = state; + } + y[i1] = sumf; + } + } + } + #endif } void ggml_compute_forward_ssm_scan( diff --git a/ggml/src/ggml-cpu/vec.h b/ggml/src/ggml-cpu/vec.h index 163a27435..09dbade21 100644 --- a/ggml/src/ggml-cpu/vec.h +++ b/ggml/src/ggml-cpu/vec.h @@ -647,6 +647,42 @@ inline static ggml_fp16_t ggml_silu_f16(ggml_fp16_t x) { #error "ref: https://github.com/ggml-org/llama.cpp/pull/7154#issuecomment-2143844461" #endif +/* Below function was borrowed from the GitHub repository: +https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp */ +#if defined(__ARM_FEATURE_SVE) && defined(__aarch64__) + inline static svfloat32_t exp_ps_sve(svbool_t pg, svfloat32_t src) { + // Constants + const svfloat32_t log2_e = svdup_n_f32(1.4426950409f); + const svfloat32_t ln2 = svdup_n_f32(0.6931473921f); + const svfloat32_t half_ln2_sq = svdup_n_f32(0.2413862043f); + const svuint32_t not_mask17 = svdup_n_u32(~((1u << 17) - 1)); + const svfloat32_t one = svdup_n_f32(1.0f); + const svfloat32_t inactive1 = svdup_n_f32(0.0f); + const svint32_t inactive2 = svdup_n_s32(0); + + // Algorithm starts here + svfloat32_t t0 = svmul_f32_m(pg, src, log2_e); // y = x * log2(e) + svfloat32_t t1 = svrintm_f32_m(inactive1, pg, t0); // rount to int (float) + svint32_t t2 = svcvt_s32_f32_m(inactive2, pg, t1); // n + + t1 = svsub_f32_m(pg, t0, t1); // a = y - floor(y) + t1 = svadd_f32_m(pg, t1, one); // b = a + 1 + + svuint32_t t3 = svlsr_n_u32_m(pg, svreinterpret_u32_f32(t1), 17); // v = b >> 17 (u32) + svfloat32_t t4 = svexpa_f32(t3); // c = fexpa(v) + t4 = svscale_f32_m(pg, t4, t2); // fexpa(v) * 2^(n) + + // and_(t2.d, t1.d, not_mask17.d) + svfloat32_t t5 = svreinterpret_f32_u32(svand_u32_m(pg, svreinterpret_u32_f32(t1), not_mask17)); + t5 = svsub_f32_m(pg, t1, t5); // z + t0 = svmla_f32_m(pg, ln2, t5, half_ln2_sq); // ln2 + half_ln2_sq * z + t0 = svmla_f32_m(pg, one, t5, t0); // 1 + (ln2 * z) + (half_ln2_sq * z * z) + t0 = svmul_f32_m(pg, t0, t4); // Final result + + return t0; + } +#endif + #if defined(__ARM_NEON) && defined(__aarch64__) // adapted from arm limited optimized routine From 21fcc21ad5e5de2daa5da8d08dbbcc86b8d815d7 Mon Sep 17 00:00:00 2001 From: Christian Kastner Date: Thu, 29 May 2025 12:50:25 +0200 Subject: [PATCH 07/21] cmake: Factor out CPU architecture detection (#13883) * cmake: Define function for querying architecture The tests and results match exactly those of ggml/src/CMakeLists.txt * Switch arch detection over to new function --- ggml/cmake/common.cmake | 25 +++++++++++++++++++++++++ ggml/src/CMakeLists.txt | 2 ++ ggml/src/ggml-cpu/CMakeLists.txt | 24 +++++++----------------- 3 files changed, 34 insertions(+), 17 deletions(-) diff --git a/ggml/cmake/common.cmake b/ggml/cmake/common.cmake index 1976d0ae9..bb1ec9b37 100644 --- a/ggml/cmake/common.cmake +++ b/ggml/cmake/common.cmake @@ -24,3 +24,28 @@ function(ggml_get_flags CCID CCVER) set(GF_C_FLAGS ${C_FLAGS} PARENT_SCOPE) set(GF_CXX_FLAGS ${CXX_FLAGS} PARENT_SCOPE) endfunction() + +function(ggml_get_system_arch) + if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR + CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT 
CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) + set(GGML_SYSTEM_ARCH "ARM" PARENT_SCOPE) + elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR + CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) + set(GGML_SYSTEM_ARCH "x86" PARENT_SCOPE) + elseif ("${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "ppc64le " OR + "${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "powerpc ") + set(GGML_SYSTEM_ARCH "PowerPC" PARENT_SCOPE) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") + set(GGML_SYSTEM_ARCH "loongarch64" PARENT_SCOPE) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "riscv64") + set(GGML_SYSTEM_ARCH "riscv64" PARENT_SCOPE) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x") + set(GGML_SYSTEM_ARCH "s390x" PARENT_SCOPE) + else() + set(GGML_SYSTEM_ARCH "UNKNOWN" PARENT_SCOPE) + endif() +endfunction() diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index ddea5ad38..330f9d820 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -109,6 +109,8 @@ if (MSVC) else () set(CMAKE_GENERATOR_PLATFORM_LWR "") endif () +ggml_get_system_arch() +message(STATUS "GGML_SYSTEM_ARCH: ${GGML_SYSTEM_ARCH}") if (NOT MSVC) if (GGML_STATIC) diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index bf4fe79a9..b3237eead 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -82,13 +82,8 @@ function(ggml_add_cpu_backend_variant_impl tag_name) target_link_libraries(${GGML_CPU_NAME} PUBLIC memkind) endif() - if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR - CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR - (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND - CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) - + if (GGML_SYSTEM_ARCH STREQUAL "ARM") message(STATUS "ARM detected") - if (MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang") message(FATAL_ERROR "MSVC is not supported for ARM, use clang") else() @@ -170,12 +165,8 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endforeach() endif() endif() - elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR - (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND - CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) - + elseif (GGML_SYSTEM_ARCH STREQUAL "x86") message(STATUS "x86 detected") - if (MSVC) # instruction set detection for MSVC only if (GGML_NATIVE) @@ -318,7 +309,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) set_target_properties(${GGML_CPU_FEATS_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON) target_link_libraries(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_FEATS_NAME}) endif() - elseif ("${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "ppc64le " OR "${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "powerpc ") + elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") message(STATUS "PowerPC detected") if (GGML_NATIVE) if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") @@ -344,9 +335,8 @@ function(ggml_add_cpu_backend_variant_impl tag_name) list(APPEND ARCH_FLAGS -mcpu=${GGML_CPU_POWERPC_CPUTYPE}) endif() endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") + elseif (GGML_SYSTEM_ARCH STREQUAL "loongarch64") message(STATUS "loongarch64 detected") - list(APPEND ARCH_FLAGS -march=loongarch64) if (GGML_LASX) list(APPEND ARCH_FLAGS -mlasx) @@ -354,8 +344,8 @@ 
function(ggml_add_cpu_backend_variant_impl tag_name) if (GGML_LSX) list(APPEND ARCH_FLAGS -mlsx) endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "riscv64") - message(STATUS "RISC-V detected") + elseif (GGML_SYSTEM_ARCH STREQUAL "riscv64") + message(STATUS "riscv64 detected") if (GGML_RVV) if (GGML_XTHEADVECTOR) list(APPEND ARCH_FLAGS -march=rv64gc_xtheadvector -mabi=lp64d) @@ -365,7 +355,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) list(APPEND ARCH_FLAGS -march=rv64gcv -mabi=lp64d) endif() endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x") + elseif (GGML_SYSTEM_ARCH STREQUAL "s390x") message(STATUS "s390x detected") file(READ "/proc/cpuinfo" CPUINFO_CONTENTS) string(REGEX REPLACE "machine[ \t\r\n]*=[ \t\r\n]*([0-9]+)" "\\1" S390X_M ${CPUINFO_CONTENTS}) From 54a2c7a8cd8a32b44e3a98c2999b0f5c9114be5c Mon Sep 17 00:00:00 2001 From: Yibo Cai Date: Thu, 29 May 2025 19:39:20 +0800 Subject: [PATCH 08/21] arm64: optimize q4_k_q8_k kernel with i8mm (#13886) This PR improves q4_k_q8_k gemm kernel with arm64 i8mm instruction. Tested on neoverse-n2 with llama3 8b q4_k_m quantization model. - 34% ~ 50% S_PP uplift for all batch sizes - 12% ~ 37% S_TG uplift for batch size 4 and above Perplexity doesn't change with this PR. ``` // tested on neoverse-n2 $ llama-batched-bench \ -m Meta-Llama-3-8B-Instruct-Q4_K_M.gguf \ --no-mmap -fa \ -c 8192 -b 4096 -ub 512 -npp 128 -ntg 128 \ -npl 1,2,4,8,16,32 \ -t 64 --------------------------------------------------------------------- | PP | TG | B | S_PP t/s | S_TG t/s | | | | | original | this pr | original | this pr | |-------|--------|------|----------|----------|----------|----------| | 128 | 128 | 1 | 110.12 | 147.83 | 24.36 | 24.28 | | 128 | 128 | 2 | 121.16 | 172.42 | 46.36 | 47.93 | | 128 | 128 | 4 | 120.15 | 169.75 | 74.68 | 84.00 | | 128 | 128 | 8 | 130.97 | 196.81 | 91.04 | 114.74 | | 128 | 128 | 16 | 131.01 | 196.88 | 101.43 | 135.79 | | 128 | 128 | 32 | 130.85 | 196.51 | 106.97 | 147.29 | --------------------------------------------------------------------- ``` --- ggml/src/ggml-cpu/ggml-cpu-quants.c | 144 ++++++++++++++++++++++++++++ ggml/src/ggml-cpu/ggml-cpu.c | 4 + 2 files changed, 148 insertions(+) diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ggml/src/ggml-cpu/ggml-cpu-quants.c index fe4a5a833..40bded476 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-quants.c +++ b/ggml/src/ggml-cpu/ggml-cpu-quants.c @@ -6995,7 +6995,11 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); +#ifdef __ARM_FEATURE_MATMUL_INT8 + assert((nrc == 2) || (nrc == 1)); +#else assert(nrc == 1); +#endif UNUSED(nrc); UNUSED(bx); UNUSED(by); @@ -7012,6 +7016,146 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi uint32_t utmp[4]; +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q4_K * GGML_RESTRICT x0 = x; + const block_q4_K * GGML_RESTRICT x1 = (const block_q4_K *) ((const uint8_t *)vx + bx); + const block_q8_K * GGML_RESTRICT y0 = y; + const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by); + + const uint8x16_t m4b = vdupq_n_u8(0x0f); + + float32x4_t vfsum = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) { + const uint8_t * GGML_RESTRICT qx0 = x0->qs; + const uint8_t * GGML_RESTRICT qx1 = x1->qs; + const int8_t * 
GGML_RESTRICT qy0 = y0->qs; + const int8_t * GGML_RESTRICT qy1 = y1->qs; + + // decode scales and mins + int8_t x0_scales[8], x1_scales[8]; + int16x8_t x0_mins, x1_mins; + { + uint32_t scales_mins[3]; + memcpy(scales_mins, x0->scales, 12); + const uint32_t mins_0_3 = scales_mins[1] & kmask1; + const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); + const uint32x2_t mins = {mins_0_3, mins_4_7}; + x0_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); + uint32_t scales[2]; + scales[0] = scales_mins[0] & kmask1; // scales 0~3 + scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 + memcpy(x0_scales, scales, 8); + } + { + uint32_t scales_mins[3]; + memcpy(scales_mins, x1->scales, 12); + const uint32_t mins_0_3 = scales_mins[1] & kmask1; + const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); + const uint32x2_t mins = {mins_0_3, mins_4_7}; + x1_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); + uint32_t scales[2]; + scales[0] = scales_mins[0] & kmask1; // scales 0~3 + scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 + memcpy(x1_scales, scales, 8); + } + + int32x4_t visum = {0}; + + // process 64 data points per iteration, totally 256 data points + for (int j = 0; j < QK_K / 64; ++j, qx0 += 32, qx1 += 32, qy0 += 64, qy1 += 64) { + const int8x16x4_t vy0 = vld1q_s8_x4(qy0); + const int8x16x4_t vy1 = vld1q_s8_x4(qy1); + + int8x16_t vx0[4], vx1[4]; + { + const uint8x16x2_t vv = vld1q_u8_x2(qx0); + vx0[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); + vx0[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); + vx0[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); + vx0[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); + } + { + const uint8x16x2_t vv = vld1q_u8_x2(qx1); + vx1[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); + vx1[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); + vx1[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); + vx1[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); + } + + // process 32 data points (share same block scale) per iteration + for (int k = 0; k < 2; ++k) { + const int blk = j * 2 + k; + const int32x4_t block_scale = { + x0_scales[blk], + x0_scales[blk], + x1_scales[blk], + x1_scales[blk], + }; + + int32x4_t vr = {0}; + for (int l = 0; l < 2; ++l) { + const int idx = k * 2 + l; + const int64x2_t vx0_s64 = vreinterpretq_s64_s8(vx0[idx]); + const int64x2_t vx1_s64 = vreinterpretq_s64_s8(vx1[idx]); + const int64x2_t vy0_s64 = vreinterpretq_s64_s8(vy0.val[idx]); + const int64x2_t vy1_s64 = vreinterpretq_s64_s8(vy1.val[idx]); + const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vx0_s64, vx1_s64)); + const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vx0_s64, vx1_s64)); + const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vy0_s64, vy1_s64)); + const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vy0_s64, vy1_s64)); + vr = vmmlaq_s32(vr, vx_l, vy_l); + vr = vmmlaq_s32(vr, vx_h, vy_h); + } + // apply block scale, will NOT overflow + // block_scale * sum_256(int4*int8) <= 2^(8+8+4+8) = 28 bits + visum = vmlaq_s32(visum, vr, block_scale); + } + } + + // adjust bias, apply superblock scale + { + int32_t bias[4]; + // no obvious uplift from sve sdot-16, just use neon mul add + const int16x8_t y0_sums = vpaddq_s16(vld1q_s16(y0->bsums), vld1q_s16(y0->bsums+8)); + const int16x8_t y1_sums = vpaddq_s16(vld1q_s16(y1->bsums), 
vld1q_s16(y1->bsums+8)); + bias[0] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x0_mins)), + vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x0_mins)))); + bias[1] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x0_mins)), + vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x0_mins)))); + bias[2] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x1_mins)), + vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x1_mins)))); + bias[3] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x1_mins)), + vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x1_mins)))); + const float32x4_t dmins = { + GGML_FP16_TO_FP32(x0->dmin) * y0->d, + GGML_FP16_TO_FP32(x0->dmin) * y1->d, + GGML_FP16_TO_FP32(x1->dmin) * y0->d, + GGML_FP16_TO_FP32(x1->dmin) * y1->d, + }; + vfsum = vmlsq_f32(vfsum, vcvtq_f32_s32(vld1q_s32(bias)), dmins); + + const float32x4_t superblock_scale = { + GGML_FP16_TO_FP32(x0->d) * y0->d, + GGML_FP16_TO_FP32(x0->d) * y1->d, + GGML_FP16_TO_FP32(x1->d) * y0->d, + GGML_FP16_TO_FP32(x1->d) * y1->d, + }; + vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); + } + } + + // vfsum = ABCD -> ACBD + // AC -> s, BD -> (s+bs) + vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2)); + vst1_f32(s, vget_low_f32 (vfsum)); + vst1_f32(s + bs, vget_high_f32(vfsum)); + + return; + } +#endif + #ifdef __ARM_FEATURE_SVE float sumf = 0; for (int i = 0; i < nb; ++i) { diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index aa51dc21a..1dc425fef 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -270,7 +270,11 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { .from_float = quantize_row_q4_K, .vec_dot = ggml_vec_dot_q4_K_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, +#if defined (__ARM_FEATURE_MATMUL_INT8) + .nrows = 2, +#else .nrows = 1, +#endif }, [GGML_TYPE_Q5_K] = { .from_float = quantize_row_q5_K, From 2b131621e60d8ec2cc961201beb6773ab37b6b69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Thu, 29 May 2025 15:36:05 +0200 Subject: [PATCH 09/21] gguf-py : add support for sub_type (in arrays) in GGUFWriter add_key_value method (#13561) --- gguf-py/gguf/gguf_writer.py | 13 +++++++----- gguf-py/gguf/scripts/gguf_editor_gui.py | 21 ++++++++++++------- gguf-py/gguf/scripts/gguf_new_metadata.py | 7 +++++-- gguf-py/pyproject.toml | 2 +- requirements/requirements-gguf_editor_gui.txt | 2 +- 5 files changed, 29 insertions(+), 16 deletions(-) diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index e507b7019..de6e45ae8 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -49,6 +49,7 @@ class TensorInfo: class GGUFValue: value: Any type: GGUFValueType + sub_type: GGUFValueType | None = None class WriterState(Enum): @@ -238,7 +239,7 @@ class GGUFWriter: for key, val in kv_data.items(): kv_bytes += self._pack_val(key, GGUFValueType.STRING, add_vtype=False) - kv_bytes += self._pack_val(val.value, val.type, add_vtype=True) + kv_bytes += self._pack_val(val.value, val.type, add_vtype=True, sub_type=val.sub_type) fout.write(kv_bytes) @@ -268,11 +269,11 @@ class GGUFWriter: fout.flush() self.state = WriterState.TI_DATA - def add_key_value(self, key: str, val: Any, vtype: GGUFValueType) -> None: + def add_key_value(self, key: str, val: Any, vtype: GGUFValueType, sub_type: GGUFValueType | None = None) -> None: if any(key in kv_data for kv_data in self.kv_data): raise ValueError(f'Duplicated key name 
{key!r}') - self.kv_data[0][key] = GGUFValue(value=val, type=vtype) + self.kv_data[0][key] = GGUFValue(value=val, type=vtype, sub_type=sub_type) def add_uint8(self, key: str, val: int) -> None: self.add_key_value(key,val, GGUFValueType.UINT8) @@ -1022,7 +1023,7 @@ class GGUFWriter: pack_prefix = '<' if self.endianess == GGUFEndian.LITTLE else '>' return struct.pack(f'{pack_prefix}{fmt}', value) - def _pack_val(self, val: Any, vtype: GGUFValueType, add_vtype: bool) -> bytes: + def _pack_val(self, val: Any, vtype: GGUFValueType, add_vtype: bool, sub_type: GGUFValueType | None = None) -> bytes: kv_data = bytearray() if add_vtype: @@ -1043,7 +1044,9 @@ class GGUFWriter: if len(val) == 0: raise ValueError("Invalid GGUF metadata array. Empty array") - if isinstance(val, bytes): + if sub_type is not None: + ltype = sub_type + elif isinstance(val, bytes): ltype = GGUFValueType.UINT8 else: ltype = GGUFValueType.get_type(val[0]) diff --git a/gguf-py/gguf/scripts/gguf_editor_gui.py b/gguf-py/gguf/scripts/gguf_editor_gui.py index 3d38b5cba..05f4db0f8 100755 --- a/gguf-py/gguf/scripts/gguf_editor_gui.py +++ b/gguf-py/gguf/scripts/gguf_editor_gui.py @@ -1521,19 +1521,21 @@ class GGUFEditorWindow(QMainWindow): continue # Apply changes if any + sub_type = None if field.name in self.metadata_changes: value_type, value = self.metadata_changes[field.name] if value_type == GGUFValueType.ARRAY: # Handle array values - element_type, array_values = value - writer.add_array(field.name, array_values) - else: - writer.add_key_value(field.name, value, value_type) + sub_type, value = value else: # Copy original value value = field.contents() - if value is not None and field.types: - writer.add_key_value(field.name, value, field.types[0]) + value_type = field.types[0] + if value_type == GGUFValueType.ARRAY: + sub_type = field.types[-1] + + if value is not None: + writer.add_key_value(field.name, value, value_type, sub_type=sub_type) # Add new metadata for key, (value_type, value) in self.metadata_changes.items(): @@ -1541,7 +1543,12 @@ class GGUFEditorWindow(QMainWindow): if self.reader.get_field(key) is not None: continue - writer.add_key_value(key, value, value_type) + sub_type = None + if value_type == GGUFValueType.ARRAY: + # Handle array values + sub_type, value = value + + writer.add_key_value(key, value, value_type, sub_type=sub_type) # Add tensors (including data) for tensor in self.reader.tensors: diff --git a/gguf-py/gguf/scripts/gguf_new_metadata.py b/gguf-py/gguf/scripts/gguf_new_metadata.py index 7aff6c925..63f230034 100755 --- a/gguf-py/gguf/scripts/gguf_new_metadata.py +++ b/gguf-py/gguf/scripts/gguf_new_metadata.py @@ -24,6 +24,7 @@ class MetadataDetails(NamedTuple): type: gguf.GGUFValueType value: Any description: str = '' + sub_type: gguf.GGUFValueType | None = None def get_field_data(reader: gguf.GGUFReader, key: str) -> Any: @@ -57,7 +58,9 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new logger.debug(f'Removing {field.name}') continue - old_val = MetadataDetails(field.types[0], field.contents()) + val_type = field.types[0] + sub_type = field.types[-1] if val_type == gguf.GGUFValueType.ARRAY else None + old_val = MetadataDetails(val_type, field.contents(), sub_type=sub_type) val = new_metadata.get(field.name, old_val) if field.name in new_metadata: @@ -67,7 +70,7 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new logger.debug(f'Copying {field.name}') if val.value is not None: - writer.add_key_value(field.name, val.value, val.type) + 
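For context, a minimal sketch of how the new `sub_type` parameter could be used from user code. The `add_key_value(..., sub_type=...)` signature is the one introduced by this patch; the output file name, architecture string, key name, and the write sequence at the end are assumptions based on the usual gguf-py usage and are only illustrative:

```
# Sketch: write a string-array KV pair with an explicit element type using the
# add_key_value(..., sub_type=...) signature added in this patch.
# Assumes gguf-py >= 0.17.0; file name, arch and key are example values.
from gguf import GGUFWriter, GGUFValueType

writer = GGUFWriter("example.gguf", "llama")  # hypothetical output file / arch

# Without sub_type the element type is inferred from the first element;
# passing sub_type pins it explicitly (useful when copying arrays whose
# element type is already known from a GGUFReader field).
writer.add_key_value(
    "example.labels",
    ["alpha", "beta", "gamma"],
    GGUFValueType.ARRAY,
    sub_type=GGUFValueType.STRING,
)

# assumed to follow the usual gguf-py write sequence
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.write_tensors_to_file()
writer.close()
```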
writer.add_key_value(field.name, val.value, val.type, sub_type=sub_type if val.sub_type is None else val.sub_type) if gguf.Keys.Tokenizer.CHAT_TEMPLATE in new_metadata: logger.debug('Adding chat template(s)') diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index bb9b86ace..f11351cba 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gguf" -version = "0.16.3" +version = "0.17.0" description = "Read and write ML models in GGUF for GGML" authors = ["GGML "] packages = [ diff --git a/requirements/requirements-gguf_editor_gui.txt b/requirements/requirements-gguf_editor_gui.txt index 920dc7cf9..fd253364e 100644 --- a/requirements/requirements-gguf_editor_gui.txt +++ b/requirements/requirements-gguf_editor_gui.txt @@ -1,3 +1,3 @@ numpy~=1.26.4 PySide6~=6.9.0 -gguf>=0.16.0 +gguf>=0.17.0 From e83ba3e460651b20a594e9f2f0f0bffb998d3ce1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Thu, 29 May 2025 21:42:31 +0200 Subject: [PATCH 10/21] llama : add support for jina-reranker-v2 (#13900) --- convert_hf_to_gguf.py | 120 +++++++++++++++++++++++---------- gguf-py/gguf/constants.py | 1 + gguf-py/gguf/tensor_mapping.py | 2 + src/llama-arch.cpp | 1 + src/llama-model.cpp | 67 +++++++++--------- 5 files changed, 119 insertions(+), 72 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 8fcff0de7..868bb6826 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -3782,44 +3782,93 @@ class BertModel(TextModel): from sentencepiece import sentencepiece_model_pb2 as model tokenizer_path = self.dir_model / 'sentencepiece.bpe.model' + + tokenizer_json = {} + tokenizer_config_json = {} if not tokenizer_path.is_file(): - raise FileNotFoundError(f"File not found: {tokenizer_path}") + tokenizer_path = self.dir_model / 'tokenizer.json' + tokenizer_config_path = self.dir_model / 'tokenizer_config.json' - sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] - sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) - assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM + if not tokenizer_path.is_file(): + raise FileNotFoundError(f"File not found: {tokenizer_path}") - add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix - remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces - precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap + from base64 import b64decode + from transformers import AutoTokenizer + tokenizer = AutoTokenizer.from_pretrained(self.dir_model) - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) + with open(tokenizer_path, "r", encoding="utf-8") as fp: + tokenizer_json = json.load(fp) - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) + if tokenizer_config_path.is_file(): + with open(tokenizer_config_path, "r", encoding="utf-8") as fp: + tokenizer_config_json = json.load(fp) + + add_prefix = tokenizer.add_prefix_space + remove_whitespaces = tokenizer.clean_up_tokenization_spaces + precompiled_charsmap = b64decode(tokenizer_json["normalizer"]["precompiled_charsmap"]) + + vocab_size = self.hparams.get("vocab_size", tokenizer.vocab_size) + else: + sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] + sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) + assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM + + add_prefix = 
sentencepiece_model.normalizer_spec.add_dummy_prefix + remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces + precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap + + tokenizer = SentencePieceProcessor() + tokenizer.LoadFromFile(str(tokenizer_path)) + + vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] scores: list[float] = [-10000.0] * vocab_size toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - for token_id in range(tokenizer.vocab_size()): - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) + if isinstance(tokenizer, SentencePieceProcessor): + for token_id in range(tokenizer.vocab_size()): + piece = tokenizer.IdToPiece(token_id) + text = piece.encode("utf-8") + score = tokenizer.GetScore(token_id) - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = SentencePieceTokenTypes.BYTE + toktype = SentencePieceTokenTypes.NORMAL + if tokenizer.IsUnknown(token_id): + toktype = SentencePieceTokenTypes.UNKNOWN + elif tokenizer.IsControl(token_id): + toktype = SentencePieceTokenTypes.CONTROL + elif tokenizer.IsUnused(token_id): + toktype = SentencePieceTokenTypes.UNUSED + elif tokenizer.IsByte(token_id): + toktype = SentencePieceTokenTypes.BYTE - tokens[token_id] = text - scores[token_id] = score - toktypes[token_id] = toktype + tokens[token_id] = text + scores[token_id] = score + toktypes[token_id] = toktype + else: + added_vocab = tokenizer.get_added_vocab() + unk_token = tokenizer_config_json.get("unk_token") + unk_token_id = added_vocab.get(unk_token, tokenizer_json["model"].get("unk_id", 3)) + + for token_id in range(vocab_size): + piece = tokenizer._convert_id_to_token(token_id) + text = piece.encode("utf-8") + score = tokenizer_json["model"]["vocab"][token_id][1] + + toktype = SentencePieceTokenTypes.NORMAL + if token_id == unk_token_id: + toktype = SentencePieceTokenTypes.UNKNOWN + elif token_id in tokenizer.all_special_ids: + toktype = SentencePieceTokenTypes.CONTROL + elif token_id in added_vocab.values(): + toktype = SentencePieceTokenTypes.USER_DEFINED + # No reliable way to detect this, but jina doesn't have any + # elif tokenizer.IsByte(token_id): + # toktype = SentencePieceTokenTypes.BYTE + + tokens[token_id] = text + scores[token_id] = score + toktypes[token_id] = toktype if vocab_size > len(tokens): pad_count = vocab_size - len(tokens) @@ -3829,15 +3878,16 @@ class BertModel(TextModel): scores.append(-1000.0) toktypes.append(SentencePieceTokenTypes.UNUSED) - # realign tokens (see HF tokenizer code) - tokens = [b'', b'', b'', b''] + tokens[3:-1] - scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1] - toktypes = [ - SentencePieceTokenTypes.CONTROL, - SentencePieceTokenTypes.CONTROL, - SentencePieceTokenTypes.CONTROL, - SentencePieceTokenTypes.UNKNOWN, - ] + toktypes[3:-1] + if isinstance(tokenizer, SentencePieceProcessor): + # realign tokens (see HF tokenizer code) + tokens = [b'', b'', b'', b''] + tokens[3:-1] + scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1] + toktypes = [ + SentencePieceTokenTypes.CONTROL, + SentencePieceTokenTypes.CONTROL, + SentencePieceTokenTypes.CONTROL, + 
SentencePieceTokenTypes.UNKNOWN, + ] + toktypes[3:-1] self.gguf_writer.add_tokenizer_model("t5") self.gguf_writer.add_tokenizer_pre("default") diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 635b61f22..3ee2b2064 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -1036,6 +1036,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.POS_EMBD, MODEL_TENSOR.OUTPUT_NORM, MODEL_TENSOR.ATTN_OUT_NORM, + MODEL_TENSOR.ATTN_QKV, MODEL_TENSOR.ATTN_Q, MODEL_TENSOR.ATTN_K, MODEL_TENSOR.ATTN_V, diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 48167dd64..d0dad7036 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -157,6 +157,7 @@ class TensorNameMap: "h.{bid}.attn.c_attn", # gpt2 "transformer.h.{bid}.mixer.Wqkv", # phi2 "encoder.layers.{bid}.attn.Wqkv", # nomic-bert + "encoder.layers.{bid}.mixer.Wqkv", # jina "model.layers.{bid}.self_attn.qkv_proj", # phi3 "encoder.layers.{bid}.self_attention.query_key_value", # chatglm "transformer.layers.{bid}.attn.qkv_proj", # openelm @@ -224,6 +225,7 @@ class TensorNameMap: "model.layers.layers.{bid}.self_attn.o_proj", # plamo "model.layers.{bid}.attention.wo", # internlm2 "encoder.layers.{bid}.attn.out_proj", # nomic-bert + "encoder.layers.{bid}.mixer.out_proj", # jina "transformer.decoder_layer.{bid}.multi_head_attention.linear", # Grok "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj", # dbrx "encoder.layers.{bid}.self_attention.dense", # chatglm diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 2bb18c85f..c0590e105 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -450,6 +450,7 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_TOKEN_TYPES, "token_types" }, { LLM_TENSOR_POS_EMBD, "position_embd" }, { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 4a4618a2b..ecaae6bf0 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -2132,7 +2132,10 @@ bool llama_model::load_tensors(llama_model_loader & ml) { for (int i = 0; i < n_layer; ++i) { auto & layer = layers[i]; - if (arch == LLM_ARCH_BERT) { + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED); + + if (!layer.wqkv) { layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); @@ -2141,12 +2144,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0); - } else { - layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); - } - - if (arch == LLM_ARCH_NOMIC_BERT_MOE) { - layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0); } layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); @@ -5910,36 +5907,11 @@ struct llm_build_bert : public llm_graph_context { ggml_tensor * Vcur; // self-attention - if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) { - Qcur = 
ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq); - - if (model.layers[il].attn_q_norm) { - Qcur = build_norm(Qcur, - model.layers[il].attn_q_norm, - model.layers[il].attn_q_norm_b, - LLM_NORM, il); - } - - Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk); - - if (model.layers[il].attn_k_norm) { - Kcur = build_norm(Kcur, - model.layers[il].attn_k_norm, - model.layers[il].attn_k_norm_b, - LLM_NORM, il); - } - - Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - } else { - // compute Q and K and RoPE them + if (model.layers[il].wqkv) { cur = build_lora_mm(model.layers[il].wqkv, cur); cb(cur, "wqkv", il); - if (model.arch == LLM_ARCH_NOMIC_BERT_MOE) { + if (model.layers[il].bqkv) { cur = ggml_add(ctx0, cur, model.layers[il].bqkv); cb(cur, "bqkv", il); } @@ -5947,11 +5919,32 @@ struct llm_build_bert : public llm_graph_context { Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + } else { + Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq); + Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk); + Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv); + } - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + if (model.layers[il].attn_q_norm) { + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, + model.layers[il].attn_q_norm_b, + LLM_NORM, il); + } + if (model.layers[il].attn_k_norm) { + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, + model.layers[il].attn_k_norm_b, + LLM_NORM, il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + // RoPE + if (model.arch == LLM_ARCH_NOMIC_BERT || model.arch == LLM_ARCH_NOMIC_BERT_MOE) { Qcur = ggml_rope_ext( ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, From ec9e0301fef6476df83e94842c3b625501c95566 Mon Sep 17 00:00:00 2001 From: Christian Kastner Date: Fri, 30 May 2025 01:28:54 +0200 Subject: [PATCH 11/21] cmake: Guard GGML_CPU_ALL_VARIANTS by architecture (#13890) --- ggml/src/CMakeLists.txt | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 330f9d820..5681ecddb 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -289,16 +289,20 @@ if (GGML_CPU_ALL_VARIANTS) if (NOT GGML_BACKEND_DL) message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL") endif() - ggml_add_cpu_backend_variant(x64) - ggml_add_cpu_backend_variant(sse42 SSE42) - ggml_add_cpu_backend_variant(sandybridge SSE42 AVX) - ggml_add_cpu_backend_variant(haswell SSE42 AVX F16C AVX2 BMI2 FMA) - 
ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C AVX2 BMI2 FMA AVX512) - ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) - ggml_add_cpu_backend_variant(alderlake SSE42 AVX F16C AVX2 BMI2 FMA AVX_VNNI) - if (NOT MSVC) - # MSVC doesn't support AMX - ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) + if (GGML_SYSTEM_ARCH STREQUAL "x86") + ggml_add_cpu_backend_variant(x64) + ggml_add_cpu_backend_variant(sse42 SSE42) + ggml_add_cpu_backend_variant(sandybridge SSE42 AVX) + ggml_add_cpu_backend_variant(haswell SSE42 AVX F16C AVX2 BMI2 FMA) + ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C AVX2 BMI2 FMA AVX512) + ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) + ggml_add_cpu_backend_variant(alderlake SSE42 AVX F16C AVX2 BMI2 FMA AVX_VNNI) + if (NOT MSVC) + # MSVC doesn't support AMX + ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) + endif() + else() + message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS not yet supported on ${GGML_SYSTEM_ARCH}") endif() elseif (GGML_CPU) ggml_add_cpu_backend_variant_impl("") From 2c90da4c7ec694797f524042aaafbb047a7e65ff Mon Sep 17 00:00:00 2001 From: zhangkaihuo Date: Fri, 30 May 2025 16:31:48 +0800 Subject: [PATCH 12/21] llama : use llm_build_granite for minicpm (#13911) --- src/llama-model.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-model.cpp b/src/llama-model.cpp index ecaae6bf0..a1aa51412 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -13260,7 +13260,6 @@ llm_graph_result_ptr llama_model::build_graph( switch (arch) { case LLM_ARCH_LLAMA: - case LLM_ARCH_MINICPM: { llm = std::make_unique(*this, params, gf); } break; @@ -13501,6 +13500,7 @@ llm_graph_result_ptr llama_model::build_graph( } break; case LLM_ARCH_GRANITE: case LLM_ARCH_GRANITE_MOE: + case LLM_ARCH_MINICPM: { llm = std::make_unique(*this, params, gf); } break; From 291f2b6913c7ef8350dbf0e77da38f7af131a08e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90inh=20Tr=E1=BB=8Dng=20Huy?= <77562200+huydt84@users.noreply.github.com> Date: Fri, 30 May 2025 18:56:02 +0900 Subject: [PATCH 13/21] llama : add support for DistilBert (#13907) * add distilbert * small fixes * add note for LLM_ARCH_DISTIL_BERT * Use MODEL_ARCH.BERT for DistilBert --------- Co-authored-by: dinhhuy --- convert_hf_to_gguf.py | 26 +++++++++++++++++++++++--- gguf-py/gguf/tensor_mapping.py | 9 +++++++++ src/llama-model.cpp | 8 +++++--- 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 868bb6826..5b4ed0d5d 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -523,15 +523,15 @@ class TextModel(ModelBase): self.gguf_writer.add_context_length(n_ctx) logger.info(f"gguf: context length = {n_ctx}") - if (n_embd := self.find_hparam(["hidden_size", "n_embd"], optional=True)) is not None: + if (n_embd := self.find_hparam(["hidden_size", "n_embd", "dim"], optional=True)) is not None: self.gguf_writer.add_embedding_length(n_embd) logger.info(f"gguf: embedding length = {n_embd}") - if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None: + if (n_ff := self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"], optional=True)) is not None: self.gguf_writer.add_feed_forward_length(n_ff) logger.info(f"gguf: feed forward 
length = {n_ff}") - if (n_head := self.find_hparam(["num_attention_heads", "n_head"], optional=True)) is not None: + if (n_head := self.find_hparam(["num_attention_heads", "n_head", "n_heads"], optional=True)) is not None: self.gguf_writer.add_head_count(n_head) logger.info(f"gguf: head count = {n_head}") @@ -3907,6 +3907,26 @@ class BertModel(TextModel): self.gguf_writer.add_add_eos_token(True) +@ModelBase.register("DistilBertModel", "DistilBertForMaskedLM", "DistilBertForSequenceClassification") +class DistilBertModel(BertModel): + model_arch = gguf.MODEL_ARCH.BERT + + def set_gguf_parameters(self): + self.gguf_writer.add_layer_norm_eps(1e-12) + logger.info("gguf: layer norm epsilon = 1e-12") + super().set_gguf_parameters() + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + if name.startswith("distilbert."): + name = name[11:] + + # These layers act as MLM head, so we don't need them + if name.startswith("vocab_"): + return [] + + return super().modify_tensors(data_torch, name, bid) + + @ModelBase.register("RobertaModel", "RobertaForSequenceClassification") class RobertaModel(BertModel): model_arch = gguf.MODEL_ARCH.BERT diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index d0dad7036..93dd1d802 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -169,6 +169,7 @@ class TensorNameMap: "model.layers.{bid}.self_attn.q_proj_no_perm", # llama-custom "layers.{bid}.attention.wq", # llama-pth "encoder.layer.{bid}.attention.self.query", # bert + "transformer.layer.{bid}.attention.q_lin", # distillbert "transformer.h.{bid}.attn.q_proj", # gpt-j "model.layers.layers.{bid}.self_attn.q_proj", # plamo "model.layers.{bid}.attention.wq", # internlm2 @@ -183,6 +184,7 @@ class TensorNameMap: "model.layers.{bid}.self_attn.k_proj_no_perm", # llama-custom "layers.{bid}.attention.wk", # llama-pth "encoder.layer.{bid}.attention.self.key", # bert + "transformer.layer.{bid}.attention.k_lin", # distillbert "transformer.h.{bid}.attn.k_proj", # gpt-j "transformer.h.{bid}.attn.k", # refact "model.layers.layers.{bid}.self_attn.k_proj", # plamo @@ -197,6 +199,7 @@ class TensorNameMap: "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron olmoe olmo2 phimoe "layers.{bid}.attention.wv", # llama-pth "encoder.layer.{bid}.attention.self.value", # bert + "transformer.layer.{bid}.attention.v_lin", # distillbert "transformer.h.{bid}.attn.v_proj", # gpt-j "transformer.h.{bid}.attn.v", # refact "model.layers.layers.{bid}.self_attn.v_proj", # plamo @@ -217,6 +220,7 @@ class TensorNameMap: "model.layers.{bid}.self_attn.linear_attn", # deci "layers.{bid}.attention.wo", # llama-pth "encoder.layer.{bid}.attention.output.dense", # bert + "transformer.layer.{bid}.attention.out_lin", # distillbert "transformer.h.{bid}.attn.out_proj", # gpt-j "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon "model.layers.{bid}.self_attn.dense", # persimmon @@ -237,6 +241,7 @@ class TensorNameMap: # Attention output norm MODEL_TENSOR.ATTN_OUT_NORM: ( "encoder.layer.{bid}.attention.output.LayerNorm", # bert + "transformer.layer.{bid}.sa_layer_norm", # distillbert "encoder.layers.{bid}.norm1", # nomic-bert "transformer.decoder_layer.{bid}.rms_norm_1", # Grok "transformer.blocks.{bid}.norm_attn_norm.norm_2", # dbrx @@ -313,6 +318,7 @@ class TensorNameMap: "model.layers.{bid}.mlp.up_proj", # llama-hf refact nemotron olmo2 "layers.{bid}.feed_forward.w3", # llama-pth "encoder.layer.{bid}.intermediate.dense", # 
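The reason the `find_hparam` fallbacks above gain `dim`, `hidden_dim` and `n_heads` is that DistilBERT's config.json uses those key names instead of the BERT-style ones. A simplified re-statement of the lookup (not the converter's actual method), with example values mirroring distilbert-base-uncased:

```
# Sketch: how the find_hparam fallbacks added above resolve DistilBERT-style
# config keys. Simplified re-statement of the converter's lookup; the config
# values mirror distilbert-base-uncased and are illustrative only.
from typing import Any


def find_hparam(hparams: dict[str, Any], keys: list[str], optional: bool = False) -> Any:
    # first matching key wins, as in the converter
    for key in keys:
        if key in hparams:
            return hparams[key]
    if optional:
        return None
    raise KeyError(f"none of {keys} found")


distilbert_config = {"dim": 768, "hidden_dim": 3072, "n_heads": 12, "n_layers": 6}

assert find_hparam(distilbert_config, ["hidden_size", "n_embd", "dim"]) == 768
assert find_hparam(distilbert_config, ["intermediate_size", "n_inner", "hidden_dim"]) == 3072
assert find_hparam(distilbert_config, ["num_attention_heads", "n_head", "n_heads"]) == 12
```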
bert + "transformer.layer.{bid}.ffn.lin1", # distillbert "transformer.h.{bid}.mlp.fc_in", # gpt-j "transformer.h.{bid}.mlp.linear_3", # refact "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon @@ -396,6 +402,7 @@ class TensorNameMap: "model.layers.{bid}.mlp.down_proj", # llama-hf nemotron olmo2 "layers.{bid}.feed_forward.w2", # llama-pth "encoder.layer.{bid}.output.dense", # bert + "transformer.layer.{bid}.ffn.lin2", # distillbert "transformer.h.{bid}.mlp.fc_out", # gpt-j "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon "model.layers.{bid}.mlp.dense_4h_to_h", # persimmon @@ -457,6 +464,7 @@ class TensorNameMap: MODEL_TENSOR.LAYER_OUT_NORM: ( "encoder.layer.{bid}.output.LayerNorm", # bert + "transformer.layer.{bid}.output_layer_norm", # distillbert "encoder.layers.{bid}.norm2", # nomic-bert "transformer.decoder_layer.{bid}.rms_norm_3", # Grok "encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2 @@ -827,6 +835,7 @@ class TensorNameMap: MODEL_TENSOR.CLS: ( "classifier", # jina "classifier.dense", # roberta + "pre_classifier", # distillbert ), MODEL_TENSOR.CLS_OUT: ( diff --git a/src/llama-model.cpp b/src/llama-model.cpp index a1aa51412..3f1f6c9bf 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -2114,7 +2114,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) { case LLM_ARCH_NOMIC_BERT_MOE: { tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); - type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, 0); + type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, TENSOR_NOT_REQUIRED); if (arch == LLM_ARCH_BERT) { pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0); @@ -5885,8 +5885,10 @@ struct llm_build_bert : public llm_graph_context { inpL = build_inp_embd(model.tok_embd); // token types are hardcoded to zero ("Sentence A") - ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0); - inpL = ggml_add(ctx0, inpL, type_row0); + if (model.type_embd) { + ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0); + inpL = ggml_add(ctx0, inpL, type_row0); + } if (model.arch == LLM_ARCH_BERT) { inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL); } From 07e4351ce663a7802b31f92fd7bc0e555c2044b6 Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Fri, 30 May 2025 12:24:37 +0200 Subject: [PATCH 14/21] convert : allow partial update to the chkhsh pre-tokenizer list (#13847) * convert : allow partial update to the chkhsh pre-tokenizer list * code style * update tokenizer out * rm inp/out files for models not having gguf * fixed hash for glm * skip nomic-bert-moe test * Update convert_hf_to_gguf_update.py * fix minerva-7b hash * rm redundant import --- convert_hf_to_gguf.py | 27 +-- convert_hf_to_gguf_update.py | 178 +++++++++++++------- models/ggml-vocab-bert-bge.gguf.inp | 2 +- models/ggml-vocab-bert-bge.gguf.out | 2 +- models/ggml-vocab-chameleon.gguf.inp | 112 ------------ models/ggml-vocab-chameleon.gguf.out | 46 ----- models/ggml-vocab-command-r.gguf.inp | 2 +- models/ggml-vocab-command-r.gguf.out | 2 +- models/ggml-vocab-deepseek-coder.gguf.inp | 2 +- models/ggml-vocab-deepseek-coder.gguf.out | 2 +- models/ggml-vocab-deepseek-llm.gguf.inp | 2 +- models/ggml-vocab-deepseek-llm.gguf.out | 2 +- models/ggml-vocab-deepseek-r1-qwen.gguf.inp | 112 ------------ models/ggml-vocab-deepseek-r1-qwen.gguf.out | 46 ----- models/ggml-vocab-falcon.gguf.inp | 2 +- 
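For background on what the chkhsh values in this patch are, a minimal sketch (not part of the patch) of how a pre-tokenizer fingerprint is computed, mirroring the hashing logic in convert_hf_to_gguf_update.py; the repo id is only an example:

```
# Sketch: derive a chkhsh fingerprint for a tokenizer, mirroring the update
# script: tokenize a fixed check string and hash the resulting token ids.
# The repo id is an example; the real script uses its full CHK_TXT string,
# so the hash printed here will not match the listed ones.
from hashlib import sha256
from transformers import AutoTokenizer

chk_txt = "placeholder check text"  # convert_hf_to_gguf_update.py uses the full CHK_TXT here

tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-small-en-v1.5")  # example repo
chktok = tokenizer.encode(chk_txt)
chkhsh = sha256(str(chktok).encode()).hexdigest()
print(chkhsh)  # compared against the chkhsh values in get_vocab_base_pre()
```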
models/ggml-vocab-falcon.gguf.out | 2 +- models/ggml-vocab-gpt-2.gguf.inp | 2 +- models/ggml-vocab-gpt-2.gguf.out | 2 +- models/ggml-vocab-gpt-4o.gguf.inp | 112 ------------ models/ggml-vocab-gpt-4o.gguf.out | 46 ----- models/ggml-vocab-llama-bpe.gguf.inp | 2 +- models/ggml-vocab-llama-bpe.gguf.out | 2 +- models/ggml-vocab-llama-spm.gguf.inp | 2 +- models/ggml-vocab-llama-spm.gguf.out | 2 +- models/ggml-vocab-llama4.gguf.inp | 112 ------------ models/ggml-vocab-llama4.gguf.out | 46 ----- models/ggml-vocab-mpt.gguf.inp | 2 +- models/ggml-vocab-mpt.gguf.out | 2 +- models/ggml-vocab-nomic-bert-moe.gguf.inp | 112 ------------ models/ggml-vocab-nomic-bert-moe.gguf.out | 46 ----- models/ggml-vocab-phi-3.gguf.inp | 2 +- models/ggml-vocab-phi-3.gguf.out | 2 +- models/ggml-vocab-pixtral.gguf.inp | 112 ------------ models/ggml-vocab-pixtral.gguf.out | 46 ----- models/ggml-vocab-qwen2.gguf.inp | 2 +- models/ggml-vocab-qwen2.gguf.out | 2 +- models/ggml-vocab-refact.gguf.inp | 2 +- models/ggml-vocab-refact.gguf.out | 2 +- models/ggml-vocab-roberta-bpe.gguf.inp | 112 ------------ models/ggml-vocab-roberta-bpe.gguf.out | 46 ----- models/ggml-vocab-starcoder.gguf.inp | 2 +- models/ggml-vocab-starcoder.gguf.out | 2 +- tests/CMakeLists.txt | 4 +- 43 files changed, 159 insertions(+), 1208 deletions(-) delete mode 100644 models/ggml-vocab-chameleon.gguf.inp delete mode 100644 models/ggml-vocab-chameleon.gguf.out delete mode 100644 models/ggml-vocab-deepseek-r1-qwen.gguf.inp delete mode 100644 models/ggml-vocab-deepseek-r1-qwen.gguf.out delete mode 100644 models/ggml-vocab-gpt-4o.gguf.inp delete mode 100644 models/ggml-vocab-gpt-4o.gguf.out delete mode 100644 models/ggml-vocab-llama4.gguf.inp delete mode 100644 models/ggml-vocab-llama4.gguf.out delete mode 100644 models/ggml-vocab-nomic-bert-moe.gguf.inp delete mode 100644 models/ggml-vocab-nomic-bert-moe.gguf.out delete mode 100644 models/ggml-vocab-pixtral.gguf.inp delete mode 100644 models/ggml-vocab-pixtral.gguf.out delete mode 100644 models/ggml-vocab-roberta-bpe.gguf.inp delete mode 100644 models/ggml-vocab-roberta-bpe.gguf.out diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 5b4ed0d5d..54738b862 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -674,12 +674,12 @@ class TextModel(ModelBase): if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed": # ref: https://huggingface.co/tiiuae/falcon-7b res = "falcon" - if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e": - # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base - res = "falcon3" if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f": # ref: https://huggingface.co/BAAI/bge-small-en-v1.5 res = "bert-bge" + if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e": + # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base + res = "falcon3" if chkhsh == "8e62295832751ca1e8f92f2226f403dea30dc5165e448b5bfa05af5340c64ec7": # ref: https://huggingface.co/BAAI/bge-large-zh-v1.5 res = "bert-bge-large" @@ -731,9 +731,6 @@ class TextModel(ModelBase): if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a": # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code res = "jina-v2-code" - if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b" or chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516": - # ref: https://huggingface.co/THUDM/glm-4-9b-chat - res = "chatglm-bpe" if chkhsh == 
"7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee": # ref: https://huggingface.co/LumiOpen/Viking-7B res = "viking" @@ -764,9 +761,6 @@ class TextModel(ModelBase): if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450": # ref: https://huggingface.co/facebook/chameleon-7b res = "chameleon" - if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35": - # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0 - res = "minerva-7b" if chkhsh == "8b5a93ed704057481f240da0be7e7dca721d7f8f4755263b6807227a2cbeae65": # ref: https://huggingface.co/sentence-transformers/stsb-roberta-base res = "roberta-bpe" @@ -797,15 +791,24 @@ class TextModel(ModelBase): if chkhsh == "d353350c764d8c3b39c763113960e4fb4919bea5fbf208a0e3b22e8469dc7406": # ref: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct res = "llama4" - if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2": - # ref: https://huggingface.co/THUDM/glm-4-9b-hf - res = "glm4" if chkhsh == "0e9433cbbb161f89e264eb32e8e64bfe69e834973ffca5d41d3948a604a3e2a3": # ref: https://huggingface.co/mistral-community/pixtral-12b res = "pixtral" if chkhsh == "d5f1dd6f980fec569fb218a81a7658ac45fc56b38c5a0adeb1c232fbe04ef5ec": # ref: https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base res = "seed-coder" + if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b": + # ref: https://huggingface.co/THUDM/glm-4-9b-chat + res = "chatglm-bpe" + if chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516": + # ref: https://huggingface.co/THUDM/glm-4-9b-chat + res = "chatglm-bpe" + if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2": + # ref: https://huggingface.co/THUDM/glm-4-9b-hf + res = "glm4" + if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35": + # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0 + res = "minerva-7b" if res is None: logger.warning("\n") diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index c66ec547b..2f733f097 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -1,28 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# This script downloads the tokenizer models of the specified models from Huggingface and -# generates the get_vocab_base_pre() function for convert_hf_to_gguf.py -# -# This is necessary in order to analyze the type of pre-tokenizer used by the model and -# provide the necessary information to llama.cpp via the GGUF header in order to implement -# the same pre-tokenizer. 
-# -# ref: https://github.com/ggml-org/llama.cpp/pull/6920 -# -# Instructions: -# -# - Add a new model to the "models" list -# - Run the script with your huggingface token: -# -# python3 convert_hf_to_gguf_update.py -# -# - The convert_hf_to_gguf.py script will have had its get_vocab_base_pre() function updated -# - Update llama.cpp with the new pre-tokenizer if necessary -# -# TODO: generate tokenizer tests for llama.cpp -# - import logging import os import pathlib @@ -32,6 +10,7 @@ import requests import sys import json import shutil +import argparse from hashlib import sha256 from enum import IntEnum, auto @@ -41,6 +20,11 @@ logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger("convert_hf_to_gguf_update") sess = requests.Session() +convert_py_pth = pathlib.Path("convert_hf_to_gguf.py") +convert_py = convert_py_pth.read_text(encoding="utf-8") +hf_token_pth = pathlib.Path.home() / ".cache" / "huggingface" / "token" +hf_token = hf_token_pth.read_text(encoding="utf-8").strip() if hf_token_pth.exists() else None + class TOKENIZER_TYPE(IntEnum): SPM = auto() @@ -49,20 +33,49 @@ class TOKENIZER_TYPE(IntEnum): UGM = auto() +DOC_STRING = """ +This script downloads the tokenizer models of the specified models from Huggingface and +generates the get_vocab_base_pre() function for convert_hf_to_gguf.py + +/!\\ It is intended to be used by contributors and is not meant to be run by end users + +This is necessary in order to analyze the type of pre-tokenizer used by the model and +provide the necessary information to llama.cpp via the GGUF header in order to implement +the same pre-tokenizer. + +ref: https://github.com/ggml-org/llama.cpp/pull/6920 + +Instructions: + +- Add a new model to the "models" list +- Run the script with your huggingface token + By default, token will be read from ~/.cache/huggingface/token +- The convert_hf_to_gguf.py script will have had its get_vocab_base_pre() function updated +- Update llama.cpp with the new pre-tokenizer if necessary +""" +# TODO: generate tokenizer tests for llama.cpp + +parser = argparse.ArgumentParser(description=DOC_STRING, formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument( + "--full", action="store_true", + help="download full list of models - make sure you have access to all of them", +) +parser.add_argument( + "hf_token", + help="optional HF token", + nargs="?", +) +args = parser.parse_args() +hf_token = args.hf_token if args.hf_token is not None else hf_token + +if hf_token is None: + logger.error("HF token is required. Please provide it as an argument or set it in ~/.cache/huggingface/token") + sys.exit(1) + # TODO: this string has to exercise as much pre-tokenizer functionality as possible # will be updated with time - contributions welcome CHK_TXT = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? 
We\'Ve a\'lL' -if len(sys.argv) == 2: - token = sys.argv[1] - if not token.startswith("hf_"): - logger.info("Huggingface token seems invalid") - logger.info("Usage: python convert_hf_to_gguf_update.py ") - sys.exit(1) -else: - logger.info("Usage: python convert_hf_to_gguf_update.py ") - sys.exit(1) - # TODO: add models here, base models preferred models = [ {"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", }, @@ -103,7 +116,6 @@ models = [ {"name": "exaone", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct", }, {"name": "phi-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/microsoft/phi-2", }, {"name": "chameleon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/facebook/chameleon-7b", }, - {"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", }, {"name": "roberta-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sentence-transformers/stsb-roberta-base"}, {"name": "gigachat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct"}, {"name": "megrez", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Infinigence/Megrez-3B-Instruct"}, @@ -114,11 +126,19 @@ models = [ {"name": "trillion", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/trillionlabs/Trillion-7B-preview", }, {"name": "bailingmoe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/Ling-lite", }, {"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", }, - {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", }, {"name": "pixtral", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistral-community/pixtral-12b", }, {"name": "seed-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base", }, ] +# some models are known to be broken upstream, so we will skip them as exceptions +pre_computed_hashes = [ + # chatglm-bpe has 2 hashes, why? 
+ {"name": "chatglm-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-chat", "chkhsh": "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b"}, + {"name": "chatglm-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-chat", "chkhsh": "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516"}, + {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", "chkhsh": "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2"}, + {"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", "chkhsh": "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35"}, +] + def download_file_with_auth(url, token, save_path): headers = {"Authorization": f"Bearer {token}"} @@ -169,9 +189,29 @@ def download_model(model): if os.path.isfile(save_path): logger.info(f"{name}: File {save_path} already exists - skipping") continue - download_file_with_auth(f"{repo}/resolve/main/{file}", token, save_path) + download_file_with_auth(f"{repo}/resolve/main/{file}", hf_token, save_path) +# get list of existing models and chkhsh from the convert_hf_to_gguf.py file +# returns mapping res --> chkhsh +def get_existing_models(convert_py): + pattern = r'if chkhsh == "([a-f0-9]{64})":\s*\n\s*.*\s*res = "([^"]+)"' + matches = re.findall(pattern, convert_py) + output = {} + for chkhsh, res in matches: + output[res] = chkhsh + return output + + +existing_models = {} +all_models = models.copy() +if not args.full: + # Filter out models that already exist in convert_hf_to_gguf.py + existing_models = get_existing_models(convert_py) + all_models = models.copy() + models = [model for model in all_models if model["name"] not in existing_models] + +logging.info(f"Downloading {len(models)} models...") for model in models: try: download_model(model) @@ -182,9 +222,10 @@ for model in models: # generate the source code for the convert_hf_to_gguf.py:get_vocab_base_pre() function: src_ifs = "" -for model in models: +for model in [*all_models, *pre_computed_hashes]: name = model["name"] tokt = model["tokt"] + chkhsh = model.get("chkhsh") if tokt == TOKENIZER_TYPE.SPM or tokt == TOKENIZER_TYPE.UGM: continue @@ -195,35 +236,44 @@ for model in models: continue # create the tokenizer - try: - if name == "t5": - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False) - else: - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}") - except OSError as e: - logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}") - continue # Skip to the next model if the tokenizer can't be loaded + if chkhsh is not None: + # if the model has a pre-computed hash, use it + logger.info(f"Using pre-computed hash for model {name}: {chkhsh}") + elif name in existing_models: + # if the model already exists in convert_hf_to_gguf.py, skip compute hash + chkhsh = existing_models[name] + else: + # otherwise, compute the hash of the tokenizer + try: + logger.info(f"Loading tokenizer from {f'models/tokenizers/{name}'}...") + if name == "t5": + tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False) + else: + tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}") + except OSError as e: + logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. 
Error: {e}") + continue # Skip to the next model if the tokenizer can't be loaded - chktok = tokenizer.encode(CHK_TXT) - chkhsh = sha256(str(chktok).encode()).hexdigest() + chktok = tokenizer.encode(CHK_TXT) + chkhsh = sha256(str(chktok).encode()).hexdigest() - logger.info(f"model: {name}") - logger.info(f"tokt: {tokt}") - logger.info(f"repo: {model['repo']}") - logger.info(f"chktok: {chktok}") - logger.info(f"chkhsh: {chkhsh}") + logger.info(f"model: {name}") + logger.info(f"tokt: {tokt}") + logger.info(f"repo: {model['repo']}") + logger.info(f"chktok: {chktok}") + logger.info(f"chkhsh: {chkhsh}") - # print the "pre_tokenizer" content from the tokenizer.json - with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f: - cfg = json.load(f) - normalizer = cfg["normalizer"] - logger.info("normalizer: " + json.dumps(normalizer, indent=4)) - pre_tokenizer = cfg["pre_tokenizer"] - logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4)) - if "ignore_merges" in cfg["model"]: - logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4)) + # print the "pre_tokenizer" content from the tokenizer.json + with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f: + cfg = json.load(f) + normalizer = cfg["normalizer"] + logger.info("normalizer: " + json.dumps(normalizer, indent=4)) + pre_tokenizer = cfg["pre_tokenizer"] + logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4)) + if "ignore_merges" in cfg["model"]: + logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4)) - logger.info("") + logger.info("") src_ifs += f" if chkhsh == \"{chkhsh}\":\n" src_ifs += f" # ref: {model['repo']}\n" @@ -271,8 +321,6 @@ src_func = f""" return res """ -convert_py_pth = pathlib.Path("convert_hf_to_gguf.py") -convert_py = convert_py_pth.read_text(encoding="utf-8") convert_py = re.sub( r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)", lambda m: m.group(1) + src_func + m.group(3), @@ -367,6 +415,10 @@ for model in models: logger.error(f"Failed to load tokenizer for model {name}. 
Error: {e}") continue # Skip this model and continue with the next one in the loop + if not os.path.exists(f"models/ggml-vocab-{name}.gguf"): + logger.info(f"Skip vocab files for model {name}, no GGUF file found") + continue + with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f: for text in tests: f.write(f"{text}") diff --git a/models/ggml-vocab-bert-bge.gguf.inp b/models/ggml-vocab-bert-bge.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-bert-bge.gguf.inp +++ b/models/ggml-vocab-bert-bge.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-bert-bge.gguf.out b/models/ggml-vocab-bert-bge.gguf.out index a62566ce7..b1c49672f 100644 --- a/models/ggml-vocab-bert-bge.gguf.out +++ b/models/ggml-vocab-bert-bge.gguf.out @@ -1,5 +1,5 @@ 29464 2094 1018 1092 2706 - 11865 17875 + 9706 7959 2140 diff --git a/models/ggml-vocab-chameleon.gguf.inp b/models/ggml-vocab-chameleon.gguf.inp deleted file mode 100644 index 9baf7d77a..000000000 --- a/models/ggml-vocab-chameleon.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? 
We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-chameleon.gguf.out b/models/ggml-vocab-chameleon.gguf.out deleted file mode 100644 index 7c5413fee..000000000 --- a/models/ggml-vocab-chameleon.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 17245 16604 16403 16604 33583 18355 - 16421 51153 - - 16604 - 16650 - 16650 16604 - 16581 - 16582 - 16582 16582 - 16582 16582 16582 - 16581 16582 - 31596 17394 - 34926 17394 - 31596 18671 - 34926 18671 - 34926 18671 16384 - 31596 16395 17394 16384 - 34926 16395 17394 16384 - 16811 16704 20410 16483 16631 16397 52854 - 16470 16399 16403 16407 16604 16406 35764 38185 51595 22592 26639 - 29479 23955 17012 20103 25527 27670 17408 19005 21473 24774 - 54254 42231 48084 29409 16617 61889 29409 16608 21954 16628 21954 16499 58445 29409 16607 58445 21954 16479 42231 21954 16611 21954 16607 21954 16633 21954 16611 29409 16607 21954 16615 - 52351 16604 16391 25825 16392 23686 16498 39161 18885 16618 16488 30853 16604 16391 54124 17153 25134 16656 18476 26169 16895 16392 62193 16611 16604 16391 24664 17153 57169 16721 16872 17073 17304 28729 16392 - 31596 - 34926 - 16650 31596 - 16650 34926 - 16696 31596 - 16696 31596 16582 16696 31596 - 16604 16391 - 16582 16604 16412 - 16390 22623 - 31596 16395 16712 16390 16828 16384 17674 16769 16732 23686 16607 16604 16414 24427 16623 41809 16495 28999 36469 45292 30197 16400 16402 16400 16403 16400 16404 16400 43969 65211 16636 - 16384 16384 16384 16384 16384 16384 - 16402 - 16402 16402 - 16402 16402 16402 - 16402 16402 16402 16402 - 16402 16402 16402 16402 16402 - 16402 16402 16402 16402 16402 16402 - 16402 16402 16402 16402 16402 16402 16402 - 16402 16402 16402 16402 16402 16402 16402 16402 - 16402 16402 16402 16402 16402 16402 16402 16402 16402 - 16418 19038 16639 16448 24315 33727 16467 - 18765 17981 - 16582 16604 16582 16582 16604 16582 16582 16582 16604 16581 16604 16581 16581 16604 16581 16582 16650 16582 16650 16604 16582 16696 16582 16696 16604 16582 52351 16604 16391 25825 16392 23686 16498 39161 18885 16618 16488 30853 16604 16391 54124 17153 25134 16656 18476 26169 16895 16392 62193 16611 20410 16483 16631 18885 16483 16631 16604 16402 16604 16402 16402 16604 16402 16402 16402 16604 16402 16402 16402 16402 16604 16402 16402 16402 16402 16402 16604 16402 16402 16402 16402 16402 16402 16604 16402 16402 16402 16402 16402 16402 16402 16604 16402 16402 16402 16402 16402 16402 16402 16402 16604 16402 16397 16402 16604 16402 16397 16397 16402 16604 16402 16397 16397 16397 16402 16604 54254 42231 48084 29409 16617 61889 29409 16608 21954 16628 21954 16499 58445 29409 16607 58445 21954 16479 42231 21954 16611 27683 16607 16604 16414 24427 16623 41809 16495 28999 36469 45292 30197 16400 16402 16400 16403 16400 16404 16400 43969 65211 16636 16604 16396 16396 16396 16396 16396 16396 16412 16412 16412 16412 16412 16412 16412 27268 23955 17012 20103 25527 27670 17408 19005 21473 24774 16604 16390 16390 16390 16390 16390 16390 16447 16447 16447 16447 16447 16447 16447 16385 16385 16385 16385 16397 16397 16397 16397 16397 16397 16384 16384 16384 16384 16384 16384 16414 16414 16414 16414 16414 16414 16687 16390 16690 16992 16604 16390 61797 16733 16390 16466 16986 16395 16604 16390 17879 16732 17811 16414 16604 16390 16428 16804 17811 16687 16390 16683 17190 16728 16395 16604 16390 16419 16732 16945 16991 25251 16414 17119 16390 38127 16641 16390 16459 16427 diff --git a/models/ggml-vocab-command-r.gguf.inp b/models/ggml-vocab-command-r.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-command-r.gguf.inp 
+++ b/models/ggml-vocab-command-r.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-command-r.gguf.out b/models/ggml-vocab-command-r.gguf.out index 3f6b41888..0e3af72eb 100644 --- a/models/ggml-vocab-command-r.gguf.out +++ b/models/ggml-vocab-command-r.gguf.out @@ -1,5 +1,5 @@ 2536 228 27 228 22957 6983 - 45 193433 + 90711 87 20910 228 1667 diff --git a/models/ggml-vocab-deepseek-coder.gguf.inp b/models/ggml-vocab-deepseek-coder.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-deepseek-coder.gguf.inp +++ b/models/ggml-vocab-deepseek-coder.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-deepseek-coder.gguf.out b/models/ggml-vocab-deepseek-coder.gguf.out index 52c4111a1..ef6bc5b8a 100644 --- a/models/ggml-vocab-deepseek-coder.gguf.out +++ b/models/ggml-vocab-deepseek-coder.gguf.out @@ -1,5 +1,5 @@ 1050 207 19 207 19192 4217 - 37 32009 71 6247 + 125 213 26862 282 207 243 diff --git a/models/ggml-vocab-deepseek-llm.gguf.inp b/models/ggml-vocab-deepseek-llm.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-deepseek-llm.gguf.inp +++ b/models/ggml-vocab-deepseek-llm.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-deepseek-llm.gguf.out b/models/ggml-vocab-deepseek-llm.gguf.out index 0191b7a11..f9d49c9af 100644 --- a/models/ggml-vocab-deepseek-llm.gguf.out +++ b/models/ggml-vocab-deepseek-llm.gguf.out @@ -1,5 +1,5 @@ 1052 207 19 207 19109 4223 - 37 100014 71 6245 + 82077 26723 282 207 243 diff --git a/models/ggml-vocab-deepseek-r1-qwen.gguf.inp b/models/ggml-vocab-deepseek-r1-qwen.gguf.inp deleted file mode 100644 index 9baf7d77a..000000000 --- a/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! 
-__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-deepseek-r1-qwen.gguf.out b/models/ggml-vocab-deepseek-r1-qwen.gguf.out deleted file mode 100644 index 18b4b45cd..000000000 --- a/models/ggml-vocab-deepseek-r1-qwen.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 1122 220 19 220 26062 3951 - 37 50753 261 - - 220 - 256 - 262 - 197 - 198 - 271 - 1406 - 1572 - 9707 1879 - 21927 1879 - 9707 4337 - 21927 4337 - 21927 4337 0 - 9707 11 1879 0 - 21927 11 1879 0 - 419 374 11162 99 247 13 10821 - 86 15 19 23 220 22 83 1963 41808 11472 2940 16739 - 78762 14144 1456 13073 63471 33594 3038 133178 79012 - 146394 97529 241 44258 233 146568 44258 224 147603 20879 115 146280 44258 223 146280 147272 97529 227 147805 148301 147270 44258 223 146848 - 145836 320 8252 8 26525 114 378 235 149921 30543 320 35673 99066 97534 8 25521 227 320 3243 42365 429 702 1181 1828 3950 8 - 9707 - 21927 - 220 21927 - 256 21927 - 262 21927 - 262 21927 198 262 21927 - 320 - 198 284 - 6 11385 - 9707 11 379 64848 0 2585 525 498 26525 223 937 104100 18493 22377 99257 16 18 16 19 16 20 16 35727 21216 - 17085 2928 - 18 - 18 18 - 18 18 18 - 18 18 18 18 - 18 18 18 18 18 - 18 18 18 18 18 18 - 18 18 18 18 18 18 18 - 18 18 18 18 18 18 18 18 - 18 18 18 18 18 18 18 18 18 - 34 90063 128324 - 2560 2347 - 198 4710 14731 65497 7847 1572 2303 78672 10947 145836 320 8252 8 26525 114 378 235 149921 30543 320 35673 99066 97534 8 25521 227 11162 99 247 149955 220 18 220 18 18 220 18 18 18 220 18 18 18 18 220 18 18 18 18 18 220 18 18 18 18 18 18 220 18 18 18 18 18 18 18 220 18 18 18 18 18 18 18 18 220 18 13 18 220 18 496 18 220 18 1112 18 220 146394 97529 241 44258 233 146568 44258 224 147603 20879 115 146280 44258 223 146280 147272 97529 227 144534 937 104100 18493 22377 99257 16 18 16 19 16 20 16 35727 21216 55460 53237 18658 14144 1456 13073 63471 33594 3038 133178 79012 3355 4605 4605 13874 13874 73594 3014 3014 28149 17085 2928 26610 7646 358 3003 1012 364 83 813 566 594 1052 11 364 787 498 2704 30 364 44 537 2704 358 3278 1281 432 11 364 35 498 1075 1045 15243 30 1205 6 42612 264 63866 43 diff --git a/models/ggml-vocab-falcon.gguf.inp b/models/ggml-vocab-falcon.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-falcon.gguf.inp +++ b/models/ggml-vocab-falcon.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-falcon.gguf.out b/models/ggml-vocab-falcon.gguf.out index 64a48d97f..6319de60e 100644 --- a/models/ggml-vocab-falcon.gguf.out +++ b/models/ggml-vocab-falcon.gguf.out @@ -1,5 +1,5 @@ 878 204 31 3068 133 2137 - 28611 132 30042 + 34502 18614 286 204 258 diff --git a/models/ggml-vocab-gpt-2.gguf.inp b/models/ggml-vocab-gpt-2.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-gpt-2.gguf.inp +++ b/models/ggml-vocab-gpt-2.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months 
__ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-gpt-2.gguf.out b/models/ggml-vocab-gpt-2.gguf.out index 17a13bdfc..6464ded3d 100644 --- a/models/ggml-vocab-gpt-2.gguf.out +++ b/models/ggml-vocab-gpt-2.gguf.out @@ -1,5 +1,5 @@ 798 604 25208 1933 - 37 9116 71 11751 + 127 226 79 69 417 220 220 220 diff --git a/models/ggml-vocab-gpt-4o.gguf.inp b/models/ggml-vocab-gpt-4o.gguf.inp deleted file mode 100644 index 9baf7d77a..000000000 --- a/models/ggml-vocab-gpt-4o.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? 
We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-gpt-4o.gguf.out b/models/ggml-vocab-gpt-4o.gguf.out deleted file mode 100644 index 478df726f..000000000 --- a/models/ggml-vocab-gpt-4o.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 1165 220 19 220 27124 5503 - 37 19194 259 - - 220 - 256 - 271 - 197 - 198 - 279 - 2499 - 2775 - 13225 2375 - 32949 2375 - 13225 5922 - 32949 5922 - 32949 5922 0 - 13225 11 2375 0 - 32949 11 2375 0 - 495 382 9552 99 247 13 17159 - 86 45404 220 22 10191 2852 22924 4750 6916 - 3907 53641 1235 185386 8118 - 11400 107516 15867 20804 22851 134178 77431 32010 104312 37984 16329 27751 89335 - 112927 222 350 14559 8 22861 114 2524 64364 104 15148 350 76466 166700 121942 780 8 91349 350 7393 74471 484 853 1617 2316 6602 8 - 13225 - 32949 - 220 32949 - 256 32949 - 271 32949 - 271 32949 198 271 32949 - 350 - 198 314 - 6 6837 - 13225 11 342 70653 0 3253 553 481 22861 223 1423 7522 18165 2178 34058 22369 16412 32999 16 867 8208 - 147475 - 18 - 2546 - 15517 - 15517 18 - 15517 2546 - 15517 15517 - 15517 15517 18 - 15517 15517 2546 - 15517 15517 15517 - 34 60213 53904 - 2960 3098 - 126470 25980 160432 16609 2775 4066 172261 19432 112927 222 350 14559 8 22861 114 2524 64364 104 15148 350 76466 166700 121942 780 8 91349 9552 99 247 4103 99 247 220 18 220 2546 220 15517 220 15517 18 220 15517 2546 220 15517 15517 220 15517 15517 18 220 15517 15517 2546 220 18 13 18 220 18 485 18 220 18 1008 18 44735 107516 15867 20804 22851 134178 77431 32010 104312 156437 1423 7522 18165 2178 34058 22369 16412 32999 16 867 8208 105024 106657 1967 53641 1235 185386 8118 22434 39336 26178 26178 168394 194663 27271 147475 25883 6961 9790 1339 461 83 1280 19016 1354 11 461 1099 481 3239 30 461 44 625 3239 17291 1520 480 11 461 35 481 1299 1236 17966 30 1416 6 27493 261 54602 43 diff --git a/models/ggml-vocab-llama-bpe.gguf.inp b/models/ggml-vocab-llama-bpe.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-llama-bpe.gguf.inp +++ b/models/ggml-vocab-llama-bpe.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-llama-bpe.gguf.out b/models/ggml-vocab-llama-bpe.gguf.out index 4b35cf93f..a77376625 100644 --- a/models/ggml-vocab-llama-bpe.gguf.out +++ b/models/ggml-vocab-llama-bpe.gguf.out @@ -1,5 +1,5 @@ 1142 220 19 220 27154 4038 - 37 51853 261 + 88075 16276 301 220 256 diff --git a/models/ggml-vocab-llama-spm.gguf.inp b/models/ggml-vocab-llama-spm.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-llama-spm.gguf.inp +++ b/models/ggml-vocab-llama-spm.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-llama-spm.gguf.out b/models/ggml-vocab-llama-spm.gguf.out index 93aacf8ba..2a71a6ef8 100644 --- a/models/ggml-vocab-llama-spm.gguf.out +++ b/models/ggml-vocab-llama-spm.gguf.out @@ -1,5 +1,5 @@ 474 287 29871 29946 29871 30226 7378 - 383 4000 261 + 11585 7810 295 259 1678 diff --git a/models/ggml-vocab-llama4.gguf.inp b/models/ggml-vocab-llama4.gguf.inp deleted file mode 100644 index 9baf7d77a..000000000 --- a/models/ggml-vocab-llama4.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world 
-__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-llama4.gguf.out b/models/ggml-vocab-llama4.gguf.out deleted file mode 100644 index 7ca46ce59..000000000 --- a/models/ggml-vocab-llama4.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 1190 220 32 220 18215 7112 - 50 16800 258 - - 220 - 256 - 277 - 197 - 198 - 368 - 2946 - 3271 - 19873 3817 - 39715 3817 - 19873 7353 - 39715 7353 - 39715 7353 13 - 19873 24 3817 13 - 39715 24 3817 13 - 544 373 9522 112 247 26 36315 - 99 39923 220 35 9607 21498 21470 3679 9433 - 1595 7653 633 79829 34051 1636 - 8755 102595 115960 21125 148305 96819 102816 39048 14105 22528 160234 - 114590 222 330 14879 21 51358 127 12817 93293 117 24204 330 68239 881 120327 170428 21 89101 330 7384 88230 511 947 1492 3742 7233 21 - 19873 - 39715 - 220 39715 - 256 39715 - 277 39715 - 277 39715 198 277 39715 - 330 - 198 319 - 19 7359 - 19873 24 386 87799 13 2403 583 650 51358 223 1663 155736 1522 42056 7544 13336 28785 29 4412 20645 - 17931 4959 - 31 - 1922 - 12325 - 12325 31 - 12325 1922 - 12325 12325 - 12325 12325 31 - 12325 12325 1922 - 12325 12325 12325 - 47 19811 12077 - 3260 3579 - 198 7283 51499 191231 20192 3271 3322 9287 2143 17860 114590 222 330 14879 21 51358 127 12817 93293 117 24204 330 68239 881 120327 170428 21 89101 9522 112 247 172394 247 220 31 220 1922 220 12325 220 12325 31 220 12325 1922 220 12325 12325 220 12325 12325 31 220 12325 12325 1922 220 31 26 31 220 31 396 31 220 31 1043 31 117131 102595 115960 21125 148305 96819 102816 80883 223 1663 155736 1522 42056 7544 13336 28785 29 4412 20645 79745 150278 117079 633 79829 34051 1636 25611 41990 109428 1488 91054 24072 17931 4959 29795 9296 16517 1806 481 96 1386 36633 1609 24 481 1109 650 5074 43 481 57 702 5074 27088 2170 536 24 481 48 650 1933 1696 30262 43 1665 19 32818 262 27236 56 diff --git a/models/ggml-vocab-mpt.gguf.inp b/models/ggml-vocab-mpt.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-mpt.gguf.inp +++ b/models/ggml-vocab-mpt.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer 
+Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-mpt.gguf.out b/models/ggml-vocab-mpt.gguf.out index 372c751bf..ca62669ad 100644 --- a/models/ggml-vocab-mpt.gguf.out +++ b/models/ggml-vocab-mpt.gguf.out @@ -1,5 +1,5 @@ 728 577 24142 2607 - 39 26288 6554 + 37515 18569 293 209 50276 diff --git a/models/ggml-vocab-nomic-bert-moe.gguf.inp b/models/ggml-vocab-nomic-bert-moe.gguf.inp deleted file mode 100644 index 9baf7d77a..000000000 --- a/models/ggml-vocab-nomic-bert-moe.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? 
We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-nomic-bert-moe.gguf.out b/models/ggml-vocab-nomic-bert-moe.gguf.out deleted file mode 100644 index c31c61092..000000000 --- a/models/ggml-vocab-nomic-bert-moe.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 17 297 201 78660 21775 - 72805 4097 56 - - - - - - - - - - 35378 8999 - 35378 8999 - 35378 6661 - 35378 6661 - 35378 6661 38 - 35378 4 8999 38 - 35378 4 8999 38 - 903 83 6 3 5 238 6366 - 148 7709 1019 361 458 134362 104 7 71 420 1132 - 14271 29 117152 - 6 149561 78270 48967 64254 7616 81705 - 6 247206 15 33176 16 6 247442 6 3 15755 15 144227 8705 18255 40292 158 4460 33 27686 16 6 142325 15 191 538 28 121505 450 1556 6863 10002 47 1098 16 - 35378 - 35378 - 35378 - 35378 - 35378 - 35378 35378 - 15 - 2203 - 242 1615 - 35378 4 113 25 5584 38 11249 621 398 6 201344 705 23638 213 9007 133 1879 2681 2592 135224 1906 6087 - 6 90827 - 138 - 3912 - 6 66000 - 138 66000 - 3912 66000 - 6 66000 66000 - 138 66000 66000 - 3912 66000 66000 - 6 66000 66000 66000 - 199152 3763 - 17116 99397 - 6 247206 15 33176 16 6 247442 6 3 15755 15 144227 8705 18255 40292 158 4460 33 27686 16 6 142325 6 3 138 3912 6 66000 138 66000 3912 66000 6 66000 66000 138 66000 66000 3912 66000 66000 80308 1031 5 363 138 27 363 6 149561 78270 48967 201344 705 23638 213 9007 133 1879 2681 2592 135224 1906 6087 6 110405 1369 69112 69112 69112 14271 29 117152 5106 4765 4765 1135 164721 164721 164721 58 58 58 58 2551 90827 32 85908 87 25 272 2809 242 18 18345 764 25 7 2685 4 242 11766 398 9077 32 242 594 959 9077 87 25 1181 3249 442 4 242 397 398 1884 3060 26156 32 1401 25 26455 10 25 141 866 diff --git a/models/ggml-vocab-phi-3.gguf.inp b/models/ggml-vocab-phi-3.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-phi-3.gguf.inp +++ b/models/ggml-vocab-phi-3.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-phi-3.gguf.out b/models/ggml-vocab-phi-3.gguf.out index 93aacf8ba..2a71a6ef8 100644 --- a/models/ggml-vocab-phi-3.gguf.out +++ b/models/ggml-vocab-phi-3.gguf.out @@ -1,5 +1,5 @@ 474 287 29871 29946 29871 30226 7378 - 383 4000 261 + 11585 7810 295 259 1678 diff --git a/models/ggml-vocab-pixtral.gguf.inp b/models/ggml-vocab-pixtral.gguf.inp deleted file mode 100644 index 9baf7d77a..000000000 --- a/models/ggml-vocab-pixtral.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! 
How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-pixtral.gguf.out b/models/ggml-vocab-pixtral.gguf.out deleted file mode 100644 index 53309d1bc..000000000 --- a/models/ggml-vocab-pixtral.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 2014 1032 1052 1032 28504 6972 - 1070 7088 1258 - - 1032 - 1256 - 1293 - 1009 - 1010 - 1267 - 4688 - 1009 1010 - 22177 4304 - 45383 4304 - 22177 5325 - 45383 5325 - 45383 5325 1033 - 22177 1044 4304 1033 - 45383 1044 4304 1033 - 1593 1395 119685 1166 1153 1046 51228 - 1119 1048 1052 1056 1032 1055 17391 23216 30203 7785 17279 - 3337 30757 1902 4200 63073 3671 - 1225 1158 1128 1225 1158 1182 1225 1158 1147 1225 1159 1139 1225 1158 1143 1225 1159 1130 1225 1158 1150 1225 1158 1183 1225 1158 1159 1225 21359 1225 1158 1159 1225 1158 1162 1225 1158 1182 1225 1158 1133 1225 1158 1129 1225 1158 1155 1225 1158 1133 1225 21359 1225 1158 1137 - 1240 1159 1154 1128 1319 13052 1041 119685 1152 1182 29568 1240 1159 1140 1171 1239 1184 1143 1319 88181 1873 3659 1275 56421 1621 1041 126241 1133 1319 11234 1873 26303 1455 1934 2246 3754 10835 1041 - 22177 - 45383 - 1032 45383 - 1256 45383 - 1293 45383 - 1293 45383 1010 1293 45383 - 1319 - 1010 1376 - 1039 4033 - 22177 1044 1404 48054 1033 3075 1584 1636 119685 1152 1129 3082 26060 2998 63614 82278 1049 1051 1049 1052 1049 1053 1049 6434 6749 - 7290 7290 7290 - 1051 - 1051 1051 - 1051 1051 1051 - 1051 1051 1051 1051 - 1051 1051 1051 1051 1051 - 1051 1051 1051 1051 1051 1051 - 1051 1051 1051 1051 1051 1051 1051 - 1051 1051 1051 1051 1051 1051 1051 1051 - 1051 1051 1051 1051 1051 1051 1051 1051 1051 - 1067 59503 28783 - 3724 4058 - 1010 1032 1267 1032 4688 1032 17152 1458 29356 1010 1256 1010 1293 1010 1260 1010 1652 1010 1240 1159 1154 1128 1319 13052 1041 119685 1152 1182 29568 1240 1159 1140 1171 1239 1184 1143 1319 88181 1873 3659 1275 56421 1621 1041 126241 1133 119685 1166 1153 1240 1159 1166 1153 1032 1051 1032 1051 1051 1032 1051 1051 1051 1032 1051 1051 1051 1051 1032 1051 1051 1051 1051 1051 1032 1051 1051 1051 1051 1051 1051 1032 1051 1051 1051 1051 1051 1051 1051 1032 1051 1051 1051 1051 1051 1051 1051 1051 1032 1051 1046 1051 1032 1051 1791 1051 1032 1051 2880 1051 71881 1158 1128 1225 1158 1182 1225 1158 1147 1225 1159 1139 1225 1158 1143 1225 1159 1130 1225 1158 1150 1225 1158 1183 1225 1158 1159 1225 21359 1225 1158 1159 1225 1158 1162 1225 1158 1182 1225 1158 1133 1240 1159 1152 1129 3082 26060 2998 63614 82278 1049 1051 1049 1052 1049 1053 1049 6434 6749 45577 1045 6626 43555 2843 30757 1902 4200 63073 3671 14931 20040 20040 1657 1657 1975 14135 14135 83923 7290 7290 7290 45509 45509 45509 1362 6483 2151 1576 1116 2189 1514 1681 2156 1044 1576 3609 1636 5257 1063 1576 1077 1605 5257 1362 7534 3180 1494 1044 1576 1068 1636 2479 2269 26883 1063 2837 1039 45654 1261 54297 1076 diff --git 
a/models/ggml-vocab-qwen2.gguf.inp b/models/ggml-vocab-qwen2.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-qwen2.gguf.inp +++ b/models/ggml-vocab-qwen2.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-qwen2.gguf.out b/models/ggml-vocab-qwen2.gguf.out index 18b4b45cd..595d59a44 100644 --- a/models/ggml-vocab-qwen2.gguf.out +++ b/models/ggml-vocab-qwen2.gguf.out @@ -1,5 +1,5 @@ 1122 220 19 220 26062 3951 - 37 50753 261 + 86975 15897 301 220 256 diff --git a/models/ggml-vocab-refact.gguf.inp b/models/ggml-vocab-refact.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-refact.gguf.inp +++ b/models/ggml-vocab-refact.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-refact.gguf.out b/models/ggml-vocab-refact.gguf.out index 63d8305c3..f13dda52c 100644 --- a/models/ggml-vocab-refact.gguf.out +++ b/models/ggml-vocab-refact.gguf.out @@ -1,5 +1,5 @@ 4833 225 38 225 143 140 17723 - 56 2006 3935 265 + 144 231 7132 342 225 261 diff --git a/models/ggml-vocab-roberta-bpe.gguf.inp b/models/ggml-vocab-roberta-bpe.gguf.inp deleted file mode 100644 index 9baf7d77a..000000000 --- a/models/ggml-vocab-roberta-bpe.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? 
We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-roberta-bpe.gguf.out b/models/ggml-vocab-roberta-bpe.gguf.out deleted file mode 100644 index f181ac3dc..000000000 --- a/models/ggml-vocab-roberta-bpe.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 2550 204 18430 377 - 597 2768 298 8564 - - 1437 - 1437 1437 - 1437 1437 1437 - 50117 - 50118 - 50140 - 50140 50118 - 50117 50118 - 31414 232 - 20920 232 - 31414 623 - 20920 623 - 20920 623 328 - 31414 6 232 328 - 20920 6 232 328 - 42 16 8103 18164 27 4 49317 - 605 40976 262 10109 18474 385 29 36807 6455 - 36765 25482 22063 23171 34251 18697 10809 26161 18697 3602 22063 27969 40966 25417 15264 26161 24269 36709 41171 35328 - 1376 17772 7471 1376 17772 19002 1376 17772 9085 1376 4333 13859 1376 17772 9357 1376 4333 9264 1376 17772 25448 1376 17772 18400 1376 17772 4333 1376 4333 10172 1376 17772 4333 1376 17772 7258 1376 17772 19002 1376 17772 5782 1376 17772 10172 1376 17772 3726 1376 17772 5782 1376 4333 10172 1376 17772 23171 - 6569 15113 7471 36 21113 43 17841 19002 17 8384 6569 14285 4958 12605 36 34654 2841 4203 354 10146 26511 1070 43 36174 5782 36 8338 21554 14 34 63 308 19233 43 - 31414 - 20920 - 1437 20920 - 1437 1437 20920 - 1437 1437 1437 20920 - 1437 1437 1437 20920 50118 1437 1437 1437 20920 - 36 - 50118 5457 - 108 3567 - 31414 6 1423 108 1250 328 1336 32 47 17841 10172 17487 47876 3602 48617 15264 46537 11423 27326 48494 8210 49233 1558 1570 27761 49429 43251 10809 17772 - 32376 12846 - 246 - 3103 - 25631 - 46152 - 3103 25631 - 46152 3103 - 46152 25631 - 46152 46152 - 46152 3103 25631 - 347 1376 2023 12410 102 16376 1376 2023 6382 90 - 9553 5954 - 50118 1437 50140 1437 50140 50118 1437 50117 1437 50117 50117 1437 50117 50118 1437 1437 50118 1437 1437 1437 50118 1437 1437 1437 1437 50118 1437 1437 1437 1437 1437 50118 6569 15113 7471 36 21113 43 17841 19002 17 8384 6569 14285 4958 12605 36 34654 2841 4203 354 10146 26511 1070 43 36174 5782 8103 18164 27 6569 18164 27 155 2357 30242 155 25631 30242 3103 30242 25631 30242 46152 30242 3103 25631 155 4 246 155 7586 246 155 734 246 25974 17772 7471 1376 17772 19002 1376 17772 9085 1376 4333 13859 1376 17772 9357 1376 4333 9264 1376 17772 25448 1376 17772 18400 1376 17772 4333 1376 4333 10172 1376 17772 4333 1376 17772 7258 1376 17772 19002 1376 17772 5782 18636 10172 17487 47876 3602 48617 15264 46537 11423 27326 48494 8210 49233 1558 1570 27761 49429 43251 10809 17772 36738 48332 47463 18697 10809 25482 22063 23171 34251 18697 10809 26161 18697 3602 22063 27969 40966 25417 15264 26161 24269 36709 41171 35328 128 49690 108 49972 49519 12905 48149 48149 43796 32376 12846 27282 28749 38 348 57 128 41042 37 18 89 6 128 4629 47 686 116 128 448 45 686 38 581 146 24 6 128 495 47 101 103 6845 116 166 108 30660 10 108 462 574 diff --git a/models/ggml-vocab-starcoder.gguf.inp b/models/ggml-vocab-starcoder.gguf.inp index 9baf7d77a..86b934e40 100644 --- a/models/ggml-vocab-starcoder.gguf.inp +++ b/models/ggml-vocab-starcoder.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-starcoder.gguf.out b/models/ggml-vocab-starcoder.gguf.out index 87e2465d3..4698e2c3c 100644 --- a/models/ggml-vocab-starcoder.gguf.out +++ b/models/ggml-vocab-starcoder.gguf.out @@ -1,5 +1,5 @@ 4850 244 57 244 162 159 17722 - 75 2022 3943 284 + 163 250 7146 361 244 280 diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 62a9f5842..83f7d1a45 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -92,12 +92,14 
@@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${CMAKE llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-nomic-bert-moe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-nomic-bert-moe.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-phi-3.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-qwen2.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf) +# TODO: missing HF tokenizer for this model in convert_hf_to_gguf_update.py, see https://github.com/ggml-org/llama.cpp/pull/13847 +# llama_test(test-tokenizer-0 NAME test-tokenizer-0-nomic-bert-moe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-nomic-bert-moe.gguf) + if (LLAMA_LLGUIDANCE) llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf) endif () From db38704f0133be7832123495fa8fc2601ea999d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Fri, 30 May 2025 14:50:43 +0200 Subject: [PATCH 15/21] convert : fix rwkv bos/eos token (#13844) --- convert_hf_to_gguf.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 54738b862..ab0f0e0ea 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -1047,6 +1047,10 @@ class TextModel(ModelBase): special_vocab.chat_template = "rwkv-world" # hack: Add '\n\n' as the EOT token to make it chat normally special_vocab._set_special_token("eot", 261) + # hack: Override these as they have already been set (incorrectly) + special_vocab.special_token_ids["bos"] = 0 + special_vocab.special_token_ids["eos"] = 0 + special_vocab.add_to_gguf(self.gguf_writer) def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int): From 53f925074de02c5304b00c14b4d6d8910c58667d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 30 May 2025 16:25:45 +0300 Subject: [PATCH 16/21] sync : vendor (#13901) * sync : vendor ggml-ci * cont : fix httplib version ggml-ci * cont : fix lint * cont : fix lint * vendor : move to common folder /vendor ggml-ci * cont : fix lint * cont : move httplib to /vendor + use json_fwd.hpp ggml-ci * cont : fix server build ggml-ci * cont : add missing headers ggml-ci * cont : header clean-up ggml-ci --- .editorconfig | 2 +- common/CMakeLists.txt | 13 +- common/arg.cpp | 10 +- common/chat-parser.h | 3 +- common/chat.cpp | 8 +- common/json-partial.cpp | 11 +- common/json-partial.h | 3 +- common/json-schema-to-grammar.cpp | 3 +- common/json-schema-to-grammar.h | 8 +- scripts/sync_vendor.py | 22 + tests/test-chat.cpp | 11 +- tests/test-grammar-integration.cpp | 2 + tests/test-json-schema-to-grammar.cpp | 2 + tools/mtmd/CMakeLists.txt | 3 +- tools/mtmd/mtmd-helper.cpp | 4 +- tools/run/run.cpp | 17 +- tools/server/CMakeLists.txt | 1 - tools/server/server.cpp | 3 - tools/server/utils.hpp | 7 +- 
tools/tts/tts.cpp | 4 +- .../server => vendor/cpp-httplib}/httplib.h | 18 +- .../vendor => vendor/miniaudio}/miniaudio.h | 0 {common => vendor}/minja/chat-template.hpp | 2 +- {common => vendor}/minja/minja.hpp | 2 +- {common => vendor/nlohmann}/json.hpp | 2834 +++++++++++------ vendor/nlohmann/json_fwd.hpp | 187 ++ {tools/mtmd/vendor => vendor/stb}/stb_image.h | 0 27 files changed, 2084 insertions(+), 1096 deletions(-) create mode 100755 scripts/sync_vendor.py rename {tools/server => vendor/cpp-httplib}/httplib.h (99%) rename {tools/mtmd/vendor => vendor/miniaudio}/miniaudio.h (100%) rename {common => vendor}/minja/chat-template.hpp (99%) rename {common => vendor}/minja/minja.hpp (99%) rename {common => vendor/nlohmann}/json.hpp (94%) create mode 100644 vendor/nlohmann/json_fwd.hpp rename {tools/mtmd/vendor => vendor/stb}/stb_image.h (100%) diff --git a/.editorconfig b/.editorconfig index 47111c72d..c90b171f5 100644 --- a/.editorconfig +++ b/.editorconfig @@ -49,6 +49,6 @@ charset = unset trim_trailing_whitespace = unset insert_final_newline = unset -[tools/mtmd/vendor/miniaudio.h] +[vendor/miniaudio/miniaudio.h] trim_trailing_whitespace = unset insert_final_newline = unset diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index dac4cc770..564af1448 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -58,23 +58,20 @@ add_library(${TARGET} STATIC arg.cpp arg.h base64.hpp - chat.cpp - chat.h chat-parser.cpp chat-parser.h + chat.cpp + chat.h common.cpp common.h console.cpp console.h - json-schema-to-grammar.cpp - json.hpp - json-partial.h json-partial.cpp + json-partial.h + json-schema-to-grammar.cpp llguidance.cpp log.cpp log.h - minja/chat-template.hpp - minja/minja.hpp ngram-cache.cpp ngram-cache.h regex-partial.cpp @@ -147,7 +144,7 @@ if (LLAMA_LLGUIDANCE) set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} llguidance ${LLGUIDANCE_PLATFORM_LIBS}) endif () -target_include_directories(${TARGET} PUBLIC .) +target_include_directories(${TARGET} PUBLIC . 
../vendor) target_compile_features (${TARGET} PUBLIC cxx_std_17) target_link_libraries (${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads) diff --git a/common/arg.cpp b/common/arg.cpp index 69a58364f..b6cb0aa91 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -1,10 +1,11 @@ -#include "gguf.h" // for reading GGUF splits #include "arg.h" +#include "chat.h" #include "common.h" +#include "gguf.h" // for reading GGUF splits +#include "json-schema-to-grammar.h" #include "log.h" #include "sampling.h" -#include "chat.h" // fix problem with std::min and std::max #if defined(_WIN32) @@ -15,6 +16,9 @@ #include #endif +#define JSON_ASSERT GGML_ASSERT +#include + #include #include #include @@ -34,8 +38,6 @@ #include #endif -#include "json-schema-to-grammar.h" - using json = nlohmann::ordered_json; std::initializer_list mmproj_examples = { diff --git a/common/chat-parser.h b/common/chat-parser.h index 5d53f2df1..7ee355056 100644 --- a/common/chat-parser.h +++ b/common/chat-parser.h @@ -2,9 +2,10 @@ #include "chat.h" #include "json-partial.h" -#include "json.hpp" #include "regex-partial.h" +#include + #include #include #include diff --git a/common/chat.cpp b/common/chat.cpp index 7584639b0..f1ab4c85a 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -1,13 +1,14 @@ #include "chat.h" #include "chat-parser.h" #include "common.h" +#include "json-partial.h" #include "json-schema-to-grammar.h" #include "log.h" -#include "json-partial.h" -#include "minja/chat-template.hpp" -#include "minja/minja.hpp" #include "regex-partial.h" +#include +#include + #include #include #include @@ -16,7 +17,6 @@ #include #include - static std::string format_time(const std::chrono::system_clock::time_point & now, const std::string & format) { auto time = std::chrono::system_clock::to_time_t(now); auto local_time = *std::localtime(&time); diff --git a/common/json-partial.cpp b/common/json-partial.cpp index 7591a8e4c..d9d916998 100644 --- a/common/json-partial.cpp +++ b/common/json-partial.cpp @@ -1,9 +1,10 @@ -#include -#include "ggml.h" -#include "log.h" -#include +#include "json-partial.h" -#include +#include "log.h" + +#include + +#include using json = nlohmann::ordered_json; diff --git a/common/json-partial.h b/common/json-partial.h index 854db6a3a..f63356dc4 100644 --- a/common/json-partial.h +++ b/common/json-partial.h @@ -1,5 +1,6 @@ #pragma once -#include + +#include // Healing marker (empty if the JSON was fully parsed / wasn't healed). 
struct common_healing_marker { diff --git a/common/json-schema-to-grammar.cpp b/common/json-schema-to-grammar.cpp index 5b3059c2f..d38a74f95 100644 --- a/common/json-schema-to-grammar.cpp +++ b/common/json-schema-to-grammar.cpp @@ -1,8 +1,9 @@ #include "json-schema-to-grammar.h" #include "common.h" +#include + #include -#include #include #include #include diff --git a/common/json-schema-to-grammar.h b/common/json-schema-to-grammar.h index 4613f5d9f..362991b54 100644 --- a/common/json-schema-to-grammar.h +++ b/common/json-schema-to-grammar.h @@ -1,9 +1,9 @@ #pragma once -#include "ggml.h" -// Change JSON_ASSERT from assert() to GGML_ASSERT: -#define JSON_ASSERT GGML_ASSERT -#include "json.hpp" +#include + +#include +#include std::string json_schema_to_grammar(const nlohmann::ordered_json & schema, bool force_gbnf = false); diff --git a/scripts/sync_vendor.py b/scripts/sync_vendor.py new file mode 100755 index 000000000..1151c9f01 --- /dev/null +++ b/scripts/sync_vendor.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +import urllib.request + +vendor = { + "https://github.com/nlohmann/json/releases/latest/download/json.hpp": "vendor/nlohmann/json.hpp", + "https://github.com/nlohmann/json/releases/latest/download/json_fwd.hpp": "vendor/nlohmann/json_fwd.hpp", + + # sync manually + # "https://raw.githubusercontent.com/ochafik/minja/refs/heads/main/include/minja/minja.hpp": "vendor/minja/minja.hpp", + # "https://raw.githubusercontent.com/ochafik/minja/refs/heads/main/include/minja/chat-template.hpp": "vendor/minja/chat-template.hpp", + + "https://raw.githubusercontent.com/nothings/stb/refs/heads/master/stb_image.h": "vendor/stb/stb_image.h", + + "https://github.com/mackron/miniaudio/raw/refs/tags/0.11.22/miniaudio.h": "vendor/miniaudio/miniaudio.h", + + "https://raw.githubusercontent.com/yhirose/cpp-httplib/refs/tags/v0.20.1/httplib.h": "vendor/cpp-httplib/httplib.h", +} + +for url, filename in vendor.items(): + print(f"downloading {url} to {filename}") # noqa: NP100 + urllib.request.urlretrieve(url, filename) diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index 95d516991..cd5827aa3 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -5,16 +5,17 @@ // // cmake -B build && cmake --build build --parallel && ./build/bin/test-chat ../minja/build/tests/*.jinja 2>/dev/null // -#include -#include -#include -#include - #include "chat.h" #include "../src/unicode.h" #include "../src/llama-grammar.h" +#include + +#include +#include +#include + using json = nlohmann::ordered_json; static std::ostream & operator<<(std::ostream & os, const common_chat_msg_diff & diff) { diff --git a/tests/test-grammar-integration.cpp b/tests/test-grammar-integration.cpp index 8988c347e..6d64f0737 100644 --- a/tests/test-grammar-integration.cpp +++ b/tests/test-grammar-integration.cpp @@ -7,6 +7,8 @@ #include "../src/unicode.h" #include "../src/llama-grammar.h" +#include + #include #include #include diff --git a/tests/test-json-schema-to-grammar.cpp b/tests/test-json-schema-to-grammar.cpp index 38cf01d6d..78ee55e24 100755 --- a/tests/test-json-schema-to-grammar.cpp +++ b/tests/test-json-schema-to-grammar.cpp @@ -6,6 +6,8 @@ #include "../src/llama-grammar.h" +#include + #include #include #include diff --git a/tools/mtmd/CMakeLists.txt b/tools/mtmd/CMakeLists.txt index 33e251d3b..be5414526 100644 --- a/tools/mtmd/CMakeLists.txt +++ b/tools/mtmd/CMakeLists.txt @@ -23,8 +23,7 @@ add_library(mtmd_helper OBJECT target_link_libraries(mtmd_helper PRIVATE ggml llama mtmd ${CMAKE_THREAD_LIBS_INIT}) 
target_include_directories(mtmd_helper PUBLIC .) -target_include_directories(mtmd_helper PRIVATE ./vendor) -target_include_directories(mtmd_helper PRIVATE ../..) +target_include_directories(mtmd_helper PRIVATE ../../vendor) target_compile_features(mtmd_helper PRIVATE cxx_std_17) if (BUILD_SHARED_LIBS) diff --git a/tools/mtmd/mtmd-helper.cpp b/tools/mtmd/mtmd-helper.cpp index 058323818..64f03fd1e 100644 --- a/tools/mtmd/mtmd-helper.cpp +++ b/tools/mtmd/mtmd-helper.cpp @@ -27,10 +27,10 @@ #define MA_NO_ENGINE #define MA_NO_GENERATION #define MA_API static -#include "vendor/miniaudio.h" +#include "miniaudio/miniaudio.h" #define STB_IMAGE_IMPLEMENTATION -#include "vendor/stb_image.h" +#include "stb/stb_image.h" #define LOG_INF(...) fprintf(stdout, __VA_ARGS__) #define LOG_ERR(...) fprintf(stderr, __VA_ARGS__) diff --git a/tools/run/run.cpp b/tools/run/run.cpp index 702c307d8..4aef93863 100644 --- a/tools/run/run.cpp +++ b/tools/run/run.cpp @@ -1,3 +1,13 @@ +#include "chat.h" +#include "common.h" +#include "llama-cpp.h" +#include "log.h" + +#include "linenoise.cpp/linenoise.h" + +#define JSON_ASSERT GGML_ASSERT +#include + #if defined(_WIN32) # include # include @@ -24,13 +34,6 @@ #include #include -#include "chat.h" -#include "common.h" -#include "json.hpp" -#include "linenoise.cpp/linenoise.h" -#include "llama-cpp.h" -#include "log.h" - #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32) [[noreturn]] static void sigint_handler(int) { printf("\n" LOG_COL_DEFAULT); diff --git a/tools/server/CMakeLists.txt b/tools/server/CMakeLists.txt index 08597145c..8ad09380a 100644 --- a/tools/server/CMakeLists.txt +++ b/tools/server/CMakeLists.txt @@ -12,7 +12,6 @@ endif() set(TARGET_SRCS server.cpp utils.hpp - httplib.h ) set(PUBLIC_ASSETS index.html.gz diff --git a/tools/server/server.cpp b/tools/server/server.cpp index 96d1e4adf..5d03dc3dc 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -11,9 +11,6 @@ #include "mtmd.h" #include "mtmd-helper.h" -// Change JSON_ASSERT from assert() to GGML_ASSERT: -#define JSON_ASSERT GGML_ASSERT -#include "json.hpp" // mime type for sending response #define MIMETYPE_JSON "application/json; charset=utf-8" diff --git a/tools/server/utils.hpp b/tools/server/utils.hpp index 58b679d75..f3e0392a4 100644 --- a/tools/server/utils.hpp +++ b/tools/server/utils.hpp @@ -7,17 +7,16 @@ #include "base64.hpp" #include "mtmd.h" #include "mtmd-helper.h" +#include "chat.h" // increase max payload length to allow use of larger context size #define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576 // disable Nagle's algorithm #define CPPHTTPLIB_TCP_NODELAY true -#include "httplib.h" +#include -// Change JSON_ASSERT from assert() to GGML_ASSERT: #define JSON_ASSERT GGML_ASSERT -#include "json.hpp" -#include "chat.h" +#include #include #include diff --git a/tools/tts/tts.cpp b/tools/tts/tts.cpp index a7c536eea..a71e9bf5b 100644 --- a/tools/tts/tts.cpp +++ b/tools/tts/tts.cpp @@ -5,7 +5,9 @@ #include "sampling.h" #include "log.h" #include "llama.h" -#include "json.hpp" + +#define JSON_ASSERT GGML_ASSERT +#include #include #include diff --git a/tools/server/httplib.h b/vendor/cpp-httplib/httplib.h similarity index 99% rename from tools/server/httplib.h rename to vendor/cpp-httplib/httplib.h index 0f981dc89..0aa4e6274 100644 --- a/tools/server/httplib.h +++ b/vendor/cpp-httplib/httplib.h @@ -8,7 +8,7 @@ #ifndef CPPHTTPLIB_HTTPLIB_H #define CPPHTTPLIB_HTTPLIB_H -#define CPPHTTPLIB_VERSION "0.20.0" +#define CPPHTTPLIB_VERSION "0.20.1" /* * 
Configuration @@ -145,6 +145,10 @@ #define CPPHTTPLIB_LISTEN_BACKLOG 5 #endif +#ifndef CPPHTTPLIB_MAX_LINE_LENGTH +#define CPPHTTPLIB_MAX_LINE_LENGTH 32768 +#endif + /* * Headers */ @@ -3067,6 +3071,11 @@ inline bool stream_line_reader::getline() { #endif for (size_t i = 0;; i++) { + if (size() >= CPPHTTPLIB_MAX_LINE_LENGTH) { + // Treat exceptionally long lines as an error to + // prevent infinite loops/memory exhaustion + return false; + } char byte; auto n = strm_.read(&byte, 1); @@ -6055,6 +6064,8 @@ inline void calc_actual_timeout(time_t max_timeout_msec, time_t duration_msec, auto actual_timeout_msec = (std::min)(max_timeout_msec - duration_msec, timeout_msec); + if (actual_timeout_msec < 0) { actual_timeout_msec = 0; } + actual_timeout_sec = actual_timeout_msec / 1000; actual_timeout_usec = (actual_timeout_msec % 1000) * 1000; } @@ -7327,8 +7338,9 @@ Server::process_request(Stream &strm, const std::string &remote_addr, } // Setup `is_connection_closed` method - req.is_connection_closed = [&]() { - return !detail::is_socket_alive(strm.socket()); + auto sock = strm.socket(); + req.is_connection_closed = [sock]() { + return !detail::is_socket_alive(sock); }; // Routing diff --git a/tools/mtmd/vendor/miniaudio.h b/vendor/miniaudio/miniaudio.h similarity index 100% rename from tools/mtmd/vendor/miniaudio.h rename to vendor/miniaudio/miniaudio.h diff --git a/common/minja/chat-template.hpp b/vendor/minja/chat-template.hpp similarity index 99% rename from common/minja/chat-template.hpp rename to vendor/minja/chat-template.hpp index c930a587a..ab5b521dd 100644 --- a/common/minja/chat-template.hpp +++ b/vendor/minja/chat-template.hpp @@ -22,7 +22,7 @@ #include #include -#include +#include using json = nlohmann::ordered_json; diff --git a/common/minja/minja.hpp b/vendor/minja/minja.hpp similarity index 99% rename from common/minja/minja.hpp rename to vendor/minja/minja.hpp index b3b00547d..f9658ddc0 100644 --- a/common/minja/minja.hpp +++ b/vendor/minja/minja.hpp @@ -29,7 +29,7 @@ #include #include -#include +#include using json = nlohmann::ordered_json; diff --git a/common/json.hpp b/vendor/nlohmann/json.hpp similarity index 94% rename from common/json.hpp rename to vendor/nlohmann/json.hpp index a858728c4..82d69f7c5 100644 --- a/common/json.hpp +++ b/vendor/nlohmann/json.hpp @@ -1,9 +1,9 @@ // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT /****************************************************************************\ @@ -34,10 +34,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -47,10 +47,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -59,20 +59,24 @@ #ifndef 
JSON_SKIP_LIBRARY_VERSION_CHECK #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH) - #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 11 || NLOHMANN_JSON_VERSION_PATCH != 3 + #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 12 || NLOHMANN_JSON_VERSION_PATCH != 0 #warning "Already included a different version of the library!" #endif #endif #endif #define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum) -#define NLOHMANN_JSON_VERSION_MINOR 11 // NOLINT(modernize-macro-to-enum) -#define NLOHMANN_JSON_VERSION_PATCH 3 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_MINOR 12 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_PATCH 0 // NOLINT(modernize-macro-to-enum) #ifndef JSON_DIAGNOSTICS #define JSON_DIAGNOSTICS 0 #endif +#ifndef JSON_DIAGNOSTIC_POSITIONS + #define JSON_DIAGNOSTIC_POSITIONS 0 +#endif + #ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0 #endif @@ -83,6 +87,12 @@ #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS #endif +#if JSON_DIAGNOSTIC_POSITIONS + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS _dp +#else + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS +#endif + #if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp #else @@ -94,14 +104,15 @@ #endif // Construct the namespace ABI tags component -#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) json_abi ## a ## b -#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b) \ - NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) +#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) json_abi ## a ## b ## c +#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b, c) \ + NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) #define NLOHMANN_JSON_ABI_TAGS \ NLOHMANN_JSON_ABI_TAGS_CONCAT( \ NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \ - NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON) + NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON, \ + NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS) // Construct the namespace version component #define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \ @@ -149,10 +160,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -162,6 +173,9 @@ #include // forward_list #include // inserter, front_inserter, end #include // map +#ifdef JSON_HAS_CPP_17 + #include // optional +#endif #include // string #include // tuple, make_tuple #include // is_arithmetic, is_same, is_enum, underlying_type, is_convertible @@ -172,10 +186,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -192,10 +206,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann 
+// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -208,10 +222,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -220,10 +234,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -233,10 +247,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -320,11 +334,11 @@ NLOHMANN_JSON_NAMESPACE_END // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann -// SPDX-FileCopyrightText: 2016-2021 Evan Nemerson +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-FileCopyrightText: 2016 - 2021 Evan Nemerson // SPDX-License-Identifier: MIT /* Hedley - https://nemequ.github.io/hedley @@ -2384,15 +2398,20 @@ JSON_HEDLEY_DIAGNOSTIC_POP // C++ language standard detection // if the user manually specified the used c++ version this is skipped -#if !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11) - #if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#if !defined(JSON_HAS_CPP_23) && !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11) + #if (defined(__cplusplus) && __cplusplus > 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG > 202002L) + #define JSON_HAS_CPP_23 #define JSON_HAS_CPP_20 #define JSON_HAS_CPP_17 #define JSON_HAS_CPP_14 - #elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 + #elif (defined(__cplusplus) && __cplusplus > 201703L) || (defined(_MSVC_LANG) && _MSVC_LANG > 201703L) + #define JSON_HAS_CPP_20 #define JSON_HAS_CPP_17 #define JSON_HAS_CPP_14 - #elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) + #elif (defined(__cplusplus) && __cplusplus > 201402L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 + #define JSON_HAS_CPP_17 + #define JSON_HAS_CPP_14 + #elif (defined(__cplusplus) && __cplusplus > 201103L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) #define JSON_HAS_CPP_14 #endif // the cpp 11 flag is always specified because it is the minimal required version @@ -2568,7 +2587,9 @@ JSON_HEDLEY_DIAGNOSTIC_POP template \ inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ { \ + /* NOLINTNEXTLINE(modernize-type-traits) we use C++11 */ \ static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + /* 
NOLINTNEXTLINE(modernize-avoid-c-arrays) we don't want to depend on */ \ static const std::pair m[] = __VA_ARGS__; \ auto it = std::find_if(std::begin(m), std::end(m), \ [e](const std::pair& ej_pair) -> bool \ @@ -2580,7 +2601,9 @@ JSON_HEDLEY_DIAGNOSTIC_POP template \ inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ { \ + /* NOLINTNEXTLINE(modernize-type-traits) we use C++11 */ \ static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + /* NOLINTNEXTLINE(modernize-avoid-c-arrays) we don't want to depend on */ \ static const std::pair m[] = __VA_ARGS__; \ auto it = std::find_if(std::begin(m), std::end(m), \ [&j](const std::pair& ej_pair) -> bool \ @@ -2743,42 +2766,146 @@ JSON_HEDLEY_DIAGNOSTIC_POP #define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1; #define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1); -#define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1); +#define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = !nlohmann_json_j.is_null() ? nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1) : nlohmann_json_default_obj.v1; /*! @brief macro @def NLOHMANN_DEFINE_TYPE_INTRUSIVE @since version 3.9.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_intrusive/ */ #define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) \ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT +@since version 3.11.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_intrusive/ +*/ #define NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT(Type, ...) \ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE +@since version 3.11.3 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_intrusive/ +*/ #define NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE(Type, ...) 
\ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } /*! @brief macro @def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE @since version 3.9.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_non_intrusive/ */ #define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) \ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } - -#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT +@since version 3.11.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_non_intrusive/ +*/ #define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, ...) \ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE +@since version 3.11.3 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_non_intrusive/ +*/ +#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE(Type, BaseType, ...) 
\ + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_WITH_DEFAULT +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_WITH_DEFAULT(Type, BaseType, ...) \ + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast(nlohmann_json_t)); const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_ONLY_SERIALIZE +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_ONLY_SERIALIZE(Type, BaseType, ...) \ + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE(Type, BaseType, ...) \ + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_WITH_DEFAULT +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, BaseType, ...) \ + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast(nlohmann_json_t)); const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + +/*! 
+@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, BaseType, ...) \ + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } // inspired from https://stackoverflow.com/a/26745591 -// allows to call any std function as if (e.g. with begin): +// allows calling any std function as if (e.g., with begin): // using std::begin; begin(x); // // it allows using the detected idiom to retrieve the return type @@ -2939,10 +3066,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3014,10 +3141,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3056,10 +3183,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-FileCopyrightText: 2018 The Abseil Authors // SPDX-License-Identifier: MIT @@ -3219,7 +3346,7 @@ struct static_const #endif template -inline constexpr std::array make_array(Args&& ... args) +constexpr std::array make_array(Args&& ... 
args) { return std::array {{static_cast(std::forward(args))...}}; } @@ -3230,27 +3357,27 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #include // numeric_limits +#include // char_traits +#include // tuple #include // false_type, is_constructible, is_integral, is_same, true_type #include // declval -#include // tuple -#include // char_traits // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3293,7 +3420,7 @@ struct iterator_traits template struct iterator_traits < T, enable_if_t < !std::is_pointer::value >> - : iterator_types + : iterator_types { }; @@ -3315,10 +3442,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3335,10 +3462,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3359,10 +3486,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_ @@ -3624,7 +3751,7 @@ struct char_traits : std::char_traits static constexpr int_type eof() noexcept { - return static_cast(EOF); + return static_cast(std::char_traits::eof()); } }; @@ -3648,7 +3775,7 @@ struct char_traits : std::char_traits static constexpr int_type eof() noexcept { - return static_cast(EOF); + return static_cast(std::char_traits::eof()); } }; @@ -3674,19 +3801,19 @@ struct is_default_constructible : std::is_default_constructible {}; template struct is_default_constructible> - : conjunction, is_default_constructible> {}; + : conjunction, is_default_constructible> {}; template struct is_default_constructible> - : conjunction, is_default_constructible> {}; + : conjunction, is_default_constructible> {}; template struct is_default_constructible> - : conjunction...> {}; + : conjunction...> {}; template struct is_default_constructible> - : conjunction...> {}; + : conjunction...> {}; template struct is_constructible : std::is_constructible {}; @@ -3884,8 +4011,8 @@ is_detected::value&& // special case for types like std::filesystem::path whose iterator's 
value_type are themselves // c.f. https://github.com/nlohmann/json/pull/3073 !std::is_same>::value&& - is_complete_type < - detected_t>::value >> +is_complete_type < +detected_t>::value >> { using value_type = range_value_t; @@ -4008,12 +4135,12 @@ using is_usable_as_key_type = typename std::conditional < template> using is_usable_as_basic_json_key_type = typename std::conditional < - is_usable_as_key_type::value - && !is_json_iterator_of::value, - std::true_type, - std::false_type >::type; + is_usable_as_key_type::value + && !is_json_iterator_of::value, + std::true_type, + std::false_type >::type; template using detect_erase_with_key_type = decltype(std::declval().erase(std::declval())); @@ -4147,7 +4274,7 @@ struct value_in_range_of_impl1 }; template -inline constexpr bool value_in_range_of(T val) +constexpr bool value_in_range_of(T val) { return value_in_range_of_impl1::test(val); } @@ -4163,7 +4290,7 @@ namespace impl { template -inline constexpr bool is_c_string() +constexpr bool is_c_string() { using TUnExt = typename std::remove_extent::type; using TUnCVExt = typename std::remove_cv::type; @@ -4191,7 +4318,7 @@ namespace impl { template -inline constexpr bool is_transparent() +constexpr bool is_transparent() { return is_detected::value; } @@ -4210,10 +4337,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -4358,6 +4485,18 @@ inline OutStringType concat(Args && ... args) NLOHMANN_JSON_NAMESPACE_END +// With -Wweak-vtables, Clang will complain about the exception classes as they +// have no out-of-line virtual method definitions and their vtable will be +// emitted in every translation unit. This issue cannot be fixed with a +// header-only library as there is no implementation file to move these +// functions to. As a result, we suppress this warning here to avoid client +// code to stumble over this. See https://github.com/nlohmann/json/issues/4087 +// for a discussion. 
+#if defined(__clang__) + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wweak-vtables" +#endif + NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { @@ -4452,16 +4591,34 @@ class exception : public std::exception { return concat(a, '/', detail::escape(b)); }); - return concat('(', str, ") "); + + return concat('(', str, ") ", get_byte_positions(leaf_element)); #else - static_cast(leaf_element); - return ""; + return get_byte_positions(leaf_element); #endif } private: /// an exception object as storage for error messages std::runtime_error m; +#if JSON_DIAGNOSTIC_POSITIONS + template + static std::string get_byte_positions(const BasicJsonType* leaf_element) + { + if ((leaf_element->start_pos() != std::string::npos) && (leaf_element->end_pos() != std::string::npos)) + { + return concat("(bytes ", std::to_string(leaf_element->start_pos()), "-", std::to_string(leaf_element->end_pos()), ") "); + } + return ""; + } +#else + template + static std::string get_byte_positions(const BasicJsonType* leaf_element) + { + static_cast(leaf_element); + return ""; + } +#endif }; /// @brief exception indicating a parse error @@ -4589,6 +4746,10 @@ class other_error : public exception } // namespace detail NLOHMANN_JSON_NAMESPACE_END +#if defined(__clang__) + #pragma clang diagnostic pop +#endif + // #include // #include @@ -4596,10 +4757,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -4620,10 +4781,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -4640,7 +4801,7 @@ namespace std_fs = std::experimental::filesystem; } // namespace detail NLOHMANN_JSON_NAMESPACE_END #elif JSON_HAS_FILESYSTEM -#include +#include // NOLINT(build/c++17) NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { @@ -4670,6 +4831,24 @@ inline void from_json(const BasicJsonType& j, typename std::nullptr_t& n) n = nullptr; } +#ifdef JSON_HAS_CPP_17 +#ifndef JSON_USE_IMPLICIT_CONVERSIONS +template +void from_json(const BasicJsonType& j, std::optional& opt) +{ + if (j.is_null()) + { + opt = std::nullopt; + } + else + { + opt.emplace(j.template get()); + } +} + +#endif // JSON_USE_IMPLICIT_CONVERSIONS +#endif // JSON_HAS_CPP_17 + // overloads for basic_json template parameters template < typename BasicJsonType, typename ArithmeticType, enable_if_t < std::is_arithmetic::value&& @@ -4817,6 +4996,54 @@ auto from_json(const BasicJsonType& j, T (&arr)[N]) // NOLINT(cppcoreguidelines } } +template +auto from_json(const BasicJsonType& j, T (&arr)[N1][N2]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) +-> decltype(j.template get(), void()) +{ + for (std::size_t i1 = 0; i1 < N1; ++i1) + { + for (std::size_t i2 = 0; i2 < N2; ++i2) + { + arr[i1][i2] = j.at(i1).at(i2).template get(); + } + } +} + +template +auto from_json(const BasicJsonType& j, T (&arr)[N1][N2][N3]) // 
NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) +-> decltype(j.template get(), void()) +{ + for (std::size_t i1 = 0; i1 < N1; ++i1) + { + for (std::size_t i2 = 0; i2 < N2; ++i2) + { + for (std::size_t i3 = 0; i3 < N3; ++i3) + { + arr[i1][i2][i3] = j.at(i1).at(i2).at(i3).template get(); + } + } + } +} + +template +auto from_json(const BasicJsonType& j, T (&arr)[N1][N2][N3][N4]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) +-> decltype(j.template get(), void()) +{ + for (std::size_t i1 = 0; i1 < N1; ++i1) + { + for (std::size_t i2 = 0; i2 < N2; ++i2) + { + for (std::size_t i3 = 0; i3 < N3; ++i3) + { + for (std::size_t i4 = 0; i4 < N4; ++i4) + { + arr[i1][i2][i3][i4] = j.at(i1).at(i2).at(i3).at(i4).template get(); + } + } + } + } +} + template inline void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_t& arr, priority_tag<3> /*unused*/) { @@ -4902,7 +5129,7 @@ void()) template < typename BasicJsonType, typename T, std::size_t... Idx > std::array from_json_inplace_array_impl(BasicJsonType&& j, - identity_tag> /*unused*/, index_sequence /*unused*/) + identity_tag> /*unused*/, index_sequence /*unused*/) { return { { std::forward(j).at(Idx).template get()... } }; } @@ -5006,6 +5233,12 @@ std::tuple from_json_tuple_impl_base(BasicJsonType&& j, index_sequence< return std::make_tuple(std::forward(j).at(Idx).template get()...); } +template +std::tuple<> from_json_tuple_impl_base(BasicJsonType& /*unused*/, index_sequence<> /*unused*/) +{ + return {}; +} + template < typename BasicJsonType, class A1, class A2 > std::pair from_json_tuple_impl(BasicJsonType&& j, identity_tag> /*unused*/, priority_tag<0> /*unused*/) { @@ -5091,7 +5324,12 @@ inline void from_json(const BasicJsonType& j, std_fs::path& p) { JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j)); } - p = *j.template get_ptr(); + const auto& s = *j.template get_ptr(); +#ifdef JSON_HAS_CPP_20 + p = std_fs::path(std::u8string_view(reinterpret_cast(s.data()), s.size())); +#else + p = std_fs::u8path(s); // accepts UTF-8 encoded std::string in C++17, deprecated in C++20 +#endif } #endif @@ -5126,14 +5364,20 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT +// #include +// JSON_HAS_CPP_17 +#ifdef JSON_HAS_CPP_17 + #include // optional +#endif + #include // copy #include // begin, end #include // string @@ -5146,17 +5390,16 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #include // size_t -#include // input_iterator_tag -#include // string, to_string +#include // forward_iterator_tag #include // tuple_size, get, tuple_element #include // move @@ -5168,6 +5411,46 @@ NLOHMANN_JSON_NAMESPACE_END // #include +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.12.0 +// 
|_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include // size_t +#include // string, to_string + +// #include + + +NLOHMANN_JSON_NAMESPACE_BEGIN +namespace detail +{ + +template +void int_to_string(StringType& target, std::size_t value) +{ + // For ADL + using std::to_string; + target = to_string(value); +} + +template +StringType to_string(std::size_t value) +{ + StringType result; + int_to_string(result, value); + return result; +} + +} // namespace detail +NLOHMANN_JSON_NAMESPACE_END + // #include @@ -5175,13 +5458,6 @@ NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { -template -void int_to_string( string_type& target, std::size_t value ) -{ - // For ADL - using std::to_string; - target = to_string(value); -} template class iteration_proxy_value { public: @@ -5189,7 +5465,7 @@ template class iteration_proxy_value using value_type = iteration_proxy_value; using pointer = value_type *; using reference = value_type &; - using iterator_category = std::input_iterator_tag; + using iterator_category = std::forward_iterator_tag; using string_type = typename std::remove_cv< typename std::remove_reference().key() ) >::type >::type; private: @@ -5369,7 +5645,7 @@ namespace std #endif template class tuple_size<::nlohmann::detail::iteration_proxy_value> // NOLINT(cert-dcl58-cpp) - : public std::integral_constant {}; + : public std::integral_constant {}; template class tuple_element> // NOLINT(cert-dcl58-cpp) @@ -5390,8 +5666,6 @@ class tuple_element> inline constexpr bool ::std::ranges::enable_borrowed_range<::nlohmann::detail::iteration_proxy> = true; #endif -// #include - // #include // #include @@ -5637,6 +5911,22 @@ struct external_constructor // to_json // ///////////// +#ifdef JSON_HAS_CPP_17 +template::value, int> = 0> +void to_json(BasicJsonType& j, const std::optional& opt) +{ + if (opt.has_value()) + { + j = *opt; + } + else + { + j = nullptr; + } +} +#endif + template::value, int> = 0> inline void to_json(BasicJsonType& j, T b) noexcept @@ -5783,6 +6073,13 @@ inline void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence< j = { std::get(t)... 
}; } +template +inline void to_json_tuple_impl(BasicJsonType& j, const Tuple& /*unused*/, index_sequence<> /*unused*/) +{ + using array_t = typename BasicJsonType::array_t; + j = array_t(); +} + template::value, int > = 0> inline void to_json(BasicJsonType& j, const T& t) { @@ -5793,7 +6090,12 @@ inline void to_json(BasicJsonType& j, const T& t) template inline void to_json(BasicJsonType& j, const std_fs::path& p) { - j = p.string(); +#ifdef JSON_HAS_CPP_20 + const std::u8string s = p.u8string(); + j = std::string(s.begin(), s.end()); +#else + j = p.u8string(); // returns std::string in C++17 +#endif } #endif @@ -5868,10 +6170,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -5980,10 +6282,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -6113,10 +6415,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -6133,16 +6435,19 @@ NLOHMANN_JSON_NAMESPACE_END #include // char_traits, string #include // make_pair, move #include // vector +#ifdef __cpp_lib_byteswap + #include //byteswap +#endif // #include // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -6162,6 +6467,8 @@ NLOHMANN_JSON_NAMESPACE_END #include // istream #endif // JSON_NO_IO +// #include + // #include // #include @@ -6209,6 +6516,13 @@ class file_input_adapter return std::fgetc(m_file); } + // returns the number of characters successfully read + template + std::size_t get_elements(T* dest, std::size_t count = 1) + { + return fread(dest, 1, sizeof(T) * count, m_file); + } + private: /// the file pointer to read from std::FILE* m_file; @@ -6268,6 +6582,17 @@ class input_stream_adapter return res; } + template + std::size_t get_elements(T* dest, std::size_t count = 1) + { + auto res = static_cast(sb->sgetn(reinterpret_cast(dest), static_cast(count * sizeof(T)))); + if (JSON_HEDLEY_UNLIKELY(res < count * sizeof(T))) + { + is->clear(is->rdstate() | std::ios::eofbit); + } + return res; + } + private: /// the associated input stream std::istream* is = nullptr; @@ -6299,6 +6624,26 @@ class iterator_input_adapter return char_traits::eof(); } + // for general iterators, we cannot really do something better than falling back to processing the range one-by-one + template + std::size_t get_elements(T* dest, std::size_t count = 1) + { 
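+            // Copies count * sizeof(T) bytes from the iterator range into dest one
+            // input character at a time; if the range is exhausted early, the number
+            // of bytes copied so far is returned so the caller can detect a short read.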
+ auto* ptr = reinterpret_cast(dest); + for (std::size_t read_index = 0; read_index < count * sizeof(T); ++read_index) + { + if (JSON_HEDLEY_LIKELY(current != end)) + { + ptr[read_index] = static_cast(*current); + std::advance(current, 1); + } + else + { + return read_index; + } + } + return count * sizeof(T); + } + private: IteratorType current; IteratorType end; @@ -6462,6 +6807,13 @@ class wide_string_input_adapter return utf8_bytes[utf8_bytes_index++]; } + // parsing binary with wchar doesn't make sense, but since the parsing mode can be runtime, we need something here + template + std::size_t get_elements(T* /*dest*/, std::size_t /*count*/ = 1) + { + JSON_THROW(parse_error::create(112, 1, "wide string type cannot be interpreted as binary data", nullptr)); + } + private: BaseInputAdapter base_adapter; @@ -6558,10 +6910,17 @@ typename container_input_adapter_factory_impl::container_input_adapter_factory::create(container); } +// specialization for std::string +using string_input_adapter_type = decltype(input_adapter(std::declval())); + #ifndef JSON_NO_IO // Special cases with fast paths inline file_input_adapter input_adapter(std::FILE* file) { + if (file == nullptr) + { + JSON_THROW(parse_error::create(101, 0, "attempting to parse an empty input; check that your input string or stream contains the expected JSON", nullptr)); + } return file_input_adapter(file); } @@ -6588,9 +6947,13 @@ template < typename CharT, int >::type = 0 > contiguous_bytes_input_adapter input_adapter(CharT b) { + if (b == nullptr) + { + JSON_THROW(parse_error::create(101, 0, "attempting to parse an empty input; check that your input string or stream contains the expected JSON", nullptr)); + } auto length = std::strlen(reinterpret_cast(b)); const auto* ptr = reinterpret_cast(b); - return input_adapter(ptr, ptr + length); + return input_adapter(ptr, ptr + length); // cppcheck-suppress[nullPointerArithmeticRedundantCheck] } template @@ -6636,742 +6999,29 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #include #include // string +#include // enable_if_t #include // move #include // vector // #include -// #include - -// #include - - -NLOHMANN_JSON_NAMESPACE_BEGIN - -/*! -@brief SAX interface - -This class describes the SAX interface used by @ref nlohmann::json::sax_parse. -Each function is called in different situations while the input is parsed. The -boolean return value informs the parser whether to continue processing the -input. -*/ -template -struct json_sax -{ - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - /*! - @brief a null value was read - @return whether parsing should proceed - */ - virtual bool null() = 0; - - /*! - @brief a boolean value was read - @param[in] val boolean value - @return whether parsing should proceed - */ - virtual bool boolean(bool val) = 0; - - /*! 
- @brief an integer number was read - @param[in] val integer value - @return whether parsing should proceed - */ - virtual bool number_integer(number_integer_t val) = 0; - - /*! - @brief an unsigned integer number was read - @param[in] val unsigned integer value - @return whether parsing should proceed - */ - virtual bool number_unsigned(number_unsigned_t val) = 0; - - /*! - @brief a floating-point number was read - @param[in] val floating-point value - @param[in] s raw token value - @return whether parsing should proceed - */ - virtual bool number_float(number_float_t val, const string_t& s) = 0; - - /*! - @brief a string value was read - @param[in] val string value - @return whether parsing should proceed - @note It is safe to move the passed string value. - */ - virtual bool string(string_t& val) = 0; - - /*! - @brief a binary value was read - @param[in] val binary value - @return whether parsing should proceed - @note It is safe to move the passed binary value. - */ - virtual bool binary(binary_t& val) = 0; - - /*! - @brief the beginning of an object was read - @param[in] elements number of object elements or -1 if unknown - @return whether parsing should proceed - @note binary formats may report the number of elements - */ - virtual bool start_object(std::size_t elements) = 0; - - /*! - @brief an object key was read - @param[in] val object key - @return whether parsing should proceed - @note It is safe to move the passed string. - */ - virtual bool key(string_t& val) = 0; - - /*! - @brief the end of an object was read - @return whether parsing should proceed - */ - virtual bool end_object() = 0; - - /*! - @brief the beginning of an array was read - @param[in] elements number of array elements or -1 if unknown - @return whether parsing should proceed - @note binary formats may report the number of elements - */ - virtual bool start_array(std::size_t elements) = 0; - - /*! - @brief the end of an array was read - @return whether parsing should proceed - */ - virtual bool end_array() = 0; - - /*! - @brief a parse error occurred - @param[in] position the position in the input where the error occurs - @param[in] last_token the last read token - @param[in] ex an exception object describing the error - @return whether parsing should proceed (must return false) - */ - virtual bool parse_error(std::size_t position, - const std::string& last_token, - const detail::exception& ex) = 0; - - json_sax() = default; - json_sax(const json_sax&) = default; - json_sax(json_sax&&) noexcept = default; - json_sax& operator=(const json_sax&) = default; - json_sax& operator=(json_sax&&) noexcept = default; - virtual ~json_sax() = default; -}; - -namespace detail -{ -/*! -@brief SAX implementation to create a JSON value from SAX events - -This class implements the @ref json_sax interface and processes the SAX events -to create a JSON value which makes it basically a DOM parser. The structure or -hierarchy of the JSON value is managed by the stack `ref_stack` which contains -a pointer to the respective array or object for each recursion depth. - -After successful parsing, the value that is passed by reference to the -constructor contains the parsed value. 
- -@tparam BasicJsonType the JSON type -*/ -template -class json_sax_dom_parser -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - /*! - @param[in,out] r reference to a JSON value that is manipulated while - parsing - @param[in] allow_exceptions_ whether parse errors yield exceptions - */ - explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true) - : root(r), allow_exceptions(allow_exceptions_) - {} - - // make class move-only - json_sax_dom_parser(const json_sax_dom_parser&) = delete; - json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; - json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - ~json_sax_dom_parser() = default; - - bool null() - { - handle_value(nullptr); - return true; - } - - bool boolean(bool val) - { - handle_value(val); - return true; - } - - bool number_integer(number_integer_t val) - { - handle_value(val); - return true; - } - - bool number_unsigned(number_unsigned_t val) - { - handle_value(val); - return true; - } - - bool number_float(number_float_t val, const string_t& /*unused*/) - { - handle_value(val); - return true; - } - - bool string(string_t& val) - { - handle_value(val); - return true; - } - - bool binary(binary_t& val) - { - handle_value(std::move(val)); - return true; - } - - bool start_object(std::size_t len) - { - ref_stack.push_back(handle_value(BasicJsonType::value_t::object)); - - if (JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool key(string_t& val) - { - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(ref_stack.back()->is_object()); - - // add null at given key and store the reference for later - object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val)); - return true; - } - - bool end_object() - { - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(ref_stack.back()->is_object()); - - ref_stack.back()->set_parents(); - ref_stack.pop_back(); - return true; - } - - bool start_array(std::size_t len) - { - ref_stack.push_back(handle_value(BasicJsonType::value_t::array)); - - if (JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool end_array() - { - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(ref_stack.back()->is_array()); - - ref_stack.back()->set_parents(); - ref_stack.pop_back(); - return true; - } - - template - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, - const Exception& ex) - { - errored = true; - static_cast(ex); - if (allow_exceptions) - { - JSON_THROW(ex); - } - return false; - } - - constexpr bool is_errored() const - { - return errored; - } - - private: - /*! - @invariant If the ref stack is empty, then the passed value will be the new - root. 
- @invariant If the ref stack contains a value, then it is an array or an - object to which we can add elements - */ - template - JSON_HEDLEY_RETURNS_NON_NULL - BasicJsonType* handle_value(Value&& v) - { - if (ref_stack.empty()) - { - root = BasicJsonType(std::forward(v)); - return &root; - } - - JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); - - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_data.m_value.array->emplace_back(std::forward(v)); - return &(ref_stack.back()->m_data.m_value.array->back()); - } - - JSON_ASSERT(ref_stack.back()->is_object()); - JSON_ASSERT(object_element); - *object_element = BasicJsonType(std::forward(v)); - return object_element; - } - - /// the parsed JSON value - BasicJsonType& root; - /// stack to model hierarchy of values - std::vector ref_stack {}; - /// helper to hold the reference for the next object element - BasicJsonType* object_element = nullptr; - /// whether a syntax error occurred - bool errored = false; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; -}; - -template -class json_sax_dom_callback_parser -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - using parser_callback_t = typename BasicJsonType::parser_callback_t; - using parse_event_t = typename BasicJsonType::parse_event_t; - - json_sax_dom_callback_parser(BasicJsonType& r, - const parser_callback_t cb, - const bool allow_exceptions_ = true) - : root(r), callback(cb), allow_exceptions(allow_exceptions_) - { - keep_stack.push_back(true); - } - - // make class move-only - json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - ~json_sax_dom_callback_parser() = default; - - bool null() - { - handle_value(nullptr); - return true; - } - - bool boolean(bool val) - { - handle_value(val); - return true; - } - - bool number_integer(number_integer_t val) - { - handle_value(val); - return true; - } - - bool number_unsigned(number_unsigned_t val) - { - handle_value(val); - return true; - } - - bool number_float(number_float_t val, const string_t& /*unused*/) - { - handle_value(val); - return true; - } - - bool string(string_t& val) - { - handle_value(val); - return true; - } - - bool binary(binary_t& val) - { - handle_value(std::move(val)); - return true; - } - - bool start_object(std::size_t len) - { - // check callback for object start - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::object_start, discarded); - keep_stack.push_back(keep); - - auto val = handle_value(BasicJsonType::value_t::object, true); - ref_stack.push_back(val.second); - - // check object limit - if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool 
key(string_t& val) - { - BasicJsonType k = BasicJsonType(val); - - // check callback for key - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::key, k); - key_keep_stack.push_back(keep); - - // add discarded value at given key and store the reference for later - if (keep && ref_stack.back()) - { - object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val) = discarded); - } - - return true; - } - - bool end_object() - { - if (ref_stack.back()) - { - if (!callback(static_cast(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back())) - { - // discard object - *ref_stack.back() = discarded; - } - else - { - ref_stack.back()->set_parents(); - } - } - - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(!keep_stack.empty()); - ref_stack.pop_back(); - keep_stack.pop_back(); - - if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured()) - { - // remove discarded value - for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it) - { - if (it->is_discarded()) - { - ref_stack.back()->erase(it); - break; - } - } - } - - return true; - } - - bool start_array(std::size_t len) - { - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::array_start, discarded); - keep_stack.push_back(keep); - - auto val = handle_value(BasicJsonType::value_t::array, true); - ref_stack.push_back(val.second); - - // check array limit - if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool end_array() - { - bool keep = true; - - if (ref_stack.back()) - { - keep = callback(static_cast(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back()); - if (keep) - { - ref_stack.back()->set_parents(); - } - else - { - // discard array - *ref_stack.back() = discarded; - } - } - - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(!keep_stack.empty()); - ref_stack.pop_back(); - keep_stack.pop_back(); - - // remove discarded value - if (!keep && !ref_stack.empty() && ref_stack.back()->is_array()) - { - ref_stack.back()->m_data.m_value.array->pop_back(); - } - - return true; - } - - template - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, - const Exception& ex) - { - errored = true; - static_cast(ex); - if (allow_exceptions) - { - JSON_THROW(ex); - } - return false; - } - - constexpr bool is_errored() const - { - return errored; - } - - private: - /*! - @param[in] v value to add to the JSON value we build during parsing - @param[in] skip_callback whether we should skip calling the callback - function; this is required after start_array() and - start_object() SAX events, because otherwise we would call the - callback function with an empty array or object, respectively. - - @invariant If the ref stack is empty, then the passed value will be the new - root. 
- @invariant If the ref stack contains a value, then it is an array or an - object to which we can add elements - - @return pair of boolean (whether value should be kept) and pointer (to the - passed value in the ref_stack hierarchy; nullptr if not kept) - */ - template - std::pair handle_value(Value&& v, const bool skip_callback = false) - { - JSON_ASSERT(!keep_stack.empty()); - - // do not handle this value if we know it would be added to a discarded - // container - if (!keep_stack.back()) - { - return {false, nullptr}; - } - - // create value - auto value = BasicJsonType(std::forward(v)); - - // check callback - const bool keep = skip_callback || callback(static_cast(ref_stack.size()), parse_event_t::value, value); - - // do not handle this value if we just learnt it shall be discarded - if (!keep) - { - return {false, nullptr}; - } - - if (ref_stack.empty()) - { - root = std::move(value); - return {true, & root}; - } - - // skip this value if we already decided to skip the parent - // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360) - if (!ref_stack.back()) - { - return {false, nullptr}; - } - - // we now only expect arrays and objects - JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); - - // array - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_data.m_value.array->emplace_back(std::move(value)); - return {true, & (ref_stack.back()->m_data.m_value.array->back())}; - } - - // object - JSON_ASSERT(ref_stack.back()->is_object()); - // check if we should store an element for the current key - JSON_ASSERT(!key_keep_stack.empty()); - const bool store_element = key_keep_stack.back(); - key_keep_stack.pop_back(); - - if (!store_element) - { - return {false, nullptr}; - } - - JSON_ASSERT(object_element); - *object_element = std::move(value); - return {true, object_element}; - } - - /// the parsed JSON value - BasicJsonType& root; - /// stack to model hierarchy of values - std::vector ref_stack {}; - /// stack to manage which values to keep - std::vector keep_stack {}; // NOLINT(readability-redundant-member-init) - /// stack to manage which object keys to keep - std::vector key_keep_stack {}; // NOLINT(readability-redundant-member-init) - /// helper to hold the reference for the next object element - BasicJsonType* object_element = nullptr; - /// whether a syntax error occurred - bool errored = false; - /// callback function - const parser_callback_t callback = nullptr; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; - /// a discarded value for the callback - BasicJsonType discarded = BasicJsonType::value_t::discarded; -}; - -template -class json_sax_acceptor -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - bool null() - { - return true; - } - - bool boolean(bool /*unused*/) - { - return true; - } - - bool number_integer(number_integer_t /*unused*/) - { - return true; - } - - bool number_unsigned(number_unsigned_t /*unused*/) - { - return true; - } - - bool number_float(number_float_t /*unused*/, const string_t& /*unused*/) - { - return true; - } - - bool string(string_t& /*unused*/) - { - return true; - } - - bool binary(binary_t& /*unused*/) - { - return true; - } - - bool start_object(std::size_t /*unused*/ = 
static_cast(-1)) - { - return true; - } - - bool key(string_t& /*unused*/) - { - return true; - } - - bool end_object() - { - return true; - } - - bool start_array(std::size_t /*unused*/ = static_cast(-1)) - { - return true; - } - - bool end_array() - { - return true; - } - - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/) - { - return false; - } -}; - -} // namespace detail -NLOHMANN_JSON_NAMESPACE_END - // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -8339,7 +7989,7 @@ class lexer : public lexer_base locale's decimal point is used instead of `.` to work with the locale-dependent converters. */ - token_type scan_number() // lgtm [cpp/use-of-goto] + token_type scan_number() // lgtm [cpp/use-of-goto] `goto` is used in this function to implement the number-parsing state machine described above. By design, any finite input will eventually reach the "done" state or return token_type::parse_error. In each intermediate state, 1 byte of the input is appended to the token_buffer vector, and only the already initialized variables token_buffer, number_type, and error_message are manipulated. { // reset token_buffer to store the number's bytes reset(); @@ -8421,6 +8071,7 @@ scan_number_zero: case '.': { add(decimal_point_char); + decimal_point_position = token_buffer.size() - 1; goto scan_number_decimal1; } @@ -8457,6 +8108,7 @@ scan_number_any1: case '.': { add(decimal_point_char); + decimal_point_position = token_buffer.size() - 1; goto scan_number_decimal1; } @@ -8617,7 +8269,7 @@ scan_number_done: // we are done scanning a number) unget(); - char* endptr = nullptr; // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) + char* endptr = nullptr; // NOLINT(misc-const-correctness,cppcoreguidelines-pro-type-vararg,hicpp-vararg) errno = 0; // try to parse integers first and fall back to floats @@ -8628,7 +8280,7 @@ scan_number_done: // we checked the number format before JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size()); - if (errno == 0) + if (errno != ERANGE) { value_unsigned = static_cast(x); if (value_unsigned == x) @@ -8644,7 +8296,7 @@ scan_number_done: // we checked the number format before JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size()); - if (errno == 0) + if (errno != ERANGE) { value_integer = static_cast(x); if (value_integer == x) @@ -8694,6 +8346,7 @@ scan_number_done: { token_buffer.clear(); token_string.clear(); + decimal_point_position = std::string::npos; token_string.push_back(char_traits::to_char_type(current)); } @@ -8802,6 +8455,11 @@ scan_number_done: /// return current string value (implicitly resets the token; useful only once) string_t& get_string() { + // translate decimal points from locale back to '.' (#4084) + if (decimal_point_char != '.' 
&& decimal_point_position != std::string::npos) + { + token_buffer[decimal_point_position] = '.'; + } return token_buffer; } @@ -8999,6 +8657,8 @@ scan_number_done: /// the decimal point const char_int_type decimal_point_char = '.'; + /// the position of the decimal point in the input + std::size_t decimal_point_position = std::string::npos; }; } // namespace detail @@ -9006,13 +8666,986 @@ NLOHMANN_JSON_NAMESPACE_END // #include +// #include + +NLOHMANN_JSON_NAMESPACE_BEGIN + +/*! +@brief SAX interface + +This class describes the SAX interface used by @ref nlohmann::json::sax_parse. +Each function is called in different situations while the input is parsed. The +boolean return value informs the parser whether to continue processing the +input. +*/ +template +struct json_sax +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + + /*! + @brief a null value was read + @return whether parsing should proceed + */ + virtual bool null() = 0; + + /*! + @brief a boolean value was read + @param[in] val boolean value + @return whether parsing should proceed + */ + virtual bool boolean(bool val) = 0; + + /*! + @brief an integer number was read + @param[in] val integer value + @return whether parsing should proceed + */ + virtual bool number_integer(number_integer_t val) = 0; + + /*! + @brief an unsigned integer number was read + @param[in] val unsigned integer value + @return whether parsing should proceed + */ + virtual bool number_unsigned(number_unsigned_t val) = 0; + + /*! + @brief a floating-point number was read + @param[in] val floating-point value + @param[in] s raw token value + @return whether parsing should proceed + */ + virtual bool number_float(number_float_t val, const string_t& s) = 0; + + /*! + @brief a string value was read + @param[in] val string value + @return whether parsing should proceed + @note It is safe to move the passed string value. + */ + virtual bool string(string_t& val) = 0; + + /*! + @brief a binary value was read + @param[in] val binary value + @return whether parsing should proceed + @note It is safe to move the passed binary value. + */ + virtual bool binary(binary_t& val) = 0; + + /*! + @brief the beginning of an object was read + @param[in] elements number of object elements or -1 if unknown + @return whether parsing should proceed + @note binary formats may report the number of elements + */ + virtual bool start_object(std::size_t elements) = 0; + + /*! + @brief an object key was read + @param[in] val object key + @return whether parsing should proceed + @note It is safe to move the passed string. + */ + virtual bool key(string_t& val) = 0; + + /*! + @brief the end of an object was read + @return whether parsing should proceed + */ + virtual bool end_object() = 0; + + /*! + @brief the beginning of an array was read + @param[in] elements number of array elements or -1 if unknown + @return whether parsing should proceed + @note binary formats may report the number of elements + */ + virtual bool start_array(std::size_t elements) = 0; + + /*! + @brief the end of an array was read + @return whether parsing should proceed + */ + virtual bool end_array() = 0; + + /*! 
+ @brief a parse error occurred + @param[in] position the position in the input where the error occurs + @param[in] last_token the last read token + @param[in] ex an exception object describing the error + @return whether parsing should proceed (must return false) + */ + virtual bool parse_error(std::size_t position, + const std::string& last_token, + const detail::exception& ex) = 0; + + json_sax() = default; + json_sax(const json_sax&) = default; + json_sax(json_sax&&) noexcept = default; + json_sax& operator=(const json_sax&) = default; + json_sax& operator=(json_sax&&) noexcept = default; + virtual ~json_sax() = default; +}; + +namespace detail +{ +constexpr std::size_t unknown_size() +{ + return (std::numeric_limits::max)(); +} + +/*! +@brief SAX implementation to create a JSON value from SAX events + +This class implements the @ref json_sax interface and processes the SAX events +to create a JSON value which makes it basically a DOM parser. The structure or +hierarchy of the JSON value is managed by the stack `ref_stack` which contains +a pointer to the respective array or object for each recursion depth. + +After successful parsing, the value that is passed by reference to the +constructor contains the parsed value. + +@tparam BasicJsonType the JSON type +*/ +template +class json_sax_dom_parser +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using lexer_t = lexer; + + /*! + @param[in,out] r reference to a JSON value that is manipulated while + parsing + @param[in] allow_exceptions_ whether parse errors yield exceptions + */ + explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true, lexer_t* lexer_ = nullptr) + : root(r), allow_exceptions(allow_exceptions_), m_lexer_ref(lexer_) + {} + + // make class move-only + json_sax_dom_parser(const json_sax_dom_parser&) = delete; + json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + ~json_sax_dom_parser() = default; + + bool null() + { + handle_value(nullptr); + return true; + } + + bool boolean(bool val) + { + handle_value(val); + return true; + } + + bool number_integer(number_integer_t val) + { + handle_value(val); + return true; + } + + bool number_unsigned(number_unsigned_t val) + { + handle_value(val); + return true; + } + + bool number_float(number_float_t val, const string_t& /*unused*/) + { + handle_value(val); + return true; + } + + bool string(string_t& val) + { + handle_value(val); + return true; + } + + bool binary(binary_t& val) + { + handle_value(std::move(val)); + return true; + } + + bool start_object(std::size_t len) + { + ref_stack.push_back(handle_value(BasicJsonType::value_t::object)); + +#if JSON_DIAGNOSTIC_POSITIONS + // Manually set the start position of the object here. + // Ensure this is after the call to handle_value to ensure correct start position. + if (m_lexer_ref) + { + // Lexer has read the first character of the object, so + // subtract 1 from the position to get the correct start position. 
+ ref_stack.back()->start_position = m_lexer_ref->get_position() - 1; + } +#endif + + if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); + } + + return true; + } + + bool key(string_t& val) + { + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(ref_stack.back()->is_object()); + + // add null at given key and store the reference for later + object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val)); + return true; + } + + bool end_object() + { + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(ref_stack.back()->is_object()); + +#if JSON_DIAGNOSTIC_POSITIONS + if (m_lexer_ref) + { + // Lexer's position is past the closing brace, so set that as the end position. + ref_stack.back()->end_position = m_lexer_ref->get_position(); + } +#endif + + ref_stack.back()->set_parents(); + ref_stack.pop_back(); + return true; + } + + bool start_array(std::size_t len) + { + ref_stack.push_back(handle_value(BasicJsonType::value_t::array)); + +#if JSON_DIAGNOSTIC_POSITIONS + // Manually set the start position of the array here. + // Ensure this is after the call to handle_value to ensure correct start position. + if (m_lexer_ref) + { + ref_stack.back()->start_position = m_lexer_ref->get_position() - 1; + } +#endif + + if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); + } + + return true; + } + + bool end_array() + { + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(ref_stack.back()->is_array()); + +#if JSON_DIAGNOSTIC_POSITIONS + if (m_lexer_ref) + { + // Lexer's position is past the closing bracket, so set that as the end position. + ref_stack.back()->end_position = m_lexer_ref->get_position(); + } +#endif + + ref_stack.back()->set_parents(); + ref_stack.pop_back(); + return true; + } + + template + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, + const Exception& ex) + { + errored = true; + static_cast(ex); + if (allow_exceptions) + { + JSON_THROW(ex); + } + return false; + } + + constexpr bool is_errored() const + { + return errored; + } + + private: + +#if JSON_DIAGNOSTIC_POSITIONS + void handle_diagnostic_positions_for_json_value(BasicJsonType& v) + { + if (m_lexer_ref) + { + // Lexer has read past the current field value, so set the end position to the current position. + // The start position will be set below based on the length of the string representation + // of the value. + v.end_position = m_lexer_ref->get_position(); + + switch (v.type()) + { + case value_t::boolean: + { + // 4 and 5 are the string length of "true" and "false" + v.start_position = v.end_position - (v.m_data.m_value.boolean ? 4 : 5); + break; + } + + case value_t::null: + { + // 4 is the string length of "null" + v.start_position = v.end_position - 4; + break; + } + + case value_t::string: + { + // include the length of the quotes, which is 2 + v.start_position = v.end_position - v.m_data.m_value.string->size() - 2; + break; + } + + // As we handle the start and end positions for values created during parsing, + // we do not expect the following value type to be called. Regardless, set the positions + // in case this is created manually or through a different constructor. Exclude from lcov + // since the exact condition of this switch is esoteric. 
+ // LCOV_EXCL_START + case value_t::discarded: + { + v.end_position = std::string::npos; + v.start_position = v.end_position; + break; + } + // LCOV_EXCL_STOP + case value_t::binary: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::number_float: + { + v.start_position = v.end_position - m_lexer_ref->get_string().size(); + break; + } + case value_t::object: + case value_t::array: + { + // object and array are handled in start_object() and start_array() handlers + // skip setting the values here. + break; + } + default: // LCOV_EXCL_LINE + // Handle all possible types discretely, default handler should never be reached. + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert,-warnings-as-errors) LCOV_EXCL_LINE + } + } + } +#endif + + /*! + @invariant If the ref stack is empty, then the passed value will be the new + root. + @invariant If the ref stack contains a value, then it is an array or an + object to which we can add elements + */ + template + JSON_HEDLEY_RETURNS_NON_NULL + BasicJsonType* handle_value(Value&& v) + { + if (ref_stack.empty()) + { + root = BasicJsonType(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(root); +#endif + + return &root; + } + + JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); + + if (ref_stack.back()->is_array()) + { + ref_stack.back()->m_data.m_value.array->emplace_back(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(ref_stack.back()->m_data.m_value.array->back()); +#endif + + return &(ref_stack.back()->m_data.m_value.array->back()); + } + + JSON_ASSERT(ref_stack.back()->is_object()); + JSON_ASSERT(object_element); + *object_element = BasicJsonType(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(*object_element); +#endif + + return object_element; + } + + /// the parsed JSON value + BasicJsonType& root; + /// stack to model hierarchy of values + std::vector ref_stack {}; + /// helper to hold the reference for the next object element + BasicJsonType* object_element = nullptr; + /// whether a syntax error occurred + bool errored = false; + /// whether to throw exceptions in case of errors + const bool allow_exceptions = true; + /// the lexer reference to obtain the current position + lexer_t* m_lexer_ref = nullptr; +}; + +template +class json_sax_dom_callback_parser +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using parser_callback_t = typename BasicJsonType::parser_callback_t; + using parse_event_t = typename BasicJsonType::parse_event_t; + using lexer_t = lexer; + + json_sax_dom_callback_parser(BasicJsonType& r, + parser_callback_t cb, + const bool allow_exceptions_ = true, + lexer_t* lexer_ = nullptr) + : root(r), callback(std::move(cb)), allow_exceptions(allow_exceptions_), m_lexer_ref(lexer_) + { + keep_stack.push_back(true); + } + + // make class move-only + json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) 
= delete; + json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + ~json_sax_dom_callback_parser() = default; + + bool null() + { + handle_value(nullptr); + return true; + } + + bool boolean(bool val) + { + handle_value(val); + return true; + } + + bool number_integer(number_integer_t val) + { + handle_value(val); + return true; + } + + bool number_unsigned(number_unsigned_t val) + { + handle_value(val); + return true; + } + + bool number_float(number_float_t val, const string_t& /*unused*/) + { + handle_value(val); + return true; + } + + bool string(string_t& val) + { + handle_value(val); + return true; + } + + bool binary(binary_t& val) + { + handle_value(std::move(val)); + return true; + } + + bool start_object(std::size_t len) + { + // check callback for object start + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::object_start, discarded); + keep_stack.push_back(keep); + + auto val = handle_value(BasicJsonType::value_t::object, true); + ref_stack.push_back(val.second); + + if (ref_stack.back()) + { + +#if JSON_DIAGNOSTIC_POSITIONS + // Manually set the start position of the object here. + // Ensure this is after the call to handle_value to ensure correct start position. + if (m_lexer_ref) + { + // Lexer has read the first character of the object, so + // subtract 1 from the position to get the correct start position. + ref_stack.back()->start_position = m_lexer_ref->get_position() - 1; + } +#endif + + // check object limit + if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); + } + } + return true; + } + + bool key(string_t& val) + { + BasicJsonType k = BasicJsonType(val); + + // check callback for key + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::key, k); + key_keep_stack.push_back(keep); + + // add discarded value at given key and store the reference for later + if (keep && ref_stack.back()) + { + object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val) = discarded); + } + + return true; + } + + bool end_object() + { + if (ref_stack.back()) + { + if (!callback(static_cast(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back())) + { + // discard object + *ref_stack.back() = discarded; + +#if JSON_DIAGNOSTIC_POSITIONS + // Set start/end positions for discarded object. + handle_diagnostic_positions_for_json_value(*ref_stack.back()); +#endif + } + else + { + +#if JSON_DIAGNOSTIC_POSITIONS + if (m_lexer_ref) + { + // Lexer's position is past the closing brace, so set that as the end position. 
+ ref_stack.back()->end_position = m_lexer_ref->get_position(); + } +#endif + + ref_stack.back()->set_parents(); + } + } + + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(!keep_stack.empty()); + ref_stack.pop_back(); + keep_stack.pop_back(); + + if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured()) + { + // remove discarded value + for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it) + { + if (it->is_discarded()) + { + ref_stack.back()->erase(it); + break; + } + } + } + + return true; + } + + bool start_array(std::size_t len) + { + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::array_start, discarded); + keep_stack.push_back(keep); + + auto val = handle_value(BasicJsonType::value_t::array, true); + ref_stack.push_back(val.second); + + if (ref_stack.back()) + { + +#if JSON_DIAGNOSTIC_POSITIONS + // Manually set the start position of the array here. + // Ensure this is after the call to handle_value to ensure correct start position. + if (m_lexer_ref) + { + // Lexer has read the first character of the array, so + // subtract 1 from the position to get the correct start position. + ref_stack.back()->start_position = m_lexer_ref->get_position() - 1; + } +#endif + + // check array limit + if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); + } + } + + return true; + } + + bool end_array() + { + bool keep = true; + + if (ref_stack.back()) + { + keep = callback(static_cast(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back()); + if (keep) + { + +#if JSON_DIAGNOSTIC_POSITIONS + if (m_lexer_ref) + { + // Lexer's position is past the closing bracket, so set that as the end position. + ref_stack.back()->end_position = m_lexer_ref->get_position(); + } +#endif + + ref_stack.back()->set_parents(); + } + else + { + // discard array + *ref_stack.back() = discarded; + +#if JSON_DIAGNOSTIC_POSITIONS + // Set start/end positions for discarded array. + handle_diagnostic_positions_for_json_value(*ref_stack.back()); +#endif + } + } + + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(!keep_stack.empty()); + ref_stack.pop_back(); + keep_stack.pop_back(); + + // remove discarded value + if (!keep && !ref_stack.empty() && ref_stack.back()->is_array()) + { + ref_stack.back()->m_data.m_value.array->pop_back(); + } + + return true; + } + + template + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, + const Exception& ex) + { + errored = true; + static_cast(ex); + if (allow_exceptions) + { + JSON_THROW(ex); + } + return false; + } + + constexpr bool is_errored() const + { + return errored; + } + + private: + +#if JSON_DIAGNOSTIC_POSITIONS + void handle_diagnostic_positions_for_json_value(BasicJsonType& v) + { + if (m_lexer_ref) + { + // Lexer has read past the current field value, so set the end position to the current position. + // The start position will be set below based on the length of the string representation + // of the value. + v.end_position = m_lexer_ref->get_position(); + + switch (v.type()) + { + case value_t::boolean: + { + // 4 and 5 are the string length of "true" and "false" + v.start_position = v.end_position - (v.m_data.m_value.boolean ? 
4 : 5); + break; + } + + case value_t::null: + { + // 4 is the string length of "null" + v.start_position = v.end_position - 4; + break; + } + + case value_t::string: + { + // include the length of the quotes, which is 2 + v.start_position = v.end_position - v.m_data.m_value.string->size() - 2; + break; + } + + case value_t::discarded: + { + v.end_position = std::string::npos; + v.start_position = v.end_position; + break; + } + + case value_t::binary: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::number_float: + { + v.start_position = v.end_position - m_lexer_ref->get_string().size(); + break; + } + + case value_t::object: + case value_t::array: + { + // object and array are handled in start_object() and start_array() handlers + // skip setting the values here. + break; + } + default: // LCOV_EXCL_LINE + // Handle all possible types discretely, default handler should never be reached. + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert,-warnings-as-errors) LCOV_EXCL_LINE + } + } + } +#endif + + /*! + @param[in] v value to add to the JSON value we build during parsing + @param[in] skip_callback whether we should skip calling the callback + function; this is required after start_array() and + start_object() SAX events, because otherwise we would call the + callback function with an empty array or object, respectively. + + @invariant If the ref stack is empty, then the passed value will be the new + root. + @invariant If the ref stack contains a value, then it is an array or an + object to which we can add elements + + @return pair of boolean (whether value should be kept) and pointer (to the + passed value in the ref_stack hierarchy; nullptr if not kept) + */ + template + std::pair handle_value(Value&& v, const bool skip_callback = false) + { + JSON_ASSERT(!keep_stack.empty()); + + // do not handle this value if we know it would be added to a discarded + // container + if (!keep_stack.back()) + { + return {false, nullptr}; + } + + // create value + auto value = BasicJsonType(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(value); +#endif + + // check callback + const bool keep = skip_callback || callback(static_cast(ref_stack.size()), parse_event_t::value, value); + + // do not handle this value if we just learnt it shall be discarded + if (!keep) + { + return {false, nullptr}; + } + + if (ref_stack.empty()) + { + root = std::move(value); + return {true, & root}; + } + + // skip this value if we already decided to skip the parent + // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360) + if (!ref_stack.back()) + { + return {false, nullptr}; + } + + // we now only expect arrays and objects + JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); + + // array + if (ref_stack.back()->is_array()) + { + ref_stack.back()->m_data.m_value.array->emplace_back(std::move(value)); + return {true, & (ref_stack.back()->m_data.m_value.array->back())}; + } + + // object + JSON_ASSERT(ref_stack.back()->is_object()); + // check if we should store an element for the current key + JSON_ASSERT(!key_keep_stack.empty()); + const bool store_element = key_keep_stack.back(); + key_keep_stack.pop_back(); + + if (!store_element) + { + return {false, nullptr}; + } + + JSON_ASSERT(object_element); + *object_element = std::move(value); + return {true, object_element}; + } + + /// the parsed JSON value + BasicJsonType& root; + /// stack to model hierarchy of values + 
std::vector ref_stack {}; + /// stack to manage which values to keep + std::vector keep_stack {}; // NOLINT(readability-redundant-member-init) + /// stack to manage which object keys to keep + std::vector key_keep_stack {}; // NOLINT(readability-redundant-member-init) + /// helper to hold the reference for the next object element + BasicJsonType* object_element = nullptr; + /// whether a syntax error occurred + bool errored = false; + /// callback function + const parser_callback_t callback = nullptr; + /// whether to throw exceptions in case of errors + const bool allow_exceptions = true; + /// a discarded value for the callback + BasicJsonType discarded = BasicJsonType::value_t::discarded; + /// the lexer reference to obtain the current position + lexer_t* m_lexer_ref = nullptr; +}; + +template +class json_sax_acceptor +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + + bool null() + { + return true; + } + + bool boolean(bool /*unused*/) + { + return true; + } + + bool number_integer(number_integer_t /*unused*/) + { + return true; + } + + bool number_unsigned(number_unsigned_t /*unused*/) + { + return true; + } + + bool number_float(number_float_t /*unused*/, const string_t& /*unused*/) + { + return true; + } + + bool string(string_t& /*unused*/) + { + return true; + } + + bool binary(binary_t& /*unused*/) + { + return true; + } + + bool start_object(std::size_t /*unused*/ = detail::unknown_size()) + { + return true; + } + + bool key(string_t& /*unused*/) + { + return true; + } + + bool end_object() + { + return true; + } + + bool start_array(std::size_t /*unused*/ = detail::unknown_size()) + { + return true; + } + + bool end_array() + { + return true; + } + + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/) + { + return false; + } +}; + +} // namespace detail +NLOHMANN_JSON_NAMESPACE_END + +// #include + +// #include + // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -9208,7 +9841,7 @@ static inline bool little_endianness(int num = 1) noexcept /*! 
@brief deserialization of CBOR, MessagePack, and UBJSON values */ -template> +template> class binary_reader { using number_integer_t = typename BasicJsonType::number_integer_t; @@ -9315,7 +9948,7 @@ class binary_reader std::int32_t document_size{}; get_number(input_format_t::bson, document_size); - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_object(detail::unknown_size()))) { return false; } @@ -9471,6 +10104,12 @@ class binary_reader return get_number(input_format_t::bson, value) && sax->number_integer(value); } + case 0x11: // uint64 + { + std::uint64_t value{}; + return get_number(input_format_t::bson, value) && sax->number_unsigned(value); + } + default: // anything else not supported (yet) { std::array cr{{}}; @@ -9537,7 +10176,7 @@ class binary_reader std::int32_t document_size{}; get_number(input_format_t::bson, document_size); - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(detail::unknown_size()))) { return false; } @@ -9797,7 +10436,7 @@ class binary_reader } case 0x9F: // array (indefinite length) - return get_cbor_array(static_cast(-1), tag_handler); + return get_cbor_array(detail::unknown_size(), tag_handler); // map (0x00..0x17 pairs of data items follow) case 0xA0: @@ -9851,7 +10490,7 @@ class binary_reader } case 0xBF: // map (indefinite length) - return get_cbor_object(static_cast(-1), tag_handler); + return get_cbor_object(detail::unknown_size(), tag_handler); case 0xC6: // tagged item case 0xC7: @@ -10239,7 +10878,7 @@ class binary_reader } /*! - @param[in] len the length of the array or static_cast(-1) for an + @param[in] len the length of the array or detail::unknown_size() for an array of indefinite size @param[in] tag_handler how CBOR tags should be treated @return whether array creation completed @@ -10252,7 +10891,7 @@ class binary_reader return false; } - if (len != static_cast(-1)) + if (len != detail::unknown_size()) { for (std::size_t i = 0; i < len; ++i) { @@ -10277,7 +10916,7 @@ class binary_reader } /*! 
- @param[in] len the length of the object or static_cast(-1) for an + @param[in] len the length of the object or detail::unknown_size() for an object of indefinite size @param[in] tag_handler how CBOR tags should be treated @return whether object creation completed @@ -10293,7 +10932,7 @@ class binary_reader if (len != 0) { string_t key; - if (len != static_cast(-1)) + if (len != detail::unknown_size()) { for (std::size_t i = 0; i < len; ++i) { @@ -11456,6 +12095,16 @@ class binary_reader case 'Z': // null return sax->null(); + case 'B': // byte + { + if (input_format != input_format_t::bjdata) + { + break; + } + std::uint8_t number{}; + return get_number(input_format, number) && sax->number_unsigned(number); + } + case 'U': { std::uint8_t number{}; @@ -11656,7 +12305,7 @@ class binary_reader return false; } - if (size_and_type.second == 'C') + if (size_and_type.second == 'C' || size_and_type.second == 'B') { size_and_type.second = 'U'; } @@ -11678,6 +12327,13 @@ class binary_reader return (sax->end_array() && sax->end_object()); } + // If BJData type marker is 'B' decode as binary + if (input_format == input_format_t::bjdata && size_and_type.first != npos && size_and_type.second == 'B') + { + binary_t result; + return get_binary(input_format, size_and_type.first, result) && sax->binary(result); + } + if (size_and_type.first != npos) { if (JSON_HEDLEY_UNLIKELY(!sax->start_array(size_and_type.first))) @@ -11711,7 +12367,7 @@ class binary_reader } else { - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(detail::unknown_size()))) { return false; } @@ -11789,7 +12445,7 @@ class binary_reader } else { - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_object(detail::unknown_size()))) { return false; } @@ -11900,6 +12556,29 @@ class binary_reader return current = ia.get_character(); } + /*! + @brief get_to read into a primitive type + + This function provides the interface to the used input adapter. It does + not throw in case the input reached EOF, but returns false instead + + @return bool, whether the read was successful + */ + template + bool get_to(T& dest, const input_format_t format, const char* context) + { + auto new_chars_read = ia.get_elements(&dest); + chars_read += new_chars_read; + if (JSON_HEDLEY_UNLIKELY(new_chars_read < sizeof(T))) + { + // in case of failure, advance position by 1 to report failing location + ++chars_read; + sax->parse_error(chars_read, "", parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context), nullptr)); + return false; + } + return true; + } + /*! 
@return character read from the input after ignoring all 'N' entries */ @@ -11914,6 +12593,28 @@ class binary_reader return current; } + template + static void byte_swap(NumberType& number) + { + constexpr std::size_t sz = sizeof(number); +#ifdef __cpp_lib_byteswap + if constexpr (sz == 1) + { + return; + } + if constexpr(std::is_integral_v) + { + number = std::byteswap(number); + return; + } +#endif + auto* ptr = reinterpret_cast(&number); + for (std::size_t i = 0; i < sz / 2; ++i) + { + std::swap(ptr[i], ptr[sz - i - 1]); + } + } + /* @brief read a number from the input @@ -11932,29 +12633,16 @@ class binary_reader template bool get_number(const input_format_t format, NumberType& result) { - // step 1: read input into array with system's byte order - std::array vec{}; - for (std::size_t i = 0; i < sizeof(NumberType); ++i) + // read in the original format + + if (JSON_HEDLEY_UNLIKELY(!get_to(result, format, "number"))) { - get(); - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "number"))) - { - return false; - } - - // reverse byte order prior to conversion if necessary - if (is_little_endian != (InputIsLittleEndian || format == input_format_t::bjdata)) - { - vec[sizeof(NumberType) - i - 1] = static_cast(current); - } - else - { - vec[i] = static_cast(current); // LCOV_EXCL_LINE - } + return false; + } + if (is_little_endian != (InputIsLittleEndian || format == input_format_t::bjdata)) + { + byte_swap(result); } - - // step 2: convert array into number of type T and return - std::memcpy(&result, vec.data(), sizeof(NumberType)); return true; } @@ -12093,7 +12781,7 @@ class binary_reader } private: - static JSON_INLINE_VARIABLE constexpr std::size_t npos = static_cast(-1); + static JSON_INLINE_VARIABLE constexpr std::size_t npos = detail::unknown_size(); /// input adapter InputAdapterType ia; @@ -12119,6 +12807,7 @@ class binary_reader #define JSON_BINARY_READER_MAKE_BJD_TYPES_MAP_ \ make_array( \ + bjd_type{'B', "byte"}, \ bjd_type{'C', "char"}, \ bjd_type{'D', "double"}, \ bjd_type{'I', "int16"}, \ @@ -12161,10 +12850,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -12238,10 +12927,10 @@ class parser public: /// a parser reading from an input adapter explicit parser(InputAdapterType&& adapter, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions_ = true, const bool skip_comments = false) - : callback(cb) + : callback(std::move(cb)) , m_lexer(std::move(adapter), skip_comments) , allow_exceptions(allow_exceptions_) { @@ -12263,7 +12952,7 @@ class parser { if (callback) { - json_sax_dom_callback_parser sdp(result, callback, allow_exceptions); + json_sax_dom_callback_parser sdp(result, callback, allow_exceptions, &m_lexer); sax_parse_internal(&sdp); // in strict mode, input must be completely read @@ -12291,7 +12980,7 @@ class parser } else { - json_sax_dom_parser sdp(result, allow_exceptions); + json_sax_dom_parser sdp(result, allow_exceptions, &m_lexer); sax_parse_internal(&sdp); // in strict mode, input must be completely read @@ -12363,7 +13052,7 @@ class parser { case token_type::begin_object: { - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast(-1)))) + if 
(JSON_HEDLEY_UNLIKELY(!sax->start_object(detail::unknown_size()))) { return false; } @@ -12408,7 +13097,7 @@ class parser case token_type::begin_array: { - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(detail::unknown_size()))) { return false; } @@ -12690,10 +13379,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -12703,10 +13392,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -12862,10 +13551,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -13332,7 +14021,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ template < typename IterImpl, detail::enable_if_t < (std::is_same::value || std::is_same::value), std::nullptr_t > = nullptr > bool operator==(const IterImpl& other) const @@ -13343,7 +14032,11 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object)); } - JSON_ASSERT(m_object != nullptr); + // value-initialized forward iterators can be compared, and must compare equal to other value-initialized iterators of the same type #4493 + if (m_object == nullptr) + { + return true; + } switch (m_object->m_data.m_type) { @@ -13368,7 +14061,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: not equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ template < typename IterImpl, detail::enable_if_t < (std::is_same::value || std::is_same::value), std::nullptr_t > = nullptr > bool operator!=(const IterImpl& other) const @@ -13378,7 +14071,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: smaller - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. 
*/ bool operator<(const iter_impl& other) const { @@ -13388,7 +14081,12 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object)); } - JSON_ASSERT(m_object != nullptr); + // value-initialized forward iterators can be compared, and must compare equal to other value-initialized iterators of the same type #4493 + if (m_object == nullptr) + { + // the iterators are both value-initialized and are to be considered equal, but this function checks for smaller, so we return false + return false; + } switch (m_object->m_data.m_type) { @@ -13413,7 +14111,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: less than or equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ bool operator<=(const iter_impl& other) const { @@ -13422,7 +14120,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: greater than - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ bool operator>(const iter_impl& other) const { @@ -13431,7 +14129,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: greater than or equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) The iterator is initialized; i.e. `m_object != nullptr`, or (2) both iterators are value-initialized. */ bool operator>=(const iter_impl& other) const { @@ -13624,10 +14322,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -13759,10 +14457,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -13801,10 +14499,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -14034,7 +14732,7 @@ class json_pointer } const char* p = s.c_str(); - char* p_end = nullptr; + char* p_end = nullptr; // NOLINT(misc-const-correctness) errno = 0; // strtoull doesn't reset errno const unsigned long long res = std::strtoull(p, &p_end, 10); // NOLINT(runtime/int) if (p == p_end // invalid input or empty string @@ -14556,7 +15254,7 @@ class json_pointer // iterate array and use index as reference string for (std::size_t i = 0; i < value.m_data.m_value.array->size(); ++i) { - 
flatten(detail::concat(reference_string, '/', std::to_string(i)), + flatten(detail::concat(reference_string, '/', std::to_string(i)), value.m_data.m_value.array->operator[](i), result); } } @@ -14575,7 +15273,7 @@ class json_pointer // iterate object and use keys as reference string for (const auto& element : *value.m_data.m_value.object) { - flatten(detail::concat(reference_string, '/', detail::escape(element.first)), element.second, result); + flatten(detail::concat(reference_string, '/', detail::escape(element.first)), element.second, result); } } break; @@ -14796,10 +15494,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -14881,6 +15579,8 @@ NLOHMANN_JSON_NAMESPACE_END // #include +// #include + // #include // #include @@ -14888,10 +15588,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -14914,10 +15614,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -15068,6 +15768,13 @@ NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { +/// how to encode BJData +enum class bjdata_version_t +{ + draft2, + draft3, +}; + /////////////////// // binary writer // /////////////////// @@ -15652,7 +16359,7 @@ class binary_writer case value_t::binary: { // step 0: determine if the binary type has a set subtype to - // determine whether or not to use the ext or fixext types + // determine whether to use the ext or fixext types const bool use_ext = j.m_data.m_value.binary->has_subtype(); // step 1: write control byte and the byte string length @@ -15775,11 +16482,14 @@ class binary_writer @param[in] use_type whether to use '$' prefixes (optimized format) @param[in] add_prefix whether prefixes need to be used for this value @param[in] use_bjdata whether write in BJData format, default is false + @param[in] bjdata_version which BJData version to use, default is draft2 */ void write_ubjson(const BasicJsonType& j, const bool use_count, const bool use_type, const bool add_prefix = true, - const bool use_bjdata = false) + const bool use_bjdata = false, const bjdata_version_t bjdata_version = bjdata_version_t::draft2) { + const bool bjdata_draft3 = use_bjdata && bjdata_version == bjdata_version_t::draft3; + switch (j.type()) { case value_t::null: @@ -15869,7 +16579,7 @@ class binary_writer for (const auto& el : *j.m_data.m_value.array) { - write_ubjson(el, use_count, use_type, prefix_required, use_bjdata); + write_ubjson(el, use_count, use_type, prefix_required, use_bjdata, bjdata_version); } if (!use_count) @@ -15887,11 +16597,11 @@ class binary_writer oa->write_character(to_char_type('[')); } - if 
(use_type && !j.m_data.m_value.binary->empty()) + if (use_type && (bjdata_draft3 || !j.m_data.m_value.binary->empty())) { JSON_ASSERT(use_count); oa->write_character(to_char_type('$')); - oa->write_character('U'); + oa->write_character(bjdata_draft3 ? 'B' : 'U'); } if (use_count) @@ -15910,7 +16620,7 @@ class binary_writer { for (size_t i = 0; i < j.m_data.m_value.binary->size(); ++i) { - oa->write_character(to_char_type('U')); + oa->write_character(to_char_type(bjdata_draft3 ? 'B' : 'U')); oa->write_character(j.m_data.m_value.binary->data()[i]); } } @@ -15927,7 +16637,7 @@ class binary_writer { if (use_bjdata && j.m_data.m_value.object->size() == 3 && j.m_data.m_value.object->find("_ArrayType_") != j.m_data.m_value.object->end() && j.m_data.m_value.object->find("_ArraySize_") != j.m_data.m_value.object->end() && j.m_data.m_value.object->find("_ArrayData_") != j.m_data.m_value.object->end()) { - if (!write_bjdata_ndarray(*j.m_data.m_value.object, use_count, use_type)) // decode bjdata ndarray in the JData format (https://github.com/NeuroJSON/jdata) + if (!write_bjdata_ndarray(*j.m_data.m_value.object, use_count, use_type, bjdata_version)) // decode bjdata ndarray in the JData format (https://github.com/NeuroJSON/jdata) { break; } @@ -15971,7 +16681,7 @@ class binary_writer oa->write_characters( reinterpret_cast(el.first.c_str()), el.first.size()); - write_ubjson(el.second, use_count, use_type, prefix_required, use_bjdata); + write_ubjson(el.second, use_count, use_type, prefix_required, use_bjdata, bjdata_version); } if (!use_count) @@ -16127,7 +16837,8 @@ class binary_writer } else { - JSON_THROW(out_of_range::create(407, concat("integer number ", std::to_string(j.m_data.m_value.number_unsigned), " cannot be represented by BSON as it does not fit int64"), &j)); + write_bson_entry_header(name, 0x11 /* uint64 */); + write_number(static_cast(j.m_data.m_value.number_unsigned), true); } } @@ -16655,10 +17366,11 @@ class binary_writer /*! 
@return false if the object is successfully converted to a bjdata ndarray, true if the type or size is invalid */ - bool write_bjdata_ndarray(const typename BasicJsonType::object_t& value, const bool use_count, const bool use_type) + bool write_bjdata_ndarray(const typename BasicJsonType::object_t& value, const bool use_count, const bool use_type, const bjdata_version_t bjdata_version) { std::map bjdtype = {{"uint8", 'U'}, {"int8", 'i'}, {"uint16", 'u'}, {"int16", 'I'}, - {"uint32", 'm'}, {"int32", 'l'}, {"uint64", 'M'}, {"int64", 'L'}, {"single", 'd'}, {"double", 'D'}, {"char", 'C'} + {"uint32", 'm'}, {"int32", 'l'}, {"uint64", 'M'}, {"int64", 'L'}, {"single", 'd'}, {"double", 'D'}, + {"char", 'C'}, {"byte", 'B'} }; string_t key = "_ArrayType_"; @@ -16688,10 +17400,10 @@ class binary_writer oa->write_character('#'); key = "_ArraySize_"; - write_ubjson(value.at(key), use_count, use_type, true, true); + write_ubjson(value.at(key), use_count, use_type, true, true, bjdata_version); key = "_ArrayData_"; - if (dtype == 'U' || dtype == 'C') + if (dtype == 'U' || dtype == 'C' || dtype == 'B') { for (const auto& el : value.at(key)) { @@ -16882,11 +17594,11 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2008-2009 Björn Hoehrmann -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2008 - 2009 Björn Hoehrmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -16907,11 +17619,11 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2009 Florian Loitsch -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -17147,10 +17859,10 @@ boundaries compute_boundaries(FloatType value) // v- m- v m+ v+ const bool lower_boundary_is_closer = F == 0 && E > 1; - const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1); + const diyfp m_plus = diyfp((2 * v.f) + 1, v.e - 1); const diyfp m_minus = lower_boundary_is_closer - ? diyfp(4 * v.f - 1, v.e - 2) // (B) - : diyfp(2 * v.f - 1, v.e - 1); // (A) + ? diyfp((4 * v.f) - 1, v.e - 2) // (B) + : diyfp((2 * v.f) - 1, v.e - 1); // (A) // Determine the normalized w+ = m+. 
const diyfp w_plus = diyfp::normalize(m_plus); @@ -17380,7 +18092,7 @@ inline cached_power get_cached_power_for_binary_exponent(int e) JSON_ASSERT(e >= -1500); JSON_ASSERT(e <= 1500); const int f = kAlpha - e - 1; - const int k = (f * 78913) / (1 << 18) + static_cast(f > 0); + const int k = ((f * 78913) / (1 << 18)) + static_cast(f > 0); const int index = (-kCachedPowersMinDecExp + k + (kCachedPowersDecStep - 1)) / kCachedPowersDecStep; JSON_ASSERT(index >= 0); @@ -17858,15 +18570,15 @@ inline char* append_exponent(char* buf, int e) } else if (k < 100) { - *buf++ = static_cast('0' + k / 10); + *buf++ = static_cast('0' + (k / 10)); k %= 10; *buf++ = static_cast('0' + k); } else { - *buf++ = static_cast('0' + k / 100); + *buf++ = static_cast('0' + (k / 100)); k %= 100; - *buf++ = static_cast('0' + k / 10); + *buf++ = static_cast('0' + (k / 10)); k %= 10; *buf++ = static_cast('0' + k); } @@ -18652,7 +19364,7 @@ class serializer @param[in] x unsigned integer number to count its digits @return number of decimal digits */ - inline unsigned int count_digits(number_unsigned_t x) noexcept + unsigned int count_digits(number_unsigned_t x) noexcept { unsigned int n_digits = 1; for (;;) @@ -18935,7 +19647,7 @@ class serializer ? (byte & 0x3fu) | (codep << 6u) : (0xFFu >> type) & (byte); - const std::size_t index = 256u + static_cast(state) * 16u + static_cast(type); + const std::size_t index = 256u + (static_cast(state) * 16u) + static_cast(type); JSON_ASSERT(index < utf8d.size()); state = utf8d[index]; return state; @@ -18961,7 +19673,7 @@ class serializer * absolute values of INT_MIN and INT_MAX are usually not the same. See * #1708 for details. */ - inline number_unsigned_t remove_sign(number_integer_t x) noexcept + number_unsigned_t remove_sign(number_integer_t x) noexcept { JSON_ASSERT(x < 0 && x < (std::numeric_limits::max)()); // NOLINT(misc-redundant-expression) return static_cast(-(x + 1)) + 1; @@ -19003,10 +19715,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -19031,7 +19743,7 @@ NLOHMANN_JSON_NAMESPACE_BEGIN /// for use within nlohmann::basic_json template , class Allocator = std::allocator>> - struct ordered_map : std::vector, Allocator> + struct ordered_map : std::vector, Allocator> { using key_type = Key; using mapped_type = T; @@ -19346,7 +20058,7 @@ template , template using require_input_iter = typename std::enable_if::iterator_category, - std::input_iterator_tag>::value>::type; + std::input_iterator_tag>::value>::type; template> void insert(InputIt first, InputIt last) @@ -19417,9 +20129,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec friend class ::nlohmann::detail::binary_writer; template friend class ::nlohmann::detail::binary_reader; - template + template friend class ::nlohmann::detail::json_sax_dom_parser; - template + template friend class ::nlohmann::detail::json_sax_dom_callback_parser; friend class ::nlohmann::detail::exception; @@ -19440,7 +20152,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec ) { return ::nlohmann::detail::parser(std::move(adapter), - std::move(cb), allow_exceptions, ignore_comments); + std::move(cb), allow_exceptions, ignore_comments); } 
private: @@ -19473,6 +20185,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec using error_handler_t = detail::error_handler_t; /// how to treat CBOR tags using cbor_tag_handler_t = detail::cbor_tag_handler_t; + /// how to encode BJData + using bjdata_version_t = detail::bjdata_version_t; /// helper type for initializer lists of basic_json values using initializer_list_t = std::initializer_list>; @@ -19552,7 +20266,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec { basic_json result; - result["copyright"] = "(C) 2013-2023 Niels Lohmann"; + result["copyright"] = "(C) 2013-2025 Niels Lohmann"; result["name"] = "JSON for Modern C++"; result["url"] = "https://github.com/nlohmann/json"; result["version"]["string"] = @@ -19817,7 +20531,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec object = nullptr; // silence warning, see #821 if (JSON_HEDLEY_UNLIKELY(t == value_t::null)) { - JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.11.3", nullptr)); // LCOV_EXCL_LINE + JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.12.0", nullptr)); // LCOV_EXCL_LINE } break; } @@ -20053,10 +20767,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec return it; } - reference set_parent(reference j, std::size_t old_capacity = static_cast(-1)) + reference set_parent(reference j, std::size_t old_capacity = detail::unknown_size()) { #if JSON_DIAGNOSTICS - if (old_capacity != static_cast(-1)) + if (old_capacity != detail::unknown_size()) { // see https://github.com/nlohmann/json/issues/2838 JSON_ASSERT(type() == value_t::array); @@ -20136,8 +20850,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::enable_if_t < !detail::is_basic_json::value && detail::is_compatible_type::value, int > = 0 > basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-forwarding-reference-overload,bugprone-exception-escape) - JSONSerializer::to_json(std::declval(), - std::forward(val)))) + JSONSerializer::to_json(std::declval(), + std::forward(val)))) { JSONSerializer::to_json(*this, std::forward(val)); set_parents(); @@ -20150,6 +20864,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::enable_if_t < detail::is_basic_json::value&& !std::is_same::value, int > = 0 > basic_json(const BasicJsonType& val) +#if JSON_DIAGNOSTIC_POSITIONS + : start_position(val.start_pos()), + end_position(val.end_pos()) +#endif { using other_boolean_t = typename BasicJsonType::boolean_t; using other_number_float_t = typename BasicJsonType::number_float_t; @@ -20196,6 +20914,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } JSON_ASSERT(m_data.m_type == val.type()); + set_parents(); assert_invariant(); } @@ -20332,7 +21051,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template < class InputIT, typename std::enable_if < std::is_same::value || std::is_same::value, int >::type = 0 > - basic_json(InputIT first, InputIT last) + basic_json(InputIT first, InputIT last) // NOLINT(performance-unnecessary-value-param) { JSON_ASSERT(first.m_object != nullptr); JSON_ASSERT(last.m_object != nullptr); @@ -20447,6 +21166,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// 
@sa https://json.nlohmann.me/api/basic_json/basic_json/ basic_json(const basic_json& other) : json_base_class_t(other) +#if JSON_DIAGNOSTIC_POSITIONS + , start_position(other.start_position) + , end_position(other.end_position) +#endif { m_data.m_type = other.m_data.m_type; // check of passed value is valid @@ -20516,15 +21239,24 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @sa https://json.nlohmann.me/api/basic_json/basic_json/ basic_json(basic_json&& other) noexcept : json_base_class_t(std::forward(other)), - m_data(std::move(other.m_data)) + m_data(std::move(other.m_data)) // cppcheck-suppress[accessForwarded] TODO check +#if JSON_DIAGNOSTIC_POSITIONS + , start_position(other.start_position) // cppcheck-suppress[accessForwarded] TODO check + , end_position(other.end_position) // cppcheck-suppress[accessForwarded] TODO check +#endif { // check that passed value is valid - other.assert_invariant(false); + other.assert_invariant(false); // cppcheck-suppress[accessForwarded] // invalidate payload other.m_data.m_type = value_t::null; other.m_data.m_value = {}; +#if JSON_DIAGNOSTIC_POSITIONS + other.start_position = std::string::npos; + other.end_position = std::string::npos; +#endif + set_parents(); assert_invariant(); } @@ -20545,6 +21277,12 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec using std::swap; swap(m_data.m_type, other.m_data.m_type); swap(m_data.m_value, other.m_data.m_value); + +#if JSON_DIAGNOSTIC_POSITIONS + swap(start_position, other.start_position); + swap(end_position, other.end_position); +#endif + json_base_class_t::operator=(std::move(other)); set_parents(); @@ -20766,13 +21504,13 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// get a pointer to the value (integer number) number_integer_t* get_impl_ptr(number_integer_t* /*unused*/) noexcept { - return is_number_integer() ? &m_data.m_value.number_integer : nullptr; + return m_data.m_type == value_t::number_integer ? &m_data.m_value.number_integer : nullptr; } /// get a pointer to the value (integer number) constexpr const number_integer_t* get_impl_ptr(const number_integer_t* /*unused*/) const noexcept { - return is_number_integer() ? &m_data.m_value.number_integer : nullptr; + return m_data.m_type == value_t::number_integer ? 
&m_data.m_value.number_integer : nullptr; } /// get a pointer to the value (unsigned number) @@ -20907,7 +21645,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::has_from_json::value, int > = 0 > ValueType get_impl(detail::priority_tag<0> /*unused*/) const noexcept(noexcept( - JSONSerializer::from_json(std::declval(), std::declval()))) + JSONSerializer::from_json(std::declval(), std::declval()))) { auto ret = ValueType(); JSONSerializer::from_json(*this, ret); @@ -20949,7 +21687,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::has_non_default_from_json::value, int > = 0 > ValueType get_impl(detail::priority_tag<1> /*unused*/) const noexcept(noexcept( - JSONSerializer::from_json(std::declval()))) + JSONSerializer::from_json(std::declval()))) { return JSONSerializer::from_json(*this); } @@ -21099,7 +21837,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::has_from_json::value, int > = 0 > ValueType & get_to(ValueType& v) const noexcept(noexcept( - JSONSerializer::from_json(std::declval(), v))) + JSONSerializer::from_json(std::declval(), v))) { JSONSerializer::from_json(*this, v); return v; @@ -21251,7 +21989,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec { // create better exception explanation JSON_THROW(out_of_range::create(401, detail::concat("array index ", std::to_string(idx), " is out of range"), this)); - } + } // cppcheck-suppress[missingReturn] } else { @@ -21274,7 +22012,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec { // create better exception explanation JSON_THROW(out_of_range::create(401, detail::concat("array index ", std::to_string(idx), " is out of range"), this)); - } + } // cppcheck-suppress[missingReturn] } else { @@ -21419,7 +22157,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief access specified object element /// @sa https://json.nlohmann.me/api/basic_json/operator%5B%5D/ - reference operator[](typename object_t::key_type key) + reference operator[](typename object_t::key_type key) // NOLINT(performance-unnecessary-value-param) { // implicitly convert null value to an empty object if (is_null()) @@ -21729,7 +22467,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template < class IteratorType, detail::enable_if_t < std::is_same::value || std::is_same::value, int > = 0 > - IteratorType erase(IteratorType pos) + IteratorType erase(IteratorType pos) // NOLINT(performance-unnecessary-value-param) { // make sure iterator fits the current value if (JSON_HEDLEY_UNLIKELY(this != pos.m_object)) @@ -21799,7 +22537,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template < class IteratorType, detail::enable_if_t < std::is_same::value || std::is_same::value, int > = 0 > - IteratorType erase(IteratorType first, IteratorType last) + IteratorType erase(IteratorType first, IteratorType last) // NOLINT(performance-unnecessary-value-param) { // make sure iterator fits the current value if (JSON_HEDLEY_UNLIKELY(this != first.m_object || this != last.m_object)) @@ -22566,7 +23304,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @note: This uses std::distance to support GCC 4.8, /// see https://github.com/nlohmann/json/pull/1257 template - iterator insert_iterator(const_iterator pos, Args&& ... 
args) + iterator insert_iterator(const_iterator pos, Args&& ... args) // NOLINT(performance-unnecessary-value-param) { iterator result(this); JSON_ASSERT(m_data.m_value.array != nullptr); @@ -22585,7 +23323,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts element into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, const basic_json& val) + iterator insert(const_iterator pos, const basic_json& val) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_LIKELY(is_array())) @@ -22605,14 +23343,14 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts element into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, basic_json&& val) + iterator insert(const_iterator pos, basic_json&& val) // NOLINT(performance-unnecessary-value-param) { return insert(pos, val); } /// @brief inserts copies of element into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, size_type cnt, const basic_json& val) + iterator insert(const_iterator pos, size_type cnt, const basic_json& val) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_LIKELY(is_array())) @@ -22632,7 +23370,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts range of elements into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, const_iterator first, const_iterator last) + iterator insert(const_iterator pos, const_iterator first, const_iterator last) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_UNLIKELY(!is_array())) @@ -22663,7 +23401,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts elements from initializer list into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, initializer_list_t ilist) + iterator insert(const_iterator pos, initializer_list_t ilist) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_UNLIKELY(!is_array())) @@ -22683,7 +23421,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts range of elements into object /// @sa https://json.nlohmann.me/api/basic_json/insert/ - void insert(const_iterator first, const_iterator last) + void insert(const_iterator first, const_iterator last) // NOLINT(performance-unnecessary-value-param) { // insert only works for objects if (JSON_HEDLEY_UNLIKELY(!is_object())) @@ -22704,6 +23442,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec } m_data.m_value.object->insert(first.m_it.object_iterator, last.m_it.object_iterator); + set_parents(); } /// @brief updates a JSON object from another object, overwriting existing keys @@ -22715,7 +23454,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief updates a JSON object from another object, overwriting existing keys /// @sa https://json.nlohmann.me/api/basic_json/update/ - void update(const_iterator first, const_iterator last, bool merge_objects = false) + void update(const_iterator first, const_iterator last, bool merge_objects = false) // NOLINT(performance-unnecessary-value-param) { // 
implicitly convert null value to an empty object if (is_null()) @@ -23316,12 +24055,12 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template JSON_HEDLEY_WARN_UNUSED_RESULT static basic_json parse(InputType&& i, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions = true, const bool ignore_comments = false) { basic_json result; - parser(detail::input_adapter(std::forward(i)), cb, allow_exceptions, ignore_comments).parse(true, result); + parser(detail::input_adapter(std::forward(i)), std::move(cb), allow_exceptions, ignore_comments).parse(true, result); // cppcheck-suppress[accessMoved,accessForwarded] return result; } @@ -23331,24 +24070,24 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec JSON_HEDLEY_WARN_UNUSED_RESULT static basic_json parse(IteratorType first, IteratorType last, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions = true, const bool ignore_comments = false) { basic_json result; - parser(detail::input_adapter(std::move(first), std::move(last)), cb, allow_exceptions, ignore_comments).parse(true, result); + parser(detail::input_adapter(std::move(first), std::move(last)), std::move(cb), allow_exceptions, ignore_comments).parse(true, result); // cppcheck-suppress[accessMoved] return result; } JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DEPRECATED_FOR(3.8.0, parse(ptr, ptr + len)) static basic_json parse(detail::span_input_adapter&& i, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions = true, const bool ignore_comments = false) { basic_json result; - parser(i.get(), cb, allow_exceptions, ignore_comments).parse(true, result); + parser(i.get(), std::move(cb), allow_exceptions, ignore_comments).parse(true, result); // cppcheck-suppress[accessMoved] return result; } @@ -23527,6 +24266,23 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec basic_json* m_parent = nullptr; #endif +#if JSON_DIAGNOSTIC_POSITIONS + /// the start position of the value + std::size_t start_position = std::string::npos; + /// the end position of the value + std::size_t end_position = std::string::npos; + public: + constexpr std::size_t start_pos() const noexcept + { + return start_position; + } + + constexpr std::size_t end_pos() const noexcept + { + return end_position; + } +#endif + ////////////////////////////////////////// // binary serialization/deserialization // ////////////////////////////////////////// @@ -23612,27 +24368,30 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @sa https://json.nlohmann.me/api/basic_json/to_bjdata/ static std::vector to_bjdata(const basic_json& j, const bool use_size = false, - const bool use_type = false) + const bool use_type = false, + const bjdata_version_t version = bjdata_version_t::draft2) { std::vector result; - to_bjdata(j, result, use_size, use_type); + to_bjdata(j, result, use_size, use_type, version); return result; } /// @brief create a BJData serialization of a given JSON value /// @sa https://json.nlohmann.me/api/basic_json/to_bjdata/ static void to_bjdata(const basic_json& j, detail::output_adapter o, - const bool use_size = false, const bool use_type = false) + const bool use_size = false, const bool use_type = false, + const bjdata_version_t version = bjdata_version_t::draft2) { - binary_writer(o).write_ubjson(j, use_size, use_type, true, true); + 
binary_writer(o).write_ubjson(j, use_size, use_type, true, true, version); } /// @brief create a BJData serialization of a given JSON value /// @sa https://json.nlohmann.me/api/basic_json/to_bjdata/ static void to_bjdata(const basic_json& j, detail::output_adapter o, - const bool use_size = false, const bool use_type = false) + const bool use_size = false, const bool use_type = false, + const bjdata_version_t version = bjdata_version_t::draft2) { - binary_writer(o).write_ubjson(j, use_size, use_type, true, true); + binary_writer(o).write_ubjson(j, use_size, use_type, true, true, version); } /// @brief create a BSON serialization of a given JSON value @@ -23668,9 +24427,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23684,9 +24443,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23709,10 +24468,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23725,9 +24484,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? 
result : basic_json(value_t::discarded); } @@ -23740,9 +24499,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23763,10 +24522,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); + const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23779,9 +24538,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23794,9 +24553,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23817,10 +24576,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); + const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? 
result : basic_json(value_t::discarded); } @@ -23833,9 +24592,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23848,9 +24607,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23863,9 +24622,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23878,9 +24637,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23901,10 +24660,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); + const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? 
result : basic_json(value_t::discarded); } /// @} @@ -24005,7 +24764,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec // the valid JSON Patch operations enum class patch_operations {add, remove, replace, move, copy, test, invalid}; - const auto get_op = [](const std::string & op) + const auto get_op = [](const string_t& op) { if (op == "add") { @@ -24036,7 +24795,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec }; // wrapper for "add" operation; add value at ptr - const auto operation_add = [&result](json_pointer & ptr, basic_json val) + const auto operation_add = [&result](json_pointer & ptr, const basic_json & val) { // adding to the root of the target document means replacing it if (ptr.empty()) @@ -24142,8 +24901,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec for (const auto& val : json_patch) { // wrapper to get a value for an operation - const auto get_value = [&val](const std::string & op, - const std::string & member, + const auto get_value = [&val](const string_t& op, + const string_t& member, bool string_type) -> basic_json & { // find value @@ -24177,8 +24936,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec } // collect mandatory members - const auto op = get_value("op", "op", true).template get(); - const auto path = get_value(op, "path", true).template get(); + const auto op = get_value("op", "op", true).template get(); + const auto path = get_value(op, "path", true).template get(); json_pointer ptr(path); switch (get_op(op)) @@ -24204,7 +24963,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec case patch_operations::move: { - const auto from_path = get_value("move", "from", true).template get(); + const auto from_path = get_value("move", "from", true).template get(); json_pointer from_ptr(from_path); // the "from" location must exist - use at() @@ -24221,7 +24980,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec case patch_operations::copy: { - const auto from_path = get_value("copy", "from", true).template get(); + const auto from_path = get_value("copy", "from", true).template get(); const json_pointer from_ptr(from_path); // the "from" location must exist - use at() @@ -24281,7 +25040,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @sa https://json.nlohmann.me/api/basic_json/diff/ JSON_HEDLEY_WARN_UNUSED_RESULT static basic_json diff(const basic_json& source, const basic_json& target, - const std::string& path = "") + const string_t& path = "") { // the patch basic_json result(value_t::array); @@ -24311,7 +25070,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec while (i < source.size() && i < target.size()) { // recursive call to compare array values at index i - auto temp_diff = diff(source[i], target[i], detail::concat(path, '/', std::to_string(i))); + auto temp_diff = diff(source[i], target[i], detail::concat(path, '/', detail::to_string(i))); result.insert(result.end(), temp_diff.begin(), temp_diff.end()); ++i; } @@ -24328,7 +25087,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec result.insert(result.begin() + end_index, object( { {"op", "remove"}, - {"path", detail::concat(path, '/', std::to_string(i))} + {"path", detail::concat(path, '/', detail::to_string(i))} })); ++i; } @@ -24339,7 +25098,7 @@ class basic_json // 
NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec result.push_back( { {"op", "add"}, - {"path", detail::concat(path, "/-")}, + {"path", detail::concat(path, "/-")}, {"value", target[i]} }); ++i; @@ -24354,7 +25113,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec for (auto it = source.cbegin(); it != source.cend(); ++it) { // escape the key name to be used in a JSON patch - const auto path_key = detail::concat(path, '/', detail::escape(it.key())); + const auto path_key = detail::concat(path, '/', detail::escape(it.key())); if (target.find(it.key()) != target.end()) { @@ -24378,7 +25137,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec if (source.find(it.key()) == source.end()) { // found a key that is not in this -> add it - const auto path_key = detail::concat(path, '/', detail::escape(it.key())); + const auto path_key = detail::concat(path, '/', detail::escape(it.key())); result.push_back( { {"op", "add"}, {"path", path_key}, @@ -24559,10 +25318,10 @@ inline void swap(nlohmann::NLOHMANN_BASIC_JSON_TPL& j1, nlohmann::NLOHMANN_BASIC // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -24593,6 +25352,7 @@ inline void swap(nlohmann::NLOHMANN_BASIC_JSON_TPL& j1, nlohmann::NLOHMANN_BASIC #undef JSON_HAS_CPP_14 #undef JSON_HAS_CPP_17 #undef JSON_HAS_CPP_20 + #undef JSON_HAS_CPP_23 #undef JSON_HAS_FILESYSTEM #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM #undef JSON_HAS_THREE_WAY_COMPARISON @@ -24604,10 +25364,10 @@ inline void swap(nlohmann::NLOHMANN_BASIC_JSON_TPL& j1, nlohmann::NLOHMANN_BASIC // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT diff --git a/vendor/nlohmann/json_fwd.hpp b/vendor/nlohmann/json_fwd.hpp new file mode 100644 index 000000000..942917139 --- /dev/null +++ b/vendor/nlohmann/json_fwd.hpp @@ -0,0 +1,187 @@ +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.12.0 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-License-Identifier: MIT + +#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_ +#define INCLUDE_NLOHMANN_JSON_FWD_HPP_ + +#include // int64_t, uint64_t +#include // map +#include // allocator +#include // string +#include // vector + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.12.0 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +// This file contains all macro definitions affecting or depending on the ABI + +#ifndef JSON_SKIP_LIBRARY_VERSION_CHECK + #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH) + #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 12 || NLOHMANN_JSON_VERSION_PATCH != 0 + #warning "Already 
included a different version of the library!" + #endif + #endif +#endif + +#define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_MINOR 12 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_PATCH 0 // NOLINT(modernize-macro-to-enum) + +#ifndef JSON_DIAGNOSTICS + #define JSON_DIAGNOSTICS 0 +#endif + +#ifndef JSON_DIAGNOSTIC_POSITIONS + #define JSON_DIAGNOSTIC_POSITIONS 0 +#endif + +#ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON + #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0 +#endif + +#if JSON_DIAGNOSTICS + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag +#else + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS +#endif + +#if JSON_DIAGNOSTIC_POSITIONS + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS _dp +#else + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS +#endif + +#if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON + #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp +#else + #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON +#endif + +#ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION + #define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0 +#endif + +// Construct the namespace ABI tags component +#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) json_abi ## a ## b ## c +#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b, c) \ + NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) + +#define NLOHMANN_JSON_ABI_TAGS \ + NLOHMANN_JSON_ABI_TAGS_CONCAT( \ + NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \ + NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON, \ + NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS) + +// Construct the namespace version component +#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \ + _v ## major ## _ ## minor ## _ ## patch +#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \ + NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) + +#if NLOHMANN_JSON_NAMESPACE_NO_VERSION +#define NLOHMANN_JSON_NAMESPACE_VERSION +#else +#define NLOHMANN_JSON_NAMESPACE_VERSION \ + NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \ + NLOHMANN_JSON_VERSION_MINOR, \ + NLOHMANN_JSON_VERSION_PATCH) +#endif + +// Combine namespace components +#define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b +#define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \ + NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) + +#ifndef NLOHMANN_JSON_NAMESPACE +#define NLOHMANN_JSON_NAMESPACE \ + nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \ + NLOHMANN_JSON_ABI_TAGS, \ + NLOHMANN_JSON_NAMESPACE_VERSION) +#endif + +#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN +#define NLOHMANN_JSON_NAMESPACE_BEGIN \ + namespace nlohmann \ + { \ + inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT( \ + NLOHMANN_JSON_ABI_TAGS, \ + NLOHMANN_JSON_NAMESPACE_VERSION) \ + { +#endif + +#ifndef NLOHMANN_JSON_NAMESPACE_END +#define NLOHMANN_JSON_NAMESPACE_END \ + } /* namespace (inline namespace) NOLINT(readability/namespace) */ \ + } // namespace nlohmann +#endif + + +/*! +@brief namespace for Niels Lohmann +@see https://github.com/nlohmann +@since version 1.0.0 +*/ +NLOHMANN_JSON_NAMESPACE_BEGIN + +/*! +@brief default JSONSerializer template argument + +This serializer ignores the template arguments and uses ADL +([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) +for serialization. 
+*/ +template +struct adl_serializer; + +/// a class to store JSON values +/// @sa https://json.nlohmann.me/api/basic_json/ +template class ObjectType = + std::map, + template class ArrayType = std::vector, + class StringType = std::string, class BooleanType = bool, + class NumberIntegerType = std::int64_t, + class NumberUnsignedType = std::uint64_t, + class NumberFloatType = double, + template class AllocatorType = std::allocator, + template class JSONSerializer = + adl_serializer, + class BinaryType = std::vector, // cppcheck-suppress syntaxError + class CustomBaseClass = void> +class basic_json; + +/// @brief JSON Pointer defines a string syntax for identifying a specific value within a JSON document +/// @sa https://json.nlohmann.me/api/json_pointer/ +template +class json_pointer; + +/*! +@brief default specialization +@sa https://json.nlohmann.me/api/json/ +*/ +using json = basic_json<>; + +/// @brief a minimal map-like container that preserves insertion order +/// @sa https://json.nlohmann.me/api/ordered_map/ +template +struct ordered_map; + +/// @brief specialization that maintains the insertion order of object keys +/// @sa https://json.nlohmann.me/api/ordered_json/ +using ordered_json = basic_json; + +NLOHMANN_JSON_NAMESPACE_END + +#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_ diff --git a/tools/mtmd/vendor/stb_image.h b/vendor/stb/stb_image.h similarity index 100% rename from tools/mtmd/vendor/stb_image.h rename to vendor/stb/stb_image.h From b49a8ff96b769b8a4c36d89fb783ec0135be582b Mon Sep 17 00:00:00 2001 From: Akarshan Biswas Date: Fri, 30 May 2025 19:40:57 +0530 Subject: [PATCH 17/21] SYCL: Add mrope kernel (#13755) * SYCL: Add mrope kernel * feat: Optimize rope operations with vectorization Uses `sycl::vec` to load and store two elements at a time, significantly improving performance in `rope_norm`, `rope_neox`, and `rope_multi`. This reduces the number of memory accesses and leverages SIMD instructions for faster execution. 
* Use ceil_div --- ggml/src/ggml-sycl/ggml-sycl.cpp | 8 -- ggml/src/ggml-sycl/rope.cpp | 129 ++++++++++++++++++++++++++++--- 2 files changed, 118 insertions(+), 19 deletions(-) diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index e96e1f248..bcd2ea536 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4257,14 +4257,6 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_SOFT_MAX: return true; case GGML_OP_ROPE: - { - const int mode = ((const int32_t *) op->op_params)[2]; - // mode is not used as a bitmask in practice, the various rope type modes are independent implementations - if (mode == GGML_ROPE_TYPE_MROPE) { - return false; - } - return true; - } case GGML_OP_IM2COL: return true; case GGML_OP_UPSCALE: diff --git a/ggml/src/ggml-sycl/rope.cpp b/ggml/src/ggml-sycl/rope.cpp index a6516a7e1..44473e1e5 100644 --- a/ggml/src/ggml-sycl/rope.cpp +++ b/ggml/src/ggml-sycl/rope.cpp @@ -49,10 +49,7 @@ static void rope_norm(const T * x, T * dst, const int ne0, const int ne1, const if (i0 >= n_dims) { const int i = row * ne0 + i0; - - dst[i + 0] = x[i + 0]; - dst[i + 1] = x[i + 1]; - + *reinterpret_cast *>(dst + i) = *reinterpret_cast *>(x + i); return; } @@ -93,10 +90,7 @@ static void rope_neox(const T * x, T * dst, const int ne0, const int ne1, const if (i0 >= n_dims) { const int i = row * ne0 + i0; - - dst[i + 0] = x[i + 0]; - dst[i + 1] = x[i + 1]; - + *reinterpret_cast *>(dst + i) = *reinterpret_cast *>(x + i); return; } @@ -122,6 +116,63 @@ static void rope_neox(const T * x, T * dst, const int ne0, const int ne1, const dst[i + n_dims / 2] = x0 * sin_theta + x1 * cos_theta; } +template +static void rope_multi(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, + const size_t s2, const int n_dims, const int32_t * pos, const float freq_scale, + const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, + const float theta_scale, const float * freq_factors, const mrope_sections sections, + const sycl::nd_item<3> & item_ct1) { + // get index pos + const int i0 = 2 * (item_ct1.get_group(1) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1)); + if (i0 >= ne0) { + return; + } + const int row_dst = (item_ct1.get_group(2) * item_ct1.get_local_range(2)) + item_ct1.get_local_id(2); + + if (i0 >= n_dims) { + const int i = row_dst*ne0 + i0; + *reinterpret_cast *>(dst + i) = *reinterpret_cast *>(x + i); + return; + } + + const int row_x = row_dst % ne1; + const int channel_x = row_dst / ne1; + const int idst = (row_dst * ne0) + (i0 / 2); + const size_t ix = ((size_t) channel_x * s2) + ((size_t) row_x * s1) + (i0 / 2); + + const int sect_dims = sections.v[0] + sections.v[1] + sections.v[2] + sections.v[3]; + const int sec_w = sections.v[1] + sections.v[0]; + const int sector = (i0 / 2) % sect_dims; + + + float theta_base = 0.0; + if (sector < sections.v[0]) { + theta_base = pos[channel_x]*sycl::pow(theta_scale, i0/2.0f); + } + else if (sector >= sections.v[0] && sector < sec_w) { + theta_base = pos[channel_x + ne2 * 1]*sycl::pow(theta_scale, i0/2.0f); + } + else if (sector >= sec_w && sector < sec_w + sections.v[2]) { + theta_base = pos[channel_x + ne2 * 2]*sycl::pow(theta_scale, i0/2.0f); + } + else if (sector >= sec_w + sections.v[2]) { + theta_base = pos[channel_x + ne2 * 3]*sycl::pow(theta_scale, i0/2.0f); + } + + const float freq_factor = has_ff ? 
freq_factors[i0 / 2] : 1.0f; + float cos_theta; + float sin_theta; + rope_yarn(theta_base / freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta); + const float x0 = x[ix + 0]; + const float x1 = x[ix + n_dims/2]; + + // store results in dst + dst[idst + 0] = x0 * cos_theta - x1 * sin_theta; + dst[idst + n_dims/2] = x0 * sin_theta + x1 * cos_theta; +} + + + template static void rope_vision(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, const size_t s2, const int n_dims, const int32_t * pos, const float freq_scale, @@ -171,7 +222,7 @@ static void rope_norm_sycl(const T * x, T * dst, const int ne0, const int ne1, c const float * freq_factors, queue_ptr stream) { GGML_ASSERT(ne0 % 2 == 0); const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1); - const int num_blocks_x = (ne0 + 2 * SYCL_ROPE_BLOCK_SIZE - 1) / (2 * SYCL_ROPE_BLOCK_SIZE); + const int num_blocks_x = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE)); const sycl::range<3> block_nums(1, num_blocks_x, nr); const float theta_scale = powf(freq_base, -2.0f / n_dims); @@ -208,7 +259,7 @@ static void rope_neox_sycl(const T * x, T * dst, const int ne0, const int ne1, c const rope_corr_dims corr_dims, const float * freq_factors, queue_ptr stream) { GGML_ASSERT(ne0 % 2 == 0); const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1); - const int num_blocks_x = (ne0 + 2 * SYCL_ROPE_BLOCK_SIZE - 1) / (2 * SYCL_ROPE_BLOCK_SIZE); + const int num_blocks_x = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE)); const sycl::range<3> block_nums(1, num_blocks_x, nr); const float theta_scale = powf(freq_base, -2.0f / n_dims); @@ -228,6 +279,40 @@ static void rope_neox_sycl(const T * x, T * dst, const int ne0, const int ne1, c } } +template +static void rope_multi_sycl(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, + const size_t s2, const int n_dims, const int nr, const int32_t * pos, + const float freq_scale, const float freq_base, const float ext_factor, + const float attn_factor, const rope_corr_dims corr_dims, const float * freq_factors, + const mrope_sections sections, queue_ptr stream) { + GGML_ASSERT(ne0 % 2 == 0); + const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1); + const int n_blocks_y = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE)); + const sycl::range<3> grid_dims(1, n_blocks_y, nr); + const sycl::nd_range<3> nd_range(grid_dims * block_dims, block_dims); + + const float theta_scale = std::pow(freq_base, -2.0f / n_dims); + // Add FP16 capability check if T could be sycl::half + if constexpr (std::is_same_v) { + dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); + } + // launch kernel + if (freq_factors == nullptr) { + stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) { + rope_multi(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, + corr_dims, theta_scale, freq_factors, sections, item_ct1); + }); + } else { + stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) { + rope_multi(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, + corr_dims, theta_scale, freq_factors, sections, item_ct1); + }); + } +} + + + + // rope vision template static void rope_vision_sycl(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, @@ -237,7 +322,7 @@ static void rope_vision_sycl(const T * x, T * dst, const int ne0, const int ne1, const mrope_sections sections, queue_ptr stream) { GGML_ASSERT(ne0 % 2 == 0); const 
sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1); - const int n_blocks_y = (ne0 + 2 * SYCL_ROPE_BLOCK_SIZE - 1) / (2 * SYCL_ROPE_BLOCK_SIZE); + const int n_blocks_y = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE)); const sycl::range<3> grid_dims(1, n_blocks_y, nr); const sycl::nd_range<3> nd_range(grid_dims * block_dims, block_dims); @@ -298,8 +383,17 @@ inline void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) memcpy(§ions.v, (int32_t *) dst->op_params + 11, sizeof(int)*4); const bool is_neox = mode & GGML_ROPE_TYPE_NEOX; + const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE; const bool is_vision = mode == GGML_ROPE_TYPE_VISION; + if (is_mrope) { + GGML_ASSERT(sections.v[0] > 0 || sections.v[1] > 0 || sections.v[2] > 0); + } + + if (is_vision) { + GGML_ASSERT(n_dims == ne00/2); + } + const int32_t * pos = (const int32_t *) dst->src[1]->data; const float * freq_factors = nullptr; @@ -326,6 +420,19 @@ inline void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) } else { GGML_ABORT("fatal error"); } + } else if (is_mrope && !is_vision) { + GGML_SYCL_DEBUG("%s: mrope path\n", __func__); + if (dst->src[0]->type == GGML_TYPE_F16) { + rope_multi_sycl((const sycl::half *)dst->src[0]->data, (sycl::half *)dst->data, ne00, ne01, ne02, s01, + s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, + freq_factors, sections, main_stream); + } else if (dst->src[0]->type == GGML_TYPE_F32) { + rope_multi_sycl((const float *) dst->src[0]->data, (float *) dst->data, ne00, ne01, ne02, s01, s02, n_dims, + nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, + main_stream); + } else { + GGML_ABORT("Fatal error: Tensor type unsupported!"); + } } else if (is_vision) { GGML_SYCL_DEBUG("%s: vision path\n", __func__); if (dst->src[0]->type == GGML_TYPE_F16) { From df0c0c7d02f951dc639d48fd536f79200460ac83 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Fri, 30 May 2025 07:37:18 -0700 Subject: [PATCH 18/21] cuda : prevent using split buffers with 3d/4d matrices (#13919) --- ggml/src/ggml-cuda/ggml-cuda.cu | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index c442a6492..009ed9048 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2994,9 +2994,12 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g { struct ggml_tensor * a = op->src[0]; struct ggml_tensor * b = op->src[1]; - // for small weight matrices the active device can end up without any rows, don't use row split in those cases - // this avoids some edge cases (and the performance would not be good anyways) if (a->buffer && ggml_backend_buft_is_cuda_split(a->buffer->buft)) { + if (a->ne[2] > 1 || a->ne[3] > 1) { + return false; + } + // for small weight matrices the active device can end up without any rows, don't use row split in those cases + // this avoids some edge cases (and the performance would not be good anyways) ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) a->buffer->buft->context; int64_t row_low; int64_t row_high; From dd665cc9d4b378626cde11ea0c4c91f514fdc994 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 30 May 2025 19:38:07 +0300 Subject: [PATCH 19/21] parallel : increase the variability of the prompt lengths (#13927) ggml-ci --- examples/parallel/README.md | 2 +- examples/parallel/parallel.cpp | 7 +++++-- 2 files 
changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/parallel/README.md b/examples/parallel/README.md index ece3a6641..2468a30d2 100644 --- a/examples/parallel/README.md +++ b/examples/parallel/README.md @@ -4,7 +4,7 @@ Simplified simulation of serving incoming requests in parallel ## Example -Generate 128 client requests (`-ns 128`), simulating 8 concurrent clients (`-np 8`). The system prompt is shared (`-pps`), meaning that it is computed once at the start. The client requests consist of 10 junk questions (`-j 10`) followed by the actual question. +Generate 128 client requests (`-ns 128`), simulating 8 concurrent clients (`-np 8`). The system prompt is shared (`-pps`), meaning that it is computed once at the start. The client requests consist of up to 10 junk questions (`--junk 10`) followed by the actual question. ```bash llama-parallel -m model.gguf -np 8 -ns 128 --top-k 1 -pps --junk 10 -c 16384 diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index acb1301a2..931ea0035 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -315,7 +315,10 @@ int main(int argc, char ** argv) { } else { client.prompt += k_system; } - for (int i = 0; i < n_junk; ++i) { + + const int n_junk_cur = rand() % n_junk; + + for (int i = 0; i < n_junk_cur; ++i) { const int r = rand() % k_questions.size(); client.prompt += "User:\n" + k_questions[r] + "\nAssistant:\n " + k_answers[r] + "\n"; } @@ -340,7 +343,7 @@ int main(int argc, char ** argv) { client.n_decoded = 0; client.i_batch = batch.n_tokens - 1; - LOG_INF("\033[31mClient %3d, seq %4d, started decoding ...\033[0m\n", client.id, client.seq_id); + LOG_INF("\033[31mClient %3d, seq %4d, junk = %4d, started decoding ...\033[0m\n", client.id, client.seq_id, n_junk_cur); g_seq_id += 1; From b47ab7b8e9c4f6154eb6abb6234866ab117d64c7 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Fri, 30 May 2025 09:56:19 -0700 Subject: [PATCH 20/21] sched : avoid changing cur_copy when a graph is already allocated (#13922) --- ggml/src/ggml-backend.cpp | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp index 1f40f10e8..b1050ad59 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -1340,7 +1340,10 @@ static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) { // allocate graph if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { // the re-allocation may cause the split inputs to be moved to a different address - ggml_backend_sched_synchronize(sched); + // synchronize without ggml_backend_sched_synchronize to avoid changing cur_copy + for (int i = 0; i < sched->n_backends; i++) { + ggml_backend_synchronize(sched->backends[i]); + } #ifndef NDEBUG GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed); #endif @@ -1564,7 +1567,6 @@ bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgra ggml_backend_sched_split_graph(sched, graph); - if (!ggml_backend_sched_alloc_splits(sched)) { return false; } @@ -1598,9 +1600,12 @@ void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) { for (int i = 0; i < sched->n_backends; i++) { ggml_backend_synchronize(sched->backends[i]); } - // reset the current copy to 0 so that the graphs will be similar during generation - // necessary for CUDA graphs - sched->cur_copy = 0; + if (!sched->is_alloc) { + // if the graph is not already 
allocated, always use copy 0 after a synchronization + // this ensures that during generation the same copy is used every time, + // which avoids changes in the graph that could cause CUDA or other graphs to be disabled + sched->cur_copy = 0; + } } void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) { From e562eece7cb476276bfc4cbb18deb7c0369b2233 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Fri, 30 May 2025 21:22:03 +0200 Subject: [PATCH 21/21] CUDA: fix typo in FlashAttention code (#13926) --- ggml/src/ggml-cuda/fattn-mma-f16.cuh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda/fattn-mma-f16.cuh b/ggml/src/ggml-cuda/fattn-mma-f16.cuh index 7120053b6..925f39e89 100644 --- a/ggml/src/ggml-cuda/fattn-mma-f16.cuh +++ b/ggml/src/ggml-cuda/fattn-mma-f16.cuh @@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16( NO_DEVICE_CODE; return; } -#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING +#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV");