//This is Concedo's shitty adapter for adding python bindings for llama

//Considerations:
//Don't want to use pybind11 due to dependencies on MSVCC
//ZERO or MINIMAL changes as possible to main.cpp - do not move their function declarations here!
//Leave main.cpp UNTOUCHED, We want to be able to update the repo and pull any changes automatically.
//No dynamic memory allocation! Setup structs with FIXED (known) shapes and sizes for ALL output fields
//Python will ALWAYS provide the memory, we just write to it.

#include
#include
#include
#include
#include
#include "model_adapter.h"
#include "otherarch.h"
#include "llama.h"
#include
#include
#include
#include
#include
#include
#include
#include "utils.h"

//for easier compilation
//concat source files into one file for compilation purposes
#include "llama_v2.cpp"
#include "llama_v3.cpp"
#include "src/llama.cpp"
#include "gptj_v1.cpp"
#include "gptj_v2.cpp"
#include "gptj_v3.cpp"
#include "gpt2_v1.cpp"
#include "gpt2_v2.cpp"
#include "gpt2_v3.cpp"
#include "rwkv_v2.cpp"
#include "rwkv_v3.cpp"
#include "neox_v2.cpp"
#include "neox_v3.cpp"
#include "mpt_v3.cpp"
#include "tools/mtmd/clip.h"
#include "tools/mtmd/llava.h"
#include "tools/mtmd/mtmd-audio.h"
#include "common/common.h"

#if defined(GGML_USE_HIP)
// for rocblas_initialize()
#include "rocblas/rocblas.h"
#endif

//const
const int extra_context_handle_fragmentation = 128;
const int MEDIA_TOKEN_IDENTIFIER_A = -998; //alternate between both, changing when image changes
const int MEDIA_TOKEN_IDENTIFIER_B = -999;

//shared
std::string executable_path = "";
std::string lora_filename = "";
std::string mmproj_filename = "";
std::string draftmodel_filename = "";
int speculative_chunk_amt = 8; //do it in chunks of this many tokens
bool generation_finished;
bool audio_multimodal_supported = false;
bool vision_multimodal_supported = false;
float last_process_time = 0;
float last_eval_time = 0;
int last_token_count = 0;
int last_input_count = 0;
int last_seed = -1;
int total_gens = 0;
int last_draft_success = 0;
int last_draft_failed = 0;
stop_reason last_stop_reason = stop_reason::INVALID;
std::vector generated_tokens;
llama_grammar * grammar = nullptr; //currently used grammar
llama_grammar_parser parsed_grammar;
static std::string current_grammar = "";

//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
static FileFormat file_format = FileFormat::BADFORMAT;
static FileFormatExtraMeta file_format_meta;

static gpt_vocab vocab;
static int32_t n_vocab = 0;

static gptj_v1_model gptj_ctx_v1;
static gptj_v2_model gptj_ctx_v2;
static gptj_model gptj_ctx_v3;

static gpt2_v1_model gpt2_ctx_v1;
static gpt2_v2_model gpt2_ctx_v2;
static gpt2_model gpt2_ctx_v3;

static gpt_neox_v2_model neox_ctx_v2;
static gpt_neox_model neox_ctx_v3;

static mpt_model mpt_ctx_v3;

static rwkv_v2_context * rwkv_ctx_v2 = nullptr;
static rwkv_context * rwkv_ctx_v3 = nullptr;

static llama_v2_context * llama_ctx_v2 = nullptr;
static llama_v3_context * llama_ctx_v3 = nullptr;
static llama_context * llama_ctx_v4 = nullptr;
static llama_context * draft_ctx = nullptr; //will remain null if speculative is unused
static llama_context * guidance_ctx = nullptr; //for classifier free guidance, will be null if unused

static clip_ctx * clp_ctx_v = nullptr; //for llava
static clip_image_u8 * clp_img_data = nullptr; //most recent image
static clip_ctx * clp_ctx_a = nullptr; //for audio multimodal
static std::unique_ptr audio_preproc; //for audio processing
static std::vector media_objects;
static std::vector last_media_mem; //for storing dummy tokens that will be consumed by llava
static std::string media_composite_image_signature = ""; //for identifying when the llava images change, we need to invalidate the cache
static int current_media_identifier = MEDIA_TOKEN_IDENTIFIER_A;
static int vision_max_res = 2048;
static bool use_mrope = false;

static kcpp_params * kcpp_data = nullptr;
static int max_context_limit_at_load = 0;
static int n_past = 0;
static int debugmode = 0; //-1 = hide all, 0 = normal, 1 = showall
static bool is_quiet = false;
static std::vector last_n_tokens;
static std::vector current_context_tokens;
static std::vector loaded_latest_logits; //do not use normally, this is only required when loading state happens and we need to override logits
static size_t mem_per_token = 0;
static std::vector logits;
static std::vector smartcontext;
static float adaptive_p_weighted_sum = 0; //adaptive p sampling state vars
static float adaptive_p_total_weight = 0;
static std::vector stop_sequence;
static std::vector special_stop_sequence; //for stop sequences that don't have a string representation
static std::vector banned_tokens;
static std::vector banned_token_ids;
static std::vector toolcall_prevented_ids; //temp ban these id for the first 3 tokens generated, to prevent empty replies
static std::vector banned_phrases;
static std::unordered_multimap> dry_sequence_breakers; // Multi-mapping from first token of sequence to tail of sequence (tail is empty for a single token)
static std::vector dry_repeat_count; // Indexed as last_n_tokens
static std::unordered_map dry_max_token_repeat;
static std::vector top_picks_history;
static int remaining_tokens = 0;
static bool early_abort = false;
static std::mutex concat_output_mtx;
static std::string concat_output = "";
static std::string concat_output_reader_copy_poll = ""; //for streaming
static std::string concat_output_reader_copy_res = ""; //for gen response
static std::vector logit_biases;
static bool add_bos_token = true; // if set to false, mmproj handling breaks. dont disable unless you know what you're doing
static bool load_guidance = false; //whether to enable cfg for negative prompts
static bool check_slowness = false; //will display a suggestion to use highpriority if slow
static bool showed_rnn_warning = false;
static bool highpriority = false;
static int rnn_reusable_slot_idx = -1;
static int delayed_generated_tokens_limit = 0;
std::deque delayed_generated_tokens; //for use with antislop sampling
static std::map> antislop_banned_token_ids; //first is the npast position, second is the array of banned ids at that index
static int savestate_limit = 0;
static std::vector savestates;

inline int kcpp_cpu_has_blas(void)
{
#if defined(GGML_USE_BLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_SYCL)
    return 1;
#else
    return 0;
#endif
}

inline bool IsNanCheck(float f)
{
    const unsigned int u = *(unsigned int*)&f;
    return (u&0x7F800000) == 0x7F800000 && (u&0x7FFFFF); // Both NaN and qNan.
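    // Illustrative note (not from the original source): this tests the IEEE-754 bit pattern,
    // i.e. all exponent bits set plus a nonzero mantissa. For example, the quiet-NaN pattern
    // 0x7FC00000 gives (u & 0x7F800000) == 0x7F800000 and (u & 0x7FFFFF) == 0x400000 -> true,
    // while +infinity (0x7F800000) has a zero mantissa -> false.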
} inline bool LogitsDuplicated(std::vector & arr1, std::vector & arr2) { int compareQty = 5; if(arr1.size() < compareQty || arr2.size() < compareQty || arr1.size()!=arr2.size()) { printf("\nError: Logit array sizes are bad!\n"); return false; } for(int i=0;i & output_tokens, FileFormat file_format, bool add_bos) { if (file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2 || file_format == FileFormat::GGJT_3 || file_format == FileFormat::GGUF_GENERIC) { if(file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2 ) { output_tokens = ::llama_v2_tokenize(llama_ctx_v2, str_to_tokenize, add_bos); } else if (file_format == FileFormat::GGML) { output_tokens = ::legacy_llama_v2_tokenize(llama_ctx_v2, str_to_tokenize, add_bos); } else if (file_format == FileFormat::GGJT_3) { output_tokens = ::llama_v3_tokenize(llama_ctx_v3, str_to_tokenize, add_bos); } else { output_tokens = ::common_tokenize(llama_ctx_v4, str_to_tokenize, add_bos, true); if(add_bos) { const llama_vocab * tmpvocab = llama_model_get_vocab(llama_get_model(llama_ctx_v4)); llama_token bostoadd = llama_vocab_bos(tmpvocab); if(bostoadd != LLAMA_TOKEN_NULL) //if bos does not exist, do not add it { if(output_tokens.size()==0) { output_tokens.push_back(bostoadd); } else { if(output_tokens[0]!=bostoadd) { output_tokens.insert(output_tokens.begin(), 1, bostoadd); } } } } } } else { // tokenize the prompt output_tokens = ::gpt_tokenize(vocab, str_to_tokenize); } } static int GetEosID(FileFormat file_format, int32_t n_vocab) { unsigned int eosID = 0; if(file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2 || file_format == FileFormat::GGJT_3 || file_format == FileFormat::GGUF_GENERIC) { if(file_format == FileFormat::GGUF_GENERIC) { const llama_vocab * tmpvocab = llama_model_get_vocab(llama_get_model(llama_ctx_v4)); eosID = llama_vocab_eos(tmpvocab); } else if(file_format == FileFormat::GGJT_3) { eosID = llama_v3_token_eos(); } else { eosID = llama_v3_token_eos(); } } else { if (file_format == FileFormat::GPT2_1 || file_format == FileFormat::GPT2_2 || file_format == FileFormat::GPT2_3 || file_format == FileFormat::GPT2_4 || file_format == FileFormat::GPTJ_1 || file_format == FileFormat::GPTJ_2 || file_format == FileFormat::GPTJ_3 || file_format == FileFormat::GPTJ_4 || file_format == FileFormat::GPTJ_5) { eosID = 50256; if (n_vocab <= eosID) { //special case, starcoder models use ID 0 for EOS eosID = 0; } } if (file_format == FileFormat::RWKV_1 || file_format == FileFormat::RWKV_2 || file_format == FileFormat::NEOX_1 || file_format == FileFormat::NEOX_2 || file_format == FileFormat::NEOX_3 || file_format == FileFormat::NEOX_4 || file_format == FileFormat::NEOX_5 || file_format == FileFormat::NEOX_6 || file_format == FileFormat::NEOX_7 || file_format == FileFormat::MPT_1) { eosID = 0; } } return eosID; } static std::vector GetEogIDs(FileFormat file_format, int32_t n_vocab) { std::vector alleogs; int eos = GetEosID(file_format, n_vocab); if(file_format == FileFormat::GGUF_GENERIC) { const llama_vocab * tmpvocab = llama_model_get_vocab(llama_get_model(llama_ctx_v4)); int eot = llama_vocab_eot(tmpvocab); std::set eogs = tmpvocab->get_eogs(); if (eot >= 0) { eogs.insert(eot); } if (eos >= 0) { eogs.insert(eos); } alleogs = std::vector(eogs.begin(), eogs.end()); } else { if (eos >= 0) { alleogs.push_back(eos); } } return alleogs; } static 
float LowestLogit(const std::vector & logits) { int topid = std::min_element(logits.begin(), logits.end()) - logits.begin(); float v = logits[topid]; return (v < 0 ? (v-8) : 0); } static float LowestLogit(const float *logits, size_t size) { if (size == 0) { // Handle the case of an empty array return 0.0; } int topid = std::min_element(logits, logits + size) - logits; float v = logits[topid]; return (v < 0 ? (v-8) : 0); } static std::string RemoveBell(const std::string & input) //removes the bell character { std::string word2; std::remove_copy(input.begin(), input.end(), std::back_inserter(word2), '\a'); return word2; } static std::string get_tok_vec_str(std::vector &embd) { std::string tmp = ""; for (auto id : embd) { tmp += "'" + FileFormatTokenizeID(id, file_format, true) + " (" + std::to_string(id) + ")', "; } ::utreplace(tmp, "\n", "\\n"); return tmp; } static void print_tok_vec_str(std::vector &vec) { printf("\n[%s]\n", get_tok_vec_str(vec).c_str()); } bool allExtendedUnicode(const std::string& str) { if(str.size()==0) { return false; } for (unsigned char c : str) { if (c <= 127) { return false; } } return true; } void print_fitted_params(const llama_model_params & mparams, const llama_context_params & cparams) { std::cout << "-c " << cparams.n_ctx; std::cout << " -ngl " << mparams.n_gpu_layers; size_t nd = llama_max_devices(); while (nd > 1 && mparams.tensor_split[nd - 1] == 0.0f) { nd--; } if (nd > 1) { for (size_t id = 0; id < nd; id++) { if (id == 0) { std::cout << " -ts "; } if (id > 0) { std::cout << ","; } std::cout << mparams.tensor_split[id]; } } const size_t ntbo = llama_max_tensor_buft_overrides(); for (size_t itbo = 0; itbo < ntbo && mparams.tensor_buft_overrides[itbo].pattern != nullptr; itbo++) { if (itbo == 0) { std::cout << " -ot "; } if (itbo > 0) { std::cout << ","; } std::cout << mparams.tensor_buft_overrides[itbo].pattern << "=" << ggml_backend_buft_name(mparams.tensor_buft_overrides[itbo].buft); } std::cout << "\n"; } // Find tokens that completely contain `str`, either as a single token, or as a sequence of tokens. // It's important to use a hash map for head tokens because some models have many of them. // For example, the Llama 3 tokenizer has 6570 tokens containing the period ('.') character. // Single tokens are allowed to extend past `str` at the front and back. This is to allow, for // instance, the token '.\n' to be a head for both '.' and '\n'. However if a head token // begins a multi-token sequence, the head can only extend past `str` at the beginning. The // tail tokens are generated by tokenizing the remainder. // If max_tail_len is >= 0, the maximum token length of a tail sequence is clamped to this value. static void GetOverlappingTokenSequences(const std::string& str, std::unordered_multimap>& token_sequences, int max_tail_len = -1) { bool isAllExtendedUnicode = allExtendedUnicode(str); for(int v=0;vsecond.empty()) { empty = true; break; } } if (!empty) { token_sequences.emplace(v, std::vector()); } } else { // Check whether a prefix of the string overlaps with a suffix of the token. // Just do a naive O(N^2) search, since the worst case is limited by the // maximum character length of a token in the vocabulary. 
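            // Hypothetical walk-through (illustrative only): with str = "banana" and the vocab
            // token word = "urban", word.find('b') lands at pos 2, the inner loop matches "an"
            // up to the end of the token (i = 3), and the remainder str.substr(3) = "ana" is
            // then tokenized below to form the tail sequence stored alongside this head token.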
size_t word_len = word.size(), str_len = str.size(); size_t pos = -1; while ((pos = word.find(str[0], pos + 1)) != std::string::npos) { bool match = true; size_t i; for (i = 1; i < str_len && i + pos < word_len; ++i) { if (word[pos + i] != str[i]) { match = false; break; } } if (match && !isAllExtendedUnicode) { // We matched to the end of the string. Since `str` is not contained in `word`, // there must be trailing letters in `str`. std::vector tokenization; TokenizeString(str.substr(i), tokenization, file_format, false); if (max_tail_len >= 0 && tokenization.size() > max_tail_len) { tokenization.resize(max_tail_len); } // Ensure we don't already have a duplicate matching tokenization. auto its = token_sequences.equal_range(v); bool found = false; for (auto it = its.first; it != its.second; ++it) { if (tokenization == it->second) { found = true; break; } } if (!found) { token_sequences.emplace(v, tokenization); } } } } } } // Function to convert a UTF-8 encoded string to lowercase static std::string toLowerCase(const std::string& str) { std::string result; std::locale loc; for (char ch : str) { result += std::tolower(ch, loc); // Use locale-aware tolower } return result; } bool ContextRewind(std::vector &embd, std::vector ¤t_context_tokens, int &n_past, std::vector &last_n_tokens, const int amount_rewind) { if(amount_rewind<=0 || current_context_tokens.size()==0) { return true; //do nothing } if(embd.size()>1) { printf("\nWARNING: Don't use context rewind when in batch processing phase!\n"); return false; } bool is_recurrent = false; if(file_format==FileFormat::GGUF_GENERIC) { const llama_model * mdl = llama_get_model(llama_ctx_v4); if(llama_model_is_recurrent(mdl) || llama_model_is_hybrid(mdl)) { is_recurrent = true; } } if(file_format == FileFormat::RWKV_1 || file_format==FileFormat::RWKV_2 || is_recurrent) { if(!showed_rnn_warning) { showed_rnn_warning = true; printf("\n!!!\nWARNING: RNN models do not support context rewind! 
Anti-Slop sampler will not work!\n!!!\n"); } return false; } if (amount_rewind >= last_n_tokens.size()) { last_n_tokens.clear(); } else { last_n_tokens.resize(last_n_tokens.size() - amount_rewind); } if(amount_rewind >= top_picks_history.size()) { top_picks_history.clear(); } else { top_picks_history.resize(top_picks_history.size() - amount_rewind); } if (amount_rewind >= current_context_tokens.size()) { current_context_tokens.clear(); } else { current_context_tokens.resize(current_context_tokens.size() - amount_rewind); } if (amount_rewind >= n_past) { n_past = 0; } else { n_past -= amount_rewind; } if (file_format == FileFormat::GGUF_GENERIC) { llama_memory_seq_rm(llama_get_memory(llama_ctx_v4), 0, n_past, -1); if(draft_ctx) { llama_memory_seq_rm(llama_get_memory(draft_ctx), 0, n_past, -1); } } embd.clear(); if(current_context_tokens.size()>0) { embd.push_back(current_context_tokens[current_context_tokens.size()-1]); } return true; } const char * kcpp_print_system_info(void) { ggml_cpu_init(); // some ARM features are detected at runtime static std::string s; s = ""; s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | "; s += "AVX_VNNI = " + std::to_string(ggml_cpu_has_avx_vnni()) + " | "; s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | "; s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | "; s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | "; s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | "; s += "AVX512_BF16 = " + std::to_string(ggml_cpu_has_avx512_bf16()) + " | "; s += "AMX_INT8 = " + std::to_string(ggml_cpu_has_amx_int8()) + " | "; s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | "; s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | "; s += "SVE = " + std::to_string(ggml_cpu_has_sve()) + " | "; s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | "; s += "F16C = " + std::to_string(ggml_cpu_has_f16c()) + " | "; s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | "; s += "RISCV_VECT = " + std::to_string(ggml_cpu_has_riscv_v()) + " | "; s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | "; s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | "; s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | "; s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | "; s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | "; s += "LLAMAFILE = " + std::to_string(ggml_cpu_has_llamafile()) + " | "; return s.c_str(); } //loads a model for speculative decoding. static void speculative_decoding_setup(std::string spec_model_filename, const llama_model_params & base_model_params, const llama_context_params & base_ctx_params, int base_n_vocab, const float * draft_gpusplit, int draft_gpulayers) { llama_model_params draft_model_params = llama_model_default_params(); llama_context_params draft_ctx_params = llama_context_default_params(); draft_model_params.use_mmap = base_model_params.use_mmap; draft_model_params.use_mlock = base_model_params.use_mlock; draft_model_params.use_direct_io = base_model_params.use_direct_io; draft_model_params.n_gpu_layers = draft_gpulayers; //layers offload the speculative model. 
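    // Illustrative note (assumption, not stated in the original source): draft_gpulayers follows
    // the usual llama.cpp convention, e.g. 0 keeps the draft model entirely on CPU while a value
    // at or above its layer count offloads it fully; most of the fields copied below simply mirror
    // the base model's settings so drafting runs under matching batch/thread/KV configuration.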
draft_model_params.devices = base_model_params.devices; draft_ctx_params.n_ctx = base_ctx_params.n_ctx; draft_ctx_params.offload_kqv = base_ctx_params.offload_kqv; draft_model_params.main_gpu = base_model_params.main_gpu; draft_model_params.split_mode = llama_split_mode::LLAMA_SPLIT_MODE_LAYER; draft_ctx_params.kv_unified = base_ctx_params.kv_unified; #if defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) bool ts_all_zero = true; for (int i = 0; i < tensor_split_max; ++i) { if (draft_gpusplit[i] != 0.0f) { ts_all_zero = false; break; } } if(!ts_all_zero) { printf("\nApplying Draft GPU Split...\n"); draft_model_params.tensor_split = draft_gpusplit; } #endif draft_ctx_params.n_batch = base_ctx_params.n_batch; draft_ctx_params.n_ubatch = base_ctx_params.n_ubatch; draft_ctx_params.n_threads = base_ctx_params.n_threads; draft_ctx_params.n_threads_batch = base_ctx_params.n_threads_batch; draft_ctx_params.flash_attn_type = base_ctx_params.flash_attn_type; draft_ctx_params.type_k = base_ctx_params.type_k; draft_ctx_params.type_v = base_ctx_params.type_v; draft_ctx_params.swa_full = base_ctx_params.swa_full; llama_model * draftmodel = llama_model_load_from_file(spec_model_filename.c_str(), draft_model_params); draft_ctx = llama_init_from_model(draftmodel, draft_ctx_params); if(draft_ctx == NULL) { printf("Error: failed to load speculative decoding draft model '%s'\n", spec_model_filename.c_str()); printf("Speculative Decoding will not be used!\n"); } else { const llama_vocab * tmpvocab = llama_model_get_vocab(draftmodel); int draftvocab = llama_vocab_n_tokens(tmpvocab); if(llama_model_is_recurrent(draftmodel) || llama_model_is_hybrid(draftmodel)) { printf("Error: Speculative decoding cannot be used with Recurrent draft models!\n"); llama_free(draft_ctx); draft_ctx = nullptr; } else if(draftvocab!=base_n_vocab) { if(debugmode==1) { printf("WARNING: Draft model vocab of (%d) does not match base vocab of (%d).\nIn debug mode, this restriction is bypassed. However, speculative decoding may malfunction!\n",draftvocab,base_n_vocab); } else { int diff = abs(draftvocab-base_n_vocab); if(diff <= 256) { //allow small differences to work printf("WARNING: Draft model vocab of (%d) does not match base vocab of (%d).\nSpeculative decoding may malfunction!\n",draftvocab,base_n_vocab); } else { printf("Error: Draft model vocab of (%d) is too different from base vocab of (%d). Speculative decoding cannot be used!\n",draftvocab,base_n_vocab); printf("If you REALLY want to override this, run in --debugmode and this restriction will be disabled. 
However, you might encounter unwanted results!\n"); llama_free(draft_ctx); draft_ctx = nullptr; } } } } } static speculative_draft_result speculative_decoding_eval_chunk(llama_context * draft_ctx, llama_context * main_ctx, const llama_tokens & embd, const int n_vocab, const int & n_past) { speculative_draft_result results; results.draft_success = false; if(embd.size()==0) { printf("\nERROR: Speculate on empty batch!\n"); return results; } if(embd.size()>1) { printf("\nERROR: Speculative decoding applied on large batch!\n"); return results; } int draft_npast = n_past; int actual_npast = n_past; std::vector temp_embd; std::vector drafted_ids; temp_embd.push_back(embd[0]); drafted_ids.push_back(embd[0]); for(int i=0;i real_embd = drafted_ids; real_embd.pop_back(); kcpp_embd_batch batch2 = kcpp_embd_batch(real_embd, actual_npast, use_mrope, true); auto draftok = (llama_decode(main_ctx, batch2.batch)==0); //actual eval for big model if(!draftok) { printf("\nERROR: Speculative draft model 2 failed!\n"); return results; } results.drafted_amount = 0; for(int i=0;isize > 0)) { throw std::runtime_error("No valid candidates during sampling. Current request aborted!"); } GGML_ASSERT(cur_p->size > 0); // Sort the logits in descending order if (!cur_p->sorted && do_sort) { std::sort(cur_p->data, cur_p->data + cur_p->size, [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; }); cur_p->sorted = true; } float max_l = cur_p->data[0].logit; if (!cur_p->sorted) { for (size_t i = 1; i < cur_p->size; ++i) { max_l = std::max(max_l, cur_p->data[i].logit); } } float cum_sum = 0.0f; for (size_t i = 0; i < cur_p->size; ++i) { float p = expf(cur_p->data[i].logit - max_l); cur_p->data[i].p = p; cum_sum += p; } for (size_t i = 0; i < cur_p->size; ++i) { cur_p->data[i].p /= cum_sum; } } void sample_top_k(llama_token_data_array * cur_p, int32_t k) { // TODO: move bucket sort to separate function so that top_p/tail_free/typical/softmax first is equally fast // if (k >= (int32_t)cur_p->size) { // return; // } if (k <= 0) { k = cur_p->size; } k = std::max(k, (int) 1); //min keep of 1 k = std::min(k, (int) cur_p->size); // Sort scores in descending order if (!cur_p->sorted) { auto comp = [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; }; if (k <= 128) { std::partial_sort(cur_p->data, cur_p->data + k, cur_p->data + cur_p->size, comp); } else { constexpr int nbuckets = 128; constexpr float bucket_low = -10.0f; constexpr float bucket_high = 10.0f; constexpr float bucket_scale = nbuckets/(bucket_high - bucket_low); constexpr float bucket_inter = -bucket_low * bucket_scale; std::vector bucket_idx(cur_p->size); std::vector histo(nbuckets, 0); for (int i = 0; i < (int)cur_p->size; ++i) { const float val = cur_p->data[i].logit; int ib = int(bucket_scale * val + bucket_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low); ib = std::max(0, std::min(nbuckets-1, ib)); bucket_idx[i] = ib; ++histo[ib]; } int nhave = 0; int ib = nbuckets - 1; for ( ; ib >= 0; --ib) { nhave += histo[ib]; if (nhave >= k) { break; } } std::vector tmp_tokens(nhave); auto * ptr = tmp_tokens.data(); std::vector bucket_ptrs; bucket_ptrs.reserve(nbuckets - ib); for (int j = nbuckets - 1; j >= ib; --j) { bucket_ptrs.push_back(ptr); ptr += histo[j]; } for (int i = 0; i < (int)cur_p->size; ++i) { int j = bucket_idx[i]; if (j >= ib) { *bucket_ptrs[nbuckets-1-j]++ = cur_p->data[i]; } } ptr = tmp_tokens.data(); int ndone = 0; for (int j = nbuckets-1; j > ib; --j) { std::sort(ptr, 
ptr + histo[j], comp); ptr += histo[j]; ndone += histo[j]; } std::partial_sort(ptr, ptr + k - ndone, ptr + histo[ib], comp); std::memcpy(cur_p->data, tmp_tokens.data(), k*sizeof(llama_token_data)); } cur_p->sorted = true; } cur_p->size = k; } llama_token sample_token(llama_token_data_array * candidates, std::mt19937 & rng) { sample_softmax(candidates); std::vector probs; probs.reserve(candidates->size); TopPicksData newpick; for (size_t i = 0; i < candidates->size; ++i) { probs.push_back(candidates->data[i].p); } std::discrete_distribution<> dist(probs.begin(), probs.end()); int idx = dist(rng); newpick.selected_token = FileFormatTokenizeID(candidates->data[idx].id, file_format, true); float rp1 = (candidates->data[idx].p<=0.0001?0.0001f:candidates->data[idx].p); float sprob = logf(rp1); sprob = (sprob > 999.0f?999.0f:sprob); sprob = (sprob < -999.0f?-999.0f:sprob); newpick.selected_logprob = sprob; newpick.selected_probability = candidates->data[idx].p; newpick.selected_tokenid = candidates->data[idx].id; for (size_t i = 0; (i < candidates->size && idata[i].id, file_format, true)); float rp2 = (candidates->data[i].p<=0.0001?0.0001f:candidates->data[i].p); float prob = logf(rp2); prob = (prob > 999.0f?999.0f:prob); prob = (prob < -999.0f?-999.0f:prob); newpick.logprobs.push_back(prob); newpick.p.push_back(candidates->data[i].p); newpick.tokenid.push_back(candidates->data[i].id); } top_picks_history.push_back(newpick); llama_token result = candidates->data[idx].id; return result; } llama_token sample_token_mirostat(int n_vocab, llama_token_data_array * candidates, std::mt19937 & rng, float tau, float eta, int m, float * mu) { float N = float(n_vocab); sample_softmax(candidates); // Estimate s_hat using the most probable m tokens float s_hat = 0.0; float sum_ti_bi = 0.0; float sum_ti_sq = 0.0; for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) { float t_i = logf(float(i + 2) / float(i + 1)); float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p); sum_ti_bi += t_i * b_i; sum_ti_sq += t_i * t_i; } s_hat = sum_ti_bi / sum_ti_sq; // Compute k from the estimated s_hat and target surprise value float epsilon_hat = s_hat - 1; float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat); // Sample the next word X using top-k sampling sample_top_k(candidates, int(k)); llama_token X = sample_token(candidates, rng); // Compute error as the difference between observed surprise and target surprise value size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) { return candidate.id == X; })); float observed_surprise = -log2f(candidates->data[X_idx].p); float e = observed_surprise - tau; // Update mu using the learning rate and error *mu = *mu - eta * e; return X; } llama_token sample_token_mirostat_v2(llama_token_data_array * candidates, std::mt19937 & rng, float tau, float eta, float * mu) { sample_softmax(candidates); // Truncate the words with surprise values greater than mu candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) { return -log2f(candidate.p) > *mu; })); if (candidates->size == 0) { candidates->size = 1; } // Normalize the probabilities of the remaining words sample_softmax(candidates); // Sample the next word X from the remaining words llama_token X = sample_token(candidates,rng); // Compute error as the difference between observed 
surprise and target surprise value size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) { return candidate.id == X; })); float observed_surprise = -log2f(candidates->data[X_idx].p); float e = observed_surprise - tau; // Update mu using the learning rate and error *mu = *mu - eta * e; return X; } // Top-a (remove all tokens that have softmax probability less than top_a*m^2 where m is the maximum softmax probability) // top-a 0 is off (no effect) void sample_top_a(llama_token_data_array * candidates, float a, size_t min_keep) { if (a <= 0.0f || candidates->size<=1) { return; } sample_softmax(candidates); // Compute the cumulative probabilities float maxprob = candidates->data[0].p; float threshold = a * maxprob * maxprob; //tokens with probs less than this are removed size_t last_idx = candidates->size; for (size_t i = 0; i < candidates->size; ++i) { // Go until we reach a value under the threshold float checkprob = candidates->data[i].p; if (checkprob < threshold && i >= min_keep) { last_idx = i; break; } } // printf("\n\nCandidates: %d, A:%f, MaxProb: %f, Threshold: %f, LastIdx: %d",candidates->size,a,maxprob,threshold,last_idx); // printf("\nCandidates: %f %f %f %f\n",candidates->data[0].p,candidates->data[1].p,candidates->data[2].p,candidates->data[3].p); // Resize the output vector to keep only the selected tokens candidates->size = last_idx; } void sample_xtc(llama_token_data_array * candidates, float xtc_threshold, float xtc_probability, std::mt19937 & rng) { if (xtc_threshold > 0.5f || xtc_probability <= 0.0f || candidates->size <= 1) { return; } std::uniform_real_distribution dist(0.0f, 1.0f); float roll = dist(rng); if(roll>=xtc_probability) //if dice roll fails, skip xtc { return; } sample_softmax(candidates); //calculate how many tokens cross the xtc threshold size_t last_idx = candidates->size; for (size_t i = 0; i < candidates->size; ++i) { // Go until we reach a value under the threshold float checkprob = candidates->data[i].p; if (checkprob < xtc_threshold) { last_idx = i; break; } } if(last_idx>1) //if there are 2 or more viable candidates { if (debugmode==1 && !is_quiet) { printf("XTC penalties ["); } // then remove all other tokens above threshold EXCEPT the least likely one for (size_t i = 0; i < last_idx - 1; ++i) { if (debugmode==1 && !is_quiet) { gpt_vocab::id token = candidates->data[i].id; std::string tokenizedstr = FileFormatTokenizeID(token, file_format); ::utreplace(tokenizedstr, "\n", "\\n"); printf("%s(%s %.02f%%)", i == 0 ? 
"" : " ", RemoveBell(tokenizedstr).c_str(), 100.f * candidates->data[i].p); } candidates->data[i].logit -= 999.0f; //infinity gets wonky results downstream, this hack works well enough } if (debugmode==1 && !is_quiet) { printf("]\n"); } candidates->sorted = false; } //otherwise xtc does not do anything // printf("\n\nCandidates: %d, Threshold: %f, LastIdx: %d",candidates->size,xtc_threshold,last_idx); // printf("\nCandidates: %f %f %f %f\n",candidates->data[0].p,candidates->data[1].p,candidates->data[2].p,candidates->data[3].p); } void sample_dry(int n_ctx, int penalty_range, float penalty_multiplier, float penalty_base, int allowed_length, const std::unordered_multimap>& restart_sequences, llama_token_data_array * candidates) { if (penalty_multiplier <= 0.0f || penalty_base <= 0.0f) { return; } if (penalty_range <= 0 || penalty_range>n_ctx) { penalty_range = n_ctx; } auto last_n_repeat = std::min(std::min((int)current_context_tokens.size(), penalty_range), n_ctx); if (last_n_repeat <= allowed_length) { return; } const llama_token * last_tokens = current_context_tokens.data() + current_context_tokens.size() - last_n_repeat; dry_repeat_count.assign(last_n_repeat, 0); dry_max_token_repeat.clear(); // Step 1: Look for restart sequences to limit the maximum repetition length. // Work backwards through the context looking for any token that begins a restart sequence. // // The collection `restart_sequences` is a mapping from a "head" token to all "tail" // sequences that together comprise a restart sequence. This allows us to quickly check // whether each token is the head of a complete sequence. Most restart sequences are actually // a single token, and for these the "tail" is an empty vector. // // If the token is a "head", test all restart sequences that begin with this token // (there will often only be one sequence for each token, but if sequences like 'aaaq1' and // 'aaa1' are used as restart strings, both could start with 'aaa' when tokenized). The // longest matching sequence (if any) is used to limit the maximum repetition length. // // Note that in the case case of a short sequence contained in a longer one, this might fail to // find the smallest value for `rep_limit`. For example, if 'amniotic' and 'ni' are both used as // restart sequences, 'ni' will be found first, and since it's shorter it will fail to suppress // 'otic'. This is a minor issue since fully contained restart sequences are likely to be rare. // // This is theoretically worst-case O(N^2) for arbitrary restart sequences, which is why we // have already clamped the maximum tail sequence length when generating `restart_sequences`. // With clamping, this scan is O(N) in the context length. int rep_limit = last_n_repeat; for (size_t i = 0; i < last_n_repeat; ++i) { size_t ix = last_n_repeat - 1 - i; auto its = restart_sequences.equal_range(last_tokens[ix]); if (its.first == restart_sequences.end()) { continue; } int longest_match = -1; for (auto it = its.first; it != its.second; ++it) { // Note that (*it) does not contain the head character, so seq_len will be // the restart sequence length minus 1. // In the common case of a single-token restart sequence, (*it) will be empty // and we will trivially match. int seq_len = (int)it->second.size(); if (seq_len > longest_match && seq_len <= i) { bool match = true; for (size_t offset = 0; offset < seq_len; ++offset) { // The +1 when indexing `last_tokens` is because we already matched the head. 
if (it->second[offset] != last_tokens[ix + 1 + offset]) { match = false; break; } } if (match) { longest_match = seq_len; } } } if (longest_match >= 0) { // We found a restart sequence starting `i` tokens from the end and continuing for // `longest_match` tokens. rep_limit = (int)i - longest_match; break; } } if (rep_limit <= allowed_length) { return; } // Step 2: Iterate in reverse over the last N tokens of the context, using the "Z-algorithm" (in // the reverse direction) to efficiently compute the positions and lengths of suffixes appearing // elsewhere in the context. We limit the suffix length to `rep_limit` to respect restart sequences. // // This algorithm is not currently documented on Wikipedia, but there is a clear description here: // https://ivanyu.me/blog/2014/10/15/z-algorithm/ // // The code below is adapted from the public domain implementation by the same author here: // https://github.com/ivanyu/string-algorithms/blob/master/z_algorithm.py // // Example: // Last N tokens: a b c c b c y a b c // Repeat counts: 0 0 3 1 0 2 0 0 0 0 // ^ // This `3` means that the last three tokens of the context (a b c) also appear here. // // This step is worst case O(N) since the Z-algorithm is linear, despite the appearance of nested // for/while loops. This can be seen by observing that the `lt` and `rt` bounds are set after each // repeated suffix is detected (i.e. after each while loop when n > 0). These bound variables // ensure that the inner while loops only examine each token in the context once as the outer // for loop iterates over the context. { const int last = last_n_repeat - 1; int rt = 0, lt = 0; for (int k = 1; k < last_n_repeat; ++k) { if (k > rt) { // If k is outside the current Z-box, do naive computation. int n = 0; while (n + k < last_n_repeat && last_tokens[last - n] == last_tokens[last - (n+k)]) { ++n; } dry_repeat_count[last - k] = std::min(n, rep_limit); if (n > 0) { lt = k; rt = k+n-1; } } else { // If k is inside the current Z-box, consider two cases. int p = k - lt; // Pair index. int right_part_len = rt - k + 1; if (dry_repeat_count[last - p] < right_part_len) { int n = std::min(dry_repeat_count[last - p], rep_limit); dry_repeat_count[last - k] = n; } else { int i = rt + 1; while (i < last_n_repeat && last_tokens[last - i] == last_tokens[last - (i - k)]) { i += 1; } int n = std::min(i - k, rep_limit); dry_repeat_count[last - k] = n; lt = k; rt = i - 1; } } } } // Step 3: Iterate over dry_repeat_count and last_tokens, examining the maximum repeat length // that would be generated by emitting each new token that would extend a sequence. // // Following the same example as above: // Last N tokens: a b c c b c y a b c // Repeat counts: 0 0 3 1 0 2 0 0 0 0 // // For each non-zero, look ahead one token. This token, if emitted, would extend the repetition. // c: 3 -> 4 (from `a b c` to `a b c c`) // b: 1 -> 2 (from `c` to `c b`) // y: 2 -> 3 (from `b c` to `b c y`) for (size_t i = 0; i < last_n_repeat - 1; ++i) { int repeat_len = dry_repeat_count[i]; if (repeat_len >= allowed_length) { // This token ends a repeat, so the next token would continue one. // By convention, the value of `repeat_len` only includes the tokens currently // in the context, not the new token that would be added. gpt_vocab::id token = last_tokens[i + 1]; // Track the maximum sequence ending in this token. 
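                // Continuing the worked example above (illustrative, assuming allowed_length = 2):
                // only repeat lengths 3 (followed by 'c') and 2 (followed by 'y') qualify, so
                // dry_max_token_repeat ends up as {c: 3, y: 2}, while 'b' (repeat length 1) is skipped.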
const auto& it = dry_max_token_repeat.find(token); if (it == dry_max_token_repeat.end() || it->second < repeat_len) { dry_max_token_repeat[token] = repeat_len; } } } // Step 4: Apply logit penalties based on the maximum repeat length for relevant tokens. // Prevent floating point overflow in `pow(penalty_base, exponent)` by clamping to `max_exponent`. // Compute it from `penalty_base` and the approximate log of `std::numeric_limits::max()` const float FLOAT_MAX_LOG = 88.7228391f; int max_exponent = 0; if (penalty_base > 1.000001f) { max_exponent = FLOAT_MAX_LOG / std::log(penalty_base); } if (debugmode==1 && !is_quiet && !dry_max_token_repeat.empty()) { printf("DRY penalties ["); } size_t count = 0; for (const auto& kvp: dry_max_token_repeat) { gpt_vocab::id token = kvp.first; int repeat_exp = kvp.second - allowed_length; if (max_exponent > 0 && repeat_exp > max_exponent) { repeat_exp = max_exponent; } float penalty = penalty_multiplier * pow(penalty_base, repeat_exp); if (debugmode==1 && !is_quiet) { std::string tokenizedstr = FileFormatTokenizeID(token, file_format); ::utreplace(tokenizedstr, "\n", "\\n"); printf("%s(%s %.02f)", count == 0 ? "" : " ", RemoveBell(tokenizedstr).c_str(), penalty); } candidates->data[token].logit -= penalty; ++count; } if(count>0) { candidates->sorted = false; } if (debugmode==1 && !is_quiet && !dry_max_token_repeat.empty()) { printf("]\n"); } } void sample_adaptive_p( float target, // desired average probability (0..1), <=0 disables float & weighted_sum, // persistent EMA state float & total_weight, // persistent EMA state llama_token_data_array * cur_p) { const float width = 0.3; // DISTRIBUTION_WIDTH const float peak_logit = 5.0; // PEAK_LOGIT_VALUE const float inv_width = 1.0f / width; // INV_WIDTH if (target <= 0.0f || cur_p->size == 0) { return; } // target is the desired average probability for selected tokens (0.0 to 1.0) // higher values favor more probable tokens (more stable and predictable) // lower values favor less probable tokens (more creative) sample_softmax(cur_p); // compute the adapted target probability for the current sampling step float computed_target = std::clamp(total_weight == 0.0f ? 
target : 2.0f * target - (weighted_sum / total_weight),0.0f, 1.0f); // adaptive p transform const float k = 10.0f; // controls sharpness for (size_t i = 0; i < cur_p->size; ++i) { float dist = (cur_p->data[i].p - computed_target) * inv_width; float abs_dist = fabs(dist); cur_p->data[i].logit = peak_logit - k * abs_dist * (abs_dist / (1.0f + abs_dist)); } cur_p->sorted = false; sample_softmax(cur_p); //update EMA history AFTER sampling, update_adaptive_p_history(original_prob[idx]) } inline void adaptive_p_update_history(float selected_token_prob, float & weighted_sum, float & total_weight, float adaptive_decay) { // decay controls how quickly history influence fades (0.0 to 0.99) // lower values = faster adaptation, more reactive to recent tokens // higher values = slower adaptation, more stable over time // keep <= 0.99 to prevent unbounded accumulation weighted_sum = selected_token_prob + adaptive_decay * weighted_sum; total_weight = 1.0f + adaptive_decay * total_weight; } void sample_rep_pen(int n_ctx, int rep_pen_range, float rep_pen, float rep_pen_slope, float presence_penalty, llama_token_data_array * candidates_p) { auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), rep_pen_range), n_ctx); const llama_token * last_tokens = last_n_tokens.data() + last_n_tokens.size() - last_n_repeat; size_t last_tokens_size = last_n_repeat; llama_token_data_array * candidates = candidates_p; if (last_tokens_size == 0 || (rep_pen == 1.0f && presence_penalty==0)) { return; } const int64_t t_start_sample_us = ggml_time_us(); // Create a frequency map to count occurrences of each token in last_tokens std::unordered_set tokens_near(last_tokens + last_n_repeat / 2, last_tokens + last_n_repeat); std::unordered_set tokens_far(last_tokens, last_tokens + last_n_repeat / 2); float rep_pen_reduced = rep_pen; if(rep_pen_reduced>1.0f) { rep_pen_reduced = 1.0f + ((rep_pen-1.0f)*rep_pen_slope); } for (size_t i = 0; i < candidates->size; ++i) { const bool token_in_near = tokens_near.find(candidates->data[i].id) != tokens_near.end(); const bool token_in_far = tokens_far.find(candidates->data[i].id) != tokens_far.end(); if (!token_in_near && !token_in_far) { continue; } float penalty = (token_in_near?rep_pen:rep_pen_reduced); // The academic publication that described this technique actually just only divided, but that would cause tokens with negative logits to become more likely, which is obviously wrong. // This is common fix for this problem, which is to multiply by the penalty instead of dividing. 
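        // Worked example (illustrative): with rep_pen = 1.2, a recently seen token with logit 4.0
        // is penalised to 4.0 / 1.2 (about 3.33), while one with logit -2.0 becomes -2.0 * 1.2 = -2.4;
        // dividing the negative logit instead would have raised it to about -1.67, i.e. rewarded it.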
if (candidates->data[i].logit <= 0) { candidates->data[i].logit *= penalty; } else { candidates->data[i].logit /= penalty; } candidates->data[i].logit -= presence_penalty; } candidates->sorted = false; } void sample_top_p(llama_token_data_array * cur_p, float p, size_t min_keep) { if (p >= 1.0f) { return; } sample_softmax(cur_p); // Compute the cumulative probabilities float cum_sum = 0.0f; size_t last_idx = cur_p->size; for (size_t i = 0; i < cur_p->size; ++i) { cum_sum += cur_p->data[i].p; // Check if the running sum is at least p or if we have kept at least min_keep tokens // we set the last index to i+1 to indicate that the current iterate should be included in the set if (cum_sum >= p && i + 1 >= min_keep) { last_idx = i + 1; break; } } // Resize the output vector to keep only the top-p tokens cur_p->size = last_idx; } void sample_min_p(llama_token_data_array * cur_p, float p, size_t min_keep) { if (p <= 0.0f || !cur_p->size) { return; } bool min_p_applied = false; // if the cur_p aren't sorted, try the unsorted implementation first if (!cur_p->sorted) { std::vector filtered_tokens; float max_logit = -FLT_MAX; for (size_t i = 0; i < cur_p->size; ++i) { max_logit = std::max(max_logit, cur_p->data[i].logit); } const float min_logit = max_logit + logf(p); // min logit for p_i >= p * p_max for (size_t i = 0; i < cur_p->size; ++i) { if (cur_p->data[i].logit >= min_logit) { filtered_tokens.push_back(cur_p->data[i]); } } // if we have enough values the operation was a success if (filtered_tokens.size() >= min_keep) { memcpy(cur_p->data, filtered_tokens.data(), filtered_tokens.size()*sizeof(llama_token_data)); cur_p->size = filtered_tokens.size(); min_p_applied = true; } } // if the cur_p are sorted or the unsorted implementation failed, use this implementation if (!min_p_applied) { // Sort the logits in descending order if (!cur_p->sorted) { std::sort(cur_p->data, cur_p->data + cur_p->size, [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; }); cur_p->sorted = true; } const float min_logit = cur_p->data[0].logit + logf(p); // min logit for p_i >= p * p_max size_t i = 1; // first token always matches for (; i < cur_p->size; ++i) { if (cur_p->data[i].logit < min_logit && i >= min_keep) { break; // prob too small } } // Resize the output vector to keep only the matching tokens cur_p->size = i; } } void sample_tail_free(llama_token_data_array * cur_p, float z, size_t min_keep) { if (z >= 1.0f || cur_p->size <= 2) { return; } sample_softmax(cur_p); // Compute the first and second derivatives std::vector second_derivatives(cur_p->size - 2); float second_derivatives_sum = 0.0f; for (size_t i = 0; i < second_derivatives.size(); ++i) { float first_derivatives_1 = cur_p->data[i].p - cur_p->data[i + 1].p; float first_derivatives_2 = cur_p->data[i + 1].p - cur_p->data[i + 2].p; second_derivatives[i] = std::abs(first_derivatives_1 - first_derivatives_2); second_derivatives_sum += second_derivatives[i]; } // Normalize the second derivatives if (second_derivatives_sum > 1e-6f) { for (float & value : second_derivatives) { value /= second_derivatives_sum; } } else { for (float & value : second_derivatives) { value = 1.0f / second_derivatives.size(); } } float cum_sum = 0.0f; size_t last_idx = cur_p->size; for (size_t i = 0; i < second_derivatives.size(); ++i) { cum_sum += second_derivatives[i]; // Check if the running sum is greater than z or if we have kept at least min_keep tokens if (cum_sum > z && i >= min_keep) { last_idx = i; break; } } // Resize the output vector to 
keep only the tokens above the tail location cur_p->size = last_idx; } void sampler_typical(llama_token_data_array * cur_p, float p, size_t min_keep) { // Reference implementation: // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr if (p >= 1.0f) { return; } // Compute the softmax of logits and calculate entropy sample_softmax(cur_p); float entropy = 0.0f; for (size_t i = 0; i < cur_p->size; ++i) { if(cur_p->data[i].p>0) { entropy += -cur_p->data[i].p * logf(cur_p->data[i].p); } } // Compute the absolute difference between negative log probability and entropy for each candidate std::vector shifted_scores; for (size_t i = 0; i < cur_p->size; ++i) { float shifted_score = fabsf(-logf(cur_p->data[i].p) - entropy); shifted_scores.push_back(shifted_score); } // Sort tokens based on the shifted_scores and their corresponding indices std::vector indices(cur_p->size); std::iota(indices.begin(), indices.end(), 0); std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) { return shifted_scores[a] < shifted_scores[b]; }); // Compute the cumulative probabilities float cum_sum = 0.0f; size_t last_idx = indices.size(); for (size_t i = 0; i < indices.size(); ++i) { size_t idx = indices[i]; cum_sum += cur_p->data[idx].p; // Check if the running sum is greater than typical or if we have kept at least min_keep tokens if (cum_sum > p && i >= min_keep - 1) { last_idx = i + 1; break; } } // Resize the output vector to keep only the locally typical tokens std::vector cur_p_new; for (size_t i = 0; i < last_idx; ++i) { size_t idx = indices[i]; cur_p_new.push_back(cur_p->data[idx]); } // Replace the data in cur_p with the cur_p_new data std::copy(cur_p_new.begin(), cur_p_new.end(), cur_p->data); cur_p->size = cur_p_new.size(); cur_p->sorted = false; } void sample_top_n_sigma(llama_token_data_array * cur_p, float nsigma) { if (nsigma <= 0.0f || cur_p->size <= 1) { return; } // find max logit and calculate mean float nsigmax = cur_p->data[0].logit; float logits_sum = 0; for (size_t i = 0; i < cur_p->size; ++i) { if (cur_p->data[i].logit > nsigmax) { nsigmax = cur_p->data[i].logit; } logits_sum += cur_p->data[i].logit; } float nsigmean = logits_sum / cur_p->size; // calculate standard deviation float nsigacc = 0; for (size_t i = 0; i < cur_p->size; ++i) { nsigacc += pow(cur_p->data[i].logit - nsigmean, 2); } float nsigstd = sqrt(nsigacc / cur_p->size); //apply mask auto last = std::remove_if(cur_p->data, cur_p->data + cur_p->size, [&](auto & tk) { return tk.logit < nsigmax - (nsigma * nsigstd); }); cur_p->size = last - cur_p->data; sample_softmax(cur_p); } void sample_entropy(llama_token_data_array * cur_p, float min_temp, float max_temp, float exponent_val, float smoothing_factor, float smoothing_curve) { // no need to do anything if there is only one (or zero) candidates if (cur_p->size <= 1) { return; } // Calculate maximum possible entropy float max_entropy = -logf(1.0f / cur_p->size); sample_softmax(cur_p); // Calculate entropy of the softmax probabilities float entropy = 0.0f; for (size_t i = 0; i < cur_p->size; ++i) { float prob = cur_p->data[i].p; if (prob > 0.0f) { // Ensure no log(0) entropy -= prob * logf(prob); } } // Normalize the entropy (max_entropy cannot be 0 here because we checked cur_p->size != 1 above) float normalized_entropy = entropy / max_entropy; // Map the normalized entropy to the desired temperature range using the power function float dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent_val); // Apply 
the dynamically calculated temperature scaling for (size_t i = 0; i < cur_p->size; ++i) { cur_p->data[i].logit /= dyn_temp; } // Re-compute softmax probabilities after scaling logits with dynamic temperature const double max_l_double = cur_p->data[0].logit; double cum_sum_double = 0.0; for (size_t i = 0; i < cur_p->size; ++i) { double p = exp(cur_p->data[i].logit - max_l_double); cur_p->data[i].p = p; // Store the scaled probability cum_sum_double += p; } for (size_t i = 0; i < cur_p->size; ++i) { cur_p->data[i].p /= cum_sum_double; // Re-normalize the probabilities } // Only apply smoothing if smoothing_factor is > 0. Do not change base implementation otherwise. if (smoothing_factor > 0 && cur_p->size > 1) { sample_softmax(cur_p); float h = cur_p->data[0].logit; // Find the maximum logit for h to be added after the transformation // Apply the modified quadratic transformation using the smoothing_factor and smoothing_curve for (size_t i = 0; i < cur_p->size; ++i) { float logit_shifted = cur_p->data[i].logit - h; float k = (3 - smoothing_curve) / 2; float s = (smoothing_curve - 1) / 2; cur_p->data[i].logit = -(k * smoothing_factor * logit_shifted * logit_shifted) + (s * smoothing_factor * logit_shifted * logit_shifted * logit_shifted) + h; } sample_softmax(cur_p); } } void sample_temperature(llama_token_data_array * candidates_p, float temp, float smoothing_factor, float smoothing_curve) { if (temp <= 0) { sample_top_k(candidates_p, 1); //only want first candidate return; } for (size_t i = 0; i < candidates_p->size; ++i) { candidates_p->data[i].logit /= temp; } // Only apply smoothing if smoothing_factor is > 0. Do not change base implementation otherwise. if (smoothing_factor > 0 && candidates_p->size > 1) { sample_softmax(candidates_p); float h = candidates_p->data[0].logit; // Find the maximum logit for h to be added after the transformation // Apply the modified quadratic transformation using the smoothing_factor and smoothing_curve for (size_t i = 0; i < candidates_p->size; ++i) { float logit_shifted = candidates_p->data[i].logit - h; float k = (3 - smoothing_curve) / 2; float s = (smoothing_curve - 1) / 2; candidates_p->data[i].logit = -(k * smoothing_factor * logit_shifted * logit_shifted) + (s * smoothing_factor * logit_shifted * logit_shifted * logit_shifted) + h; } sample_softmax(candidates_p); } } void sample_grammar(FileFormat file_format, int32_t n_vocab, llama_token_data_array * candidates, const struct llama_grammar * grammar) { const int64_t t_start_sample_us = ggml_time_us(); bool allow_eos = false; for (const auto & stack : grammar->stacks) { if (stack.empty()) { allow_eos = true; break; } } const std::vector eog_tokens = GetEogIDs(file_format,n_vocab); std::vector, llama_partial_utf8>> candidates_decoded; std::vector candidates_grammar; std::vector rejects; candidates_decoded.reserve(candidates->size); candidates_grammar.reserve(candidates->size); rejects.assign(candidates->size, false); for (size_t i = 0; i < candidates->size; ++i) { const llama_token id = candidates->data[i].id; const std::string piece = FileFormatTokenizeID(id,file_format); bool found_eog = std::find(eog_tokens.begin(), eog_tokens.end(), id) != eog_tokens.end(); if (found_eog) { if (!allow_eos) { rejects[i] = true; } } else if (piece.empty() || piece[0] == 0) { rejects[i] = true; } else { candidates_decoded.push_back(decode_utf8(piece.c_str(), grammar->partial_utf8)); candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second }); } } for (auto reject: 
llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar)) { rejects[reject.index] = true; } auto first = candidates->data; auto last = first + candidates->size; last = std::remove_if(first, last, [&](const llama_token_data & tk){ return rejects[&tk - first]; }); // tk.logit == -INFINITY; }); candidates->size = last - first; } void sample_guidance(struct llama_context * ctx, struct llama_context * guidance_ctx, int n_vocab, float scale) { float * guidanceLogitsPtr = llama_get_logits(guidance_ctx); float * mainLogitsPtr = llama_get_logits(ctx); if (scale < 0) { scale = 0; } if(debugmode==1 && !is_quiet) { int topidx1 = std::max_element(mainLogitsPtr, mainLogitsPtr + n_vocab) - mainLogitsPtr; int topidx2 = std::max_element(guidanceLogitsPtr, guidanceLogitsPtr + n_vocab) - guidanceLogitsPtr; printf("\nMain: (id:%d val:%f data:%s) Guided: (id:%d val:%f data:%s)\n", topidx1, mainLogitsPtr[topidx1], FileFormatTokenizeID(topidx1, file_format, true).c_str(), topidx2, guidanceLogitsPtr[topidx2], FileFormatTokenizeID(topidx2, file_format, true).c_str()); } for (int i = 0; i < n_vocab; ++i) { float logit_guidance = guidanceLogitsPtr[i]; float logit_main = mainLogitsPtr[i]; mainLogitsPtr[i] = scale * (logit_main-logit_guidance) + logit_guidance; } } int SampleLogits(const float * logits, int n_ctx, int n_vocab, int rep_pen_range, float rep_pen, float rep_pen_slope, float presence_penalty, float top_k, float top_a, float top_p, float min_p, float typical_p, float tfs, float nsigma, float temp, std::mt19937 & rng, int mirostat, float mirostat_tau, float mirostat_eta, float dry_multiplier, float dry_base, int dry_allowed_length, int dry_penalty_last_n, float xtc_threshold, float xtc_probability, const std::vector & sampler_order, llama_grammar * grammar, float dynatemp_range, float dynatemp_exponent, float smoothing_factor, float smoothing_curve, float adaptive_target) { // printf("SampleLogits called with: n_ctx=%d, n_vocab=%d, rep_pen_range=%d, rep_pen=%f, rep_pen_slope=%f, presence_penalty=%f, top_k=%f, top_a=%f, top_p=%f, min_p=%f, typical_p=%f, tfs=%f, nsigma=%f, temp=%f, mirostat=%d, mirostat_tau=%f, mirostat_eta=%f, dry_multiplier=%f, dry_base=%f, dry_allowed_length=%d, dry_penalty_last_n=%d, xtc_threshold=%f, xtc_probability=%f, sampler_order_size=%zu, dynatemp_range=%f, dynatemp_exponent=%f, smoothing_factor=%f\n", // n_ctx, n_vocab, rep_pen_range, rep_pen, rep_pen_slope, presence_penalty, top_k, top_a, top_p, min_p, typical_p, tfs, nsigma, temp, mirostat, mirostat_tau, mirostat_eta, dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n, xtc_threshold, xtc_probability, sampler_order.size(), dynatemp_range, dynatemp_exponent, smoothing_factor); int id = 0; std::vector candidates; candidates.reserve(n_vocab); for (llama_token token_id = 0; token_id < n_vocab; token_id++) { candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f}); } for(int i=0;i precache = (use_grammar ? 
std::vector(candidates) : std::vector(0)); sample_top_k(&candidates_p, 3000); if (use_grammar) { sample_grammar(file_format, n_vocab, &candidates_p, grammar); // if top_k 3000 doesn't contain a valid candidate for this grammar, try again pre-cull if (candidates_p.size <= 0) { candidates_p = { precache.data(), precache.size(), false }; sample_grammar(file_format, n_vocab, &candidates_p, grammar); sample_top_k(&candidates_p, 3000); } } if (mirostat == 1 || mirostat == 2) { static float mirostat_mu = 2.0f * mirostat_tau; const int mirostat_m = 100; sample_rep_pen(n_ctx, rep_pen_range, rep_pen, rep_pen_slope, presence_penalty, &candidates_p); sample_temperature(&candidates_p, temp, smoothing_factor, smoothing_curve); if (mirostat == 1) { id = sample_token_mirostat(n_vocab, &candidates_p, rng, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu); } else { id = sample_token_mirostat_v2(&candidates_p, rng, mirostat_tau, mirostat_eta, &mirostat_mu); } } else { for (int i = 0; i < sampler_order.size(); i++) { switch (sampler_order[i]) { case KCPP_SAMPLER_TOP_K: sample_top_k(&candidates_p, top_k); break; case KCPP_SAMPLER_TOP_A: sample_top_a(&candidates_p, top_a, 1); break; case KCPP_SAMPLER_TOP_P: sample_top_p(&candidates_p, top_p, 1); sample_min_p(&candidates_p, min_p, 1); break; case KCPP_SAMPLER_TFS: sample_tail_free(&candidates_p, tfs, 1); break; case KCPP_SAMPLER_TYP: sampler_typical(&candidates_p, typical_p, 1); break; case KCPP_SAMPLER_TEMP: if (dynatemp_range!=0) { float dynatemp_min = temp - dynatemp_range; float dynatemp_max = temp + dynatemp_range; //do not allow negative values dynatemp_min = dynatemp_min<0?0:dynatemp_min; dynatemp_max = dynatemp_max<0?0:dynatemp_max; dynatemp_exponent = dynatemp_exponent<0?0:dynatemp_exponent; sample_entropy(&candidates_p, dynatemp_min, dynatemp_max, dynatemp_exponent, smoothing_factor, smoothing_curve); } else { sample_temperature(&candidates_p, temp, smoothing_factor, smoothing_curve); } if (nsigma > 0.0f) { sample_top_n_sigma(&candidates_p, nsigma); } break; case KCPP_SAMPLER_REP_PEN: sample_rep_pen(n_ctx, rep_pen_range, rep_pen, rep_pen_slope, presence_penalty, &candidates_p); break; default: printf("\nSampleLogits: Unknown Sampler : %d",sampler_order[i]); break; } } //xtc always last sample_xtc(&candidates_p, xtc_threshold, xtc_probability, rng); //adaptive p must be last, it messes up all probs sample_adaptive_p(adaptive_target, adaptive_p_weighted_sum, adaptive_p_total_weight, &candidates_p); id = sample_token(&candidates_p, rng); } return id; } static void grammar_accept_token(FileFormat file_format, int32_t n_vocab, struct llama_grammar * grammar, llama_token token) { const std::vector eog_tokens = GetEogIDs(file_format,n_vocab); bool found_eog = std::find(eog_tokens.begin(), eog_tokens.end(), token) != eog_tokens.end(); if (found_eog) { for (const auto & stack : grammar->stacks) { if (stack.empty()) { return; } } GGML_ASSERT(false); } const std::string piece = FileFormatTokenizeID(token,file_format); // Note terminating 0 in decoded string const auto decoded = decode_utf8(piece.c_str(), grammar->partial_utf8); const auto & code_points = decoded.first; for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) { llama_grammar_accept(grammar, *it); } grammar->partial_utf8 = decoded.second; GGML_ASSERT(!grammar->stacks.empty()); } static void load_grammar(const std::string & gammarstr) { if(grammar!=nullptr) //on demand free when next grammar is loaded { llama_grammar_reset_memos(); llama_grammar_free_impl(grammar); 
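        // Illustrative note (assumption): because the old grammar is freed here and only re-created
        // below for a non-empty string, passing an empty grammar string leaves `grammar` as nullptr,
        // which disables grammar-constrained sampling until a new grammar is loaded.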
grammar = nullptr; } if (!gammarstr.empty()) { parsed_grammar = llama_grammar_parser(); parsed_grammar.parse(gammarstr.c_str()); // will be empty (default) if there are parse errors if (parsed_grammar.rules.empty()) { printf("\nIgnored invalid grammar sampler."); return; } if(debugmode==1 && !is_quiet) { parsed_grammar.print(stderr); } std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules()); grammar = llama_grammar_init_impl(nullptr,grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root")); } } static bool kcpp_eval_media(llama_context * ctx_llama, const media_chunk & mediachunk, int n_batch, int * n_past, bool is2d) { float * img_embd = mediachunk.clp_img_embd; int num_img_tokens = mediachunk.clp_image_tokens; int img_nx = mediachunk.nx; int img_ny = mediachunk.ny; int n_embd_mmproj = llama_model_n_embd_inp(llama_get_model(ctx_llama)); const int image_n_past = *n_past; kcpp_embd_batch media_batch = kcpp_embd_batch(img_embd, num_img_tokens, image_n_past, use_mrope, is2d, img_nx, img_ny); for (int i = 0; i < num_img_tokens; i += n_batch) { const int n_eval = std::min(n_batch, num_img_tokens - i); llama_batch batch_embd_view = media_batch.get_view(i, n_eval, n_embd_mmproj); if (llama_decode(ctx_llama, batch_embd_view)) { fprintf(stderr, "\n%s : failed to eval image\n", __func__); return false; } } *n_past += num_img_tokens; return true; } //counts the number of matching prefix tokens between two sequences, returns percentage matched 0.0 to 1.0 float ComputePrefixMatchPercent(std::vector<int> &current_context_tokens, std::vector<int> &new_context_tokens) { int match_count = 0; size_t min_length = std::min(current_context_tokens.size(), new_context_tokens.size()); for (size_t i = 0; i < min_length; ++i) { if (current_context_tokens[i] == new_context_tokens[i]) { match_count++; } else { break; } } // Handle case where both sequences are empty to avoid division by zero if (min_length == 0) { return 0.0f; // Both empty sequences are considered not matched } return static_cast<float>(match_count) / static_cast<float>(min_length); } //returns true if and only if sequence 1 is fully contained within the starting of sequence 2 bool FullyContainedPrefix(std::vector<int> &sequence1, std::vector<int> &sequence2) { if (sequence1.size() > sequence2.size() || sequence1.size()==0 || sequence2.size()==0) { return false; } for (size_t i = 0; i < sequence1.size(); ++i) { if (sequence1[i] != sequence2[i]) { return false; } } return true; } //given an old GGUF context and a new context that has some middle portion removed, //find and remove the middle portion from the old context from the KV.
Does not fast forward after this destructive action //returns true if contextshift is doable, executes it if dryrun is false bool DoContextShifting(llama_context * ctx, llama_context * draft_ctx, std::vector<int> &current_context_tokens, std::vector<int> &new_context_tokens, const int genamt, const int nctx, bool dryrun) { //scan from start old and new ctx, until first mismatch found, save as p0 //check remaining old and new ctx for longest common subseq, which needs to be at least 256 tokens //test: longest common subseq (LCQ) MUST start within 0 tokens from end of memory, otherwise purge fails //if passed, save beginning of LCQ from old ctx as p1 //remove all tokens from old ctx between p0 and p1, updating both arrays and kv, then continue as normal const int ShortfallThreshold = 200 + std::min((nctx/30),140); //dont trigger shifting if the distance between trimstart and currhead < this const int SlackAllowance = 60 + std::min((nctx/60),70); //in case the end text is slightly modified, be forgiving int trimstart = 0; int new_tokens_len = new_context_tokens.size(); bool purgeneeded = true; for (int i = 0; i < current_context_tokens.size(); ++i) { if (current_context_tokens[i] == new_context_tokens[i]) { trimstart += 1; } else { break; } if ((i + 2) >= new_tokens_len) { purgeneeded = false; break; //no surgery required } } if(!purgeneeded || new_tokens_len < 6 || current_context_tokens.size() < 6 || new_tokens_len - trimstart < ShortfallThreshold) { return false; //no purge is needed } //at least this many tokens need to match, otherwise don't bother trimming const int LCSTokThreshold = std::max(std::min((new_tokens_len - trimstart) - (genamt+SlackAllowance), (int)(nctx*0.45)), ShortfallThreshold-SlackAllowance); auto curr_ctx_without_memory = std::vector<int>(current_context_tokens.begin() + trimstart, current_context_tokens.end()); auto new_ctx_without_memory = std::vector<int>(new_context_tokens.begin() + trimstart, new_context_tokens.end()); auto shared = LongestCommonSubseq(curr_ctx_without_memory, new_ctx_without_memory); //printf("\nSharedSize: %d, LCSTokThreshold: %d, ArrPass: %d\n",shared.size(),LCSTokThreshold,ArrStartWith(new_ctx_without_memory, shared)); if (shared.size() > LCSTokThreshold && ArrStartWith(new_ctx_without_memory, shared)) // enough tokens in common { int found = ArrFindIndexOf(current_context_tokens,shared); if(found>=0 && found > trimstart) { bool ok = true; if(!dryrun) { //extract the unwanted tokens out from context and KV int diff = found - trimstart; ok = llama_memory_seq_rm(llama_get_memory(ctx), 0, trimstart, trimstart + diff); llama_memory_seq_add(llama_get_memory(ctx), 0, trimstart + diff, -1, -diff); if(draft_ctx) { llama_memory_seq_rm(llama_get_memory(draft_ctx), 0, trimstart, trimstart + diff); llama_memory_seq_add(llama_get_memory(draft_ctx), 0, trimstart + diff, -1, -diff); } for (size_t i = trimstart + diff; i < current_context_tokens.size() - 1; i++) { current_context_tokens[i - diff] = current_context_tokens[i]; } if(ok) { printf("\n[Context Shifting: Erased %d tokens at position %d]", diff, trimstart + 1); } else { printf("\n[Warning: Context Shifting FAILED to erase %d tokens at position %d]", diff, trimstart + 1); } current_context_tokens.resize(current_context_tokens.size() - diff); } return true; } } return false; }
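//Rough illustration of the purge above (toy tokens only; a real call would also need to clear the
//ShortfallThreshold and LCSTokThreshold checks, which require much longer sequences):
//  old context = [S, A, B, C, D, E, F, G]   new context = [S, A, E, F, G, H]
//  - the matching prefix is [S, A], so trimstart (p0) = 2
//  - the longest common subseq of the remainders [B,C,D,E,F,G] and [E,F,G,H] is [E,F,G], and the
//    new remainder starts with it, so the purge is allowed
//  - [E,F,G] begins at index 5 of the old context (p1), so diff = 5 - 2 = 3
//  - llama_memory_seq_rm drops KV cells [2,5) (tokens B, C, D) and llama_memory_seq_add slides the
//    later cells back by 3, leaving [S, A, E, F, G] for the normal fast forward of H afterwards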
//returns true if context shifting is possible. Does not execute the shift. bool CanContextShift(std::vector<int> &current_context_tokens, std::vector<int> &new_context_tokens, const int genamt, const int nctx) { return DoContextShifting(nullptr,nullptr,current_context_tokens,new_context_tokens,genamt,nctx,true); } static int GetBatchSize(int desiredBlasBatchSize,FileFormat in_file_format) { //check if approved to use BLAS bool approved_format = !(file_format == FileFormat::BADFORMAT || file_format == FileFormat::GPT2_1 || file_format == FileFormat::GPTJ_1 || file_format == FileFormat::GPTJ_2 || file_format == FileFormat::RWKV_1 || file_format==FileFormat::RWKV_2); if(!approved_format && desiredBlasBatchSize>0) { desiredBlasBatchSize = 16; } if(desiredBlasBatchSize<=0) { desiredBlasBatchSize = 1; } if (file_format != FileFormat::GGML && file_format != FileFormat::GGHF && file_format != FileFormat::GGJT && file_format != FileFormat::GGJT_2 && file_format != FileFormat::GGJT_3 && file_format != FileFormat::GGUF_GENERIC) { desiredBlasBatchSize = (desiredBlasBatchSize > 256 ? 256 : desiredBlasBatchSize); } if (file_format == FileFormat::RWKV_1 || file_format==FileFormat::RWKV_2) { desiredBlasBatchSize = 1; } return desiredBlasBatchSize; } //this function applies automatic scaling to rope freq base when the desired context exceeds trained context static float CalcGradientAIRopeFreqBase(float original_rope_base, int n_ctx_train, int n_ctx_desired) { if(n_ctx_desired <= n_ctx_train || n_ctx_desired <= 2048) { return original_rope_base; } else { float ctx_multiplier = 1.0f; float chi_ctx_train_value = (n_ctx_train * ctx_multiplier) / 6.28318; float chi_ctx_value = (n_ctx_desired * ctx_multiplier) / 6.28318; float gradient_ai_rope_freq_base_value = powf(original_rope_base, log10f(chi_ctx_value) / log10f(chi_ctx_train_value)); return gradient_ai_rope_freq_base_value; } } void kcpp_init_audio_proj(clip_ctx * ctx_a) { projector_type proj = clip_get_projector_type(ctx_a); // set preprocessor switch (proj) { case PROJECTOR_TYPE_QWEN2A: case PROJECTOR_TYPE_QWEN25O: case PROJECTOR_TYPE_ULTRAVOX: case PROJECTOR_TYPE_VOXTRAL: case PROJECTOR_TYPE_GLMA: case PROJECTOR_TYPE_MUSIC_FLAMINGO: audio_preproc = std::make_unique(ctx_a); break; case PROJECTOR_TYPE_LFM2A: audio_preproc = std::make_unique(ctx_a); break; default: GGML_ABORT("unsupported audio projector type"); } // initialize audio preprocessor audio_preproc->initialize(); } ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format, FileFormatExtraMeta in_file_format_meta) { is_quiet = inputs.quiet; ggml_time_init(); kcpp_data = new kcpp_params(); //allocate on heap to avoid linux segfault. yes this leaks memory.
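//Worked example for CalcGradientAIRopeFreqBase above (rough numbers): with original_rope_base 10000,
//n_ctx_train 4096 and n_ctx_desired 16384, chi_train = 4096/6.28318 ~= 652 and chi = 16384/6.28318 ~= 2608,
//so the returned base is 10000^(log10(2608)/log10(652)) ~= 10000^1.214 ~= 72000. The automatic RoPE
//scaling paths further below feed this into rope_freq_base; a desired context at or below 2048, or at
//or below the trained context, leaves the base untouched.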
file_format = in_file_format; file_format_meta = in_file_format_meta; kcpp_data->n_threads = inputs.threads; kcpp_data->n_blasthreads = inputs.blasthreads; bool isGguf = (file_format == FileFormat::GGUF_GENERIC); kcpp_pipeline_parallelism = inputs.pipelineparallel; kcpp_data->n_batch = GetBatchSize(inputs.batchsize, in_file_format); kcpp_data->n_ubatch = kcpp_data->n_batch; if(isGguf && kcpp_pipeline_parallelism) { //double the logical batch, while keeping the physical batch the same, pipeline parallel set GGML_SCHED_MAX_COPIES to 2 kcpp_data->n_batch *= 2; } kcpp_data->flash_attn = inputs.flash_attention; kcpp_data->model_filename = inputs.model_filename; kcpp_data->use_smartcontext = inputs.use_smartcontext; kcpp_data->use_contextshift = inputs.use_contextshift; kcpp_data->use_fastforward = inputs.use_fastforward; kcpp_data->smartcache = inputs.smartcache; kcpp_data->swa_full = !inputs.swa_support; if (!kcpp_data->swa_full) { if (inputs.use_contextshift) { kcpp_data->swa_full = true; //cannot use SWA printf("\nSWA Mode IS DISABLED!\nSWA Mode Cannot be used with Context Shifting!\n"); } else if (inputs.use_fastforward) { printf("\nSWA Mode is ENABLED!\nNote that using SWA Mode cannot be used with Context Shifting, and can lead to degraded recall when combined with Fast Forwarding!\n"); } else { printf("\nSWA Mode IS ENABLED!\nNote that using SWA Mode cannot be used with Context Shifting\n"); } } debugmode = inputs.debugmode; draft_ctx = nullptr; guidance_ctx = nullptr; audio_multimodal_supported = false; vision_multimodal_supported = false; use_mrope = false; auto clamped_max_context_length = inputs.max_context_length; if(clamped_max_context_length>16384 && file_format != FileFormat::GGUF_GENERIC) { printf("Warning: Only GGUF models can use max context above 16k. Max context lowered to 16k.\n"); clamped_max_context_length = 16384; } kcpp_data->n_ctx = clamped_max_context_length; max_context_limit_at_load = clamped_max_context_length; add_bos_token = !inputs.no_bos_token; load_guidance = inputs.load_guidance; check_slowness = inputs.check_slowness; highpriority = inputs.highpriority; if(!add_bos_token) { printf("\n======\nBOS token prefix was disabled! Your output may be degraded unless model was designed for it!\n======\n"); } neox_ctx_v2.hparams.n_ctx = neox_ctx_v3.hparams.n_ctx = gptj_ctx_v1.hparams.n_ctx = gptj_ctx_v2.hparams.n_ctx = gptj_ctx_v3.hparams.n_ctx = gpt2_ctx_v1.hparams.n_ctx = gpt2_ctx_v2.hparams.n_ctx = gpt2_ctx_v3.hparams.n_ctx = mpt_ctx_v3.hparams.n_ctx = kcpp_data->n_ctx; vision_max_res = inputs.visionmaxres; //determine rope scaling params float rope_freq_scale = 1.0f; float rope_freq_base = 10000.0f; bool overwriteRope = false; if(inputs.rope_freq_scale>0.0f && inputs.overridenativecontext==0) { rope_freq_scale = inputs.rope_freq_scale; rope_freq_base = inputs.rope_freq_base; overwriteRope = true; printf("Using Custom RoPE scaling (scale:%.3f, base:%.1f).\n",rope_freq_scale,rope_freq_base); } else { const int maxctxtrain = (inputs.overridenativecontext>0?inputs.overridenativecontext:2048); //Set freq base for all, including non GGUF. If we are using GGUF, this will be overwritten with more accurate values later. rope_freq_base = CalcGradientAIRopeFreqBase(10000.0f,maxctxtrain,kcpp_data->n_ctx); if(file_format==FileFormat::GGUF_GENERIC) { printf("Using automatic RoPE scaling for GGUF. 
If the model has custom RoPE settings, they'll be used directly instead!\n"); } else { printf("Using Automatic RoPE scaling, Pre-GGUF (scale:%.3f, base:%.1f).\n",rope_freq_scale, rope_freq_base); } } gptj_ctx_v3.hparams.rope_freq_scale = neox_ctx_v3.hparams.rope_freq_scale = rope_freq_scale; gptj_ctx_v3.hparams.rope_freq_base = neox_ctx_v3.hparams.rope_freq_base = rope_freq_base; //this is used for the mem_per_token eval, blas needs more RAM bool v3_use_scratch = ggml_v3_cpu_has_gpublas(); int kcpp_parseinfo_maindevice = inputs.kcpp_main_gpu<=0?0:inputs.kcpp_main_gpu; printf("System Info: %s\n", kcpp_print_system_info()); #if defined(GGML_USE_CUDA) if(file_format!=FileFormat::GGUF_GENERIC) { if(ggml_v3_cpu_has_gpublas() && kcpp_parseinfo_maindevice>0) { printf("CUBLAS v3: Set main device to %d\n",kcpp_parseinfo_maindevice); ggml_v3_cuda_set_main_device(kcpp_parseinfo_maindevice); } } #endif SetQuantsUnshuffled(false); if(file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2) { //newer format has bit unshuffling SetQuantsUnshuffled(file_format == FileFormat::GGJT_2); llama_v2_context_params llama_ctx_params_v2 = llama_v2_context_default_params(); llama_ctx_params_v2.n_ctx = clamped_max_context_length; llama_ctx_params_v2.seed = -1; llama_ctx_params_v2.f16_kv = true; llama_ctx_params_v2.logits_all = false; llama_ctx_params_v2.use_mmap = inputs.use_mmap; llama_ctx_params_v2.use_mlock = inputs.use_mlock; llama_ctx_params_v2.n_gpu_layers = inputs.gpulayers; llama_ctx_v2 = llama_v2_init_from_file(kcpp_data->model_filename.c_str(), llama_ctx_params_v2); if (llama_ctx_v2 == NULL) { fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, kcpp_data->model_filename.c_str()); return ModelLoadResult::FAIL; } printf("\n---\nWarning: Your model may be an OUTDATED format (ver %d). 
Please reconvert it for better results!\n---\n", file_format); if (lora_filename != "") { printf("\nAttempting to apply LORA adapter: %s\n", lora_filename.c_str()); int err = llama_v2_apply_lora_from_file(llama_ctx_v2, lora_filename.c_str(), nullptr, kcpp_data->n_threads); if (err != 0) { fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__); return ModelLoadResult::FAIL; } } n_vocab = llama_v2_n_vocab(llama_ctx_v2); //determine mem per token const std::vector tmp = {1, 2, 3, 4}; llama_v2_eval(llama_ctx_v2, tmp.data(), tmp.size(), 0, kcpp_data->n_threads); return ModelLoadResult::SUCCESS; } else if(file_format == FileFormat::GGJT_3) { llama_v3_context_params llama_ctx_params = llama_v3_context_default_params(); llama_ctx_params.n_ctx = clamped_max_context_length; llama_ctx_params.seed = -1; llama_ctx_params.f16_kv = true; llama_ctx_params.low_vram = inputs.low_vram; llama_ctx_params.mul_mat_q = inputs.use_mmq; llama_ctx_params.logits_all = false; llama_ctx_params.use_mmap = inputs.use_mmap; llama_ctx_params.use_mlock = inputs.use_mlock; llama_ctx_params.n_gpu_layers = inputs.gpulayers; llama_ctx_params.main_gpu = kcpp_parseinfo_maindevice; llama_ctx_params.rope_freq_base = rope_freq_base; llama_ctx_params.rope_freq_scale = rope_freq_scale; llama_ctx_params.n_batch = kcpp_data->n_batch; #if defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) bool ts_all_zero = true; for (int i = 0; i < tensor_split_max; ++i) { if (inputs.tensor_split[i] != 0.0f) { ts_all_zero = false; break; } } if(!ts_all_zero) { printf("\nApplying Tensor Split...\n"); llama_ctx_params.tensor_split = inputs.tensor_split; } #endif llama_ctx_v3 = llama_v3_init_from_file(kcpp_data->model_filename.c_str(), llama_ctx_params); if (llama_ctx_v3 == NULL) { fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, kcpp_data->model_filename.c_str()); return ModelLoadResult::FAIL; } if (lora_filename != "") { printf("\nAttempting to apply LORA adapter: %s\n", lora_filename.c_str()); int err = llama_v3_apply_lora_from_file(llama_ctx_v3, lora_filename.c_str(), nullptr, kcpp_data->n_threads); if (err != 0) { fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__); return ModelLoadResult::FAIL; } } n_vocab = llama_v3_n_vocab(llama_ctx_v3); //determine mem per token const std::vector tmp = {1, 2, 3, 4}; auto er = llama_v3_eval(llama_ctx_v3, tmp.data(), tmp.size(), 0, kcpp_data->n_threads); if(er!=0) { printf("\nModel Warmup Failed! 
(code:%d)\n",er); } return ModelLoadResult::SUCCESS; } else if(file_format==FileFormat::GGUF_GENERIC) { llama_backend_init(); llama_model_params model_params = llama_model_default_params(); llama_context_params llama_ctx_params = llama_context_default_params(); llama_ctx_params.n_ctx = clamped_max_context_length; llama_ctx_params.n_ctx += extra_context_handle_fragmentation; llama_ctx_params.offload_kqv = !inputs.low_vram; llama_ctx_params.kv_unified = true; model_params.use_mmap = inputs.use_mmap; model_params.use_mlock = inputs.use_mlock; model_params.use_direct_io = false; //no direct io for now until stable model_params.n_gpu_layers = inputs.gpulayers; //set device overrides if needed std::vector devices_override; std::string dev_override_str = inputs.devices_override; if(dev_override_str!="") { devices_override = kcpp_parse_device_list(dev_override_str); if(devices_override.size()>0) { printf("\nOverriding with %zu devices...\n",devices_override.size()-1); model_params.devices = devices_override.data(); } } if(kcpp_parseinfo_maindevice>0) { printf("Main GPU device: Try set to %d\n",kcpp_parseinfo_maindevice); } #if defined(GGML_USE_CUDA) printf("CUDA MMQ: %s\n",(inputs.use_mmq?"True":"False")); printf("---\nInitializing CUDA/HIP, please wait, the following step may take a few minutes (only for first launch)...\n---\n"); ggml_cuda_set_mul_mat_q(inputs.use_mmq); #endif model_params.main_gpu = kcpp_parseinfo_maindevice; #if defined(GGML_USE_CUDA) model_params.split_mode = (inputs.use_rowsplit?llama_split_mode::LLAMA_SPLIT_MODE_ROW:llama_split_mode::LLAMA_SPLIT_MODE_LAYER); #else model_params.split_mode = llama_split_mode::LLAMA_SPLIT_MODE_LAYER; #endif llama_ctx_params.n_batch = kcpp_data->n_batch; llama_ctx_params.n_ubatch = kcpp_data->n_ubatch; llama_ctx_params.n_threads = kcpp_data->n_threads; llama_ctx_params.n_threads_batch = kcpp_data->n_blasthreads; #if defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) bool ts_all_zero = true; for (int i = 0; i < tensor_split_max; ++i) { if (inputs.tensor_split[i] != 0.0f) { ts_all_zero = false; break; } } if(!ts_all_zero) { printf("\nApplying Tensor Split...\n"); model_params.tensor_split = inputs.tensor_split; } #endif //compat for old falcon if(file_format_meta.fileversion==1) { //apply compat fix printf("\nUsing older tokenizer for GGUFv1..."); OldBPETokenizerMode = true; } std::vector kvos; //ensure it keeps in scope until model is created std::vector tenos; //ensure it keeps in scope until model is created std::vector temp_tensor_names; //store temp tensor names to have mem references. 
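//Sketch of the override-tensor string format handled below (illustrative values, not a real config):
//entries are comma separated "<tensor name regex>=<buffer type name>" pairs, and the buffer type must
//match one of the backend buffer names printed by the loop below, e.g.
//  blk\.([0-9]+)\.ffn_(up|down|gate)_exps=CPU,blk\.0\.attn_q=CUDA0
//would keep all expert FFN tensors in system memory while pinning blk.0's attn_q to the first CUDA
//device (the exact buffer names depend on which backends are compiled in and detected at runtime).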
temp_tensor_names.reserve(llama_max_tensor_buft_overrides()); //very important, prevents vector from reallocating tenos.reserve(llama_max_tensor_buft_overrides()); if(inputs.moe_experts>0) { printf("\nOverriding number of experts to %d\n",inputs.moe_experts); llama_model_kv_override kvo; std::string moekeystr = "llama"; if(file_format_meta.model_architecture_str!="") { moekeystr = file_format_meta.model_architecture_str; } moekeystr += ".expert_used_count"; const char * moekey = moekeystr.c_str(); std::strncpy(kvo.key, moekey, sizeof(kvo.key) - 1); kvo.key[sizeof(kvo.key) - 1] = '\0'; // Ensure null termination kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT; kvo.val_i64 = inputs.moe_experts; kvos.push_back(kvo); } for(int x=0;x0) { kvos.emplace_back(); kvos.back().key[0] = 0; model_params.kv_overrides = kvos.data(); } //handle override tensor std::string tensoroverrides = inputs.override_tensors; if(ggml_backend_dev_count()>1 && inputs.moecpu>0) { std::string toadd = ""; for (int i = 0; i < inputs.moecpu; ++i) { std::string tmp = string_format("blk\\.%d\\.ffn_(up|down|gate|gate_up)_(ch|)exps=CPU", i); if(i>0) { tmp = "," + tmp; } toadd += tmp; } if (tensoroverrides == "") { tensoroverrides = toadd; } else { tensoroverrides += "," + toadd; } printf("Overriding %d MoE layers to CPU...\n",inputs.moecpu); } if(tensoroverrides!="" && ggml_backend_dev_count()>1) { printf("Handling Override Tensors for backends: "); std::map buft_list; for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { auto * dev = ggml_backend_dev_get(i); auto * buft = ggml_backend_dev_buffer_type(dev); if (buft) { std::string name = ggml_backend_buft_name(buft); printf("%s ", name.c_str()); buft_list[name] = buft; } } printf("\n\n"); for (const auto & overrider : string_split(tensoroverrides, ',')) { std::string::size_type pos = overrider.find('='); if (pos == std::string::npos) { printf("\nInvalid Override Tensor: %s\n",overrider.c_str()); continue; } std::string tensor_name = overrider.substr(0, pos); std::string buffer_type = overrider.substr(pos + 1); string_trim_whitespace(tensor_name); string_trim_whitespace(buffer_type); if (buft_list.find(buffer_type) == buft_list.end()) { printf("\nUnknown Buffer Type: %s\n",buffer_type.c_str()); continue; } llama_model_tensor_buft_override nto; temp_tensor_names.push_back(tensor_name); nto.pattern = temp_tensor_names[temp_tensor_names.size()-1].c_str(); nto.buft = buft_list.at(buffer_type); tenos.push_back(nto); printf("Override Tensor: %s to %s\n",tensor_name.c_str(),buffer_type.c_str()); } } if(tenos.size()>0) { tenos.push_back({nullptr, nullptr}); model_params.tensor_buft_overrides = tenos.data(); } //apply overrides from autofit float tensor_split_temp[128] = {0}; //temp buffer for autofit std::vector fit_params_target = std::vector(llama_max_devices(),1024*1024*1024); if(inputs.autofit) { #if defined(GGML_USE_HIP) rocblas_initialize(); #endif // defined(GGML_USE_HIP) common_params temp_params; size_t taxmb = inputs.autofit_tax_mb; printf("\nAttempting to use llama.cpp's automating fitting code. 
This will override all your layer configs, may or may not work!\n"); //zero out any customizations made tenos.clear(); tenos.push_back({nullptr, nullptr}); model_params.tensor_buft_overrides = tenos.data(); model_params.tensor_split = tensor_split_temp; model_params.n_gpu_layers = -1; //must be this value to be considered default printf("Autofit Reserve Space: %zu MB\n",taxmb); //disable log spam bool dospam = (debugmode==1 && !is_quiet); ggml_log_callback currlogger; void * curruserdat; if(!dospam) { llama_log_get(&currlogger, &curruserdat); llama_log_set(log_callback_off, nullptr); } fit_params_target[0] = taxmb*1024*1024; bool success = (llama_params_fit(kcpp_data->model_filename.c_str(), &model_params, &llama_ctx_params, tensor_split_temp, tenos.data(), fit_params_target.data(), kcpp_data->n_ctx, GGML_LOG_LEVEL_NONE)==0); if(!dospam) { llama_log_set(currlogger, curruserdat); } printf("Autofit Success: %d, Autofit Result: ",success); print_fitted_params(model_params,llama_ctx_params); if(!success) { //revert to previous model_params.n_gpu_layers = inputs.gpulayers; } } llama_model * llamamodel = llama_model_load_from_file(kcpp_data->model_filename.c_str(), model_params); //prepare savestate slots savestate_limit = inputs.smartcacheslots; //if RNN model AND shifting and fastforward is on, enable smartcache if((llama_model_is_recurrent(llamamodel) || llama_model_is_hybrid(llamamodel)) && kcpp_data->use_fastforward && kcpp_data->use_contextshift) { if(savestate_limit>0) { printf("RNN or Hybrid model with FF and shifting flags enabled - SmartCache will be enabled with extra slots. Disable CtxShift if you do not want this.\n"); kcpp_data->smartcache = true; savestate_limit += 1; } } savestates.resize(savestate_limit); if(kcpp_data->smartcache) { printf("SmartCache: Prepared %d KV slots\n",savestate_limit); } if(!kcpp_data->use_fastforward && kcpp_data->smartcache) { kcpp_data->smartcache = false; printf("\nSmartCache IS DISABLED!\nSmartCache requires Fast Forwarding!\n"); } if(llama_model_rope_type(llamamodel)==LLAMA_ROPE_TYPE_MROPE || llama_model_rope_type(llamamodel)==LLAMA_ROPE_TYPE_IMROPE) { printf("\nMRope is used, context shift will be disabled!\n"); kcpp_data->use_contextshift = false; use_mrope = true; } if(overwriteRope) { llama_ctx_params.rope_freq_base = rope_freq_base; llama_ctx_params.rope_freq_scale = rope_freq_scale; } else { //if the model modifies rope in any way, or uses yarn, use the model values.
Otherwise, use our automatic ones //special exception for llama, which uses auto scale if(inputs.overridenativecontext > 0) { printf("Automatic RoPE Scaling: Adjust based on override train context of %d.\n",inputs.overridenativecontext); rope_freq_base = CalcGradientAIRopeFreqBase(llamamodel->hparams.rope_freq_base_train, inputs.overridenativecontext, kcpp_data->n_ctx); llama_ctx_params.rope_freq_base = rope_freq_base; llama_ctx_params.rope_freq_scale = rope_freq_scale; printf("Automatic RoPE Scaling: Using (scale:%.3f, base:%.1f).\n", rope_freq_scale, rope_freq_base); } else if((llamamodel->hparams.rope_freq_base_train!=10000.0f && llamamodel->hparams.rope_freq_base_train!=500000.0f) || llamamodel->hparams.rope_freq_scale_train!=1.0f || llamamodel->hparams.rope_scaling_type_train==2) { printf("Automatic RoPE Scaling: Using model internal value.\n"); } else { //Calculate rope_freq_base using the gradientAI formula, solar requires ctx *8 for correct scaling rope_freq_base = CalcGradientAIRopeFreqBase(llamamodel->hparams.rope_freq_base_train, file_format_meta.n_ctx_train, kcpp_data->n_ctx); llama_ctx_params.rope_freq_base = rope_freq_base; llama_ctx_params.rope_freq_scale = rope_freq_scale; printf("Automatic RoPE Scaling: Using (scale:%.3f, base:%.1f).\n", rope_freq_scale, rope_freq_base); } } if(file_format_meta.model_architecture==llm_arch::LLM_ARCH_RWKV6 || file_format_meta.model_architecture==llm_arch::LLM_ARCH_RWKV7 || file_format_meta.model_architecture==llm_arch::LLM_ARCH_ARWKV7 || file_format_meta.model_architecture==llm_arch::LLM_ARCH_RWKV6QWEN2) { printf("\nRWKV6 Overriding EOS and BOS IDs to 0\n"); llamamodel->vocab.set_eos_bos(0,0); } llama_ctx_params.flash_attn_type = (kcpp_data->flash_attn?LLAMA_FLASH_ATTN_TYPE_ENABLED:LLAMA_FLASH_ATTN_TYPE_DISABLED); llama_ctx_params.swa_full = kcpp_data->swa_full; llama_ctx_params.type_k = (inputs.quant_k==2?GGML_TYPE_Q4_0:(inputs.quant_k==1?GGML_TYPE_Q8_0:(inputs.quant_k==3?GGML_TYPE_BF16:GGML_TYPE_F16))); llama_ctx_params.type_v = (inputs.quant_v==2?GGML_TYPE_Q4_0:(inputs.quant_v==1?GGML_TYPE_Q8_0:(inputs.quant_v==3?GGML_TYPE_BF16:GGML_TYPE_F16))); llama_ctx_v4 = llama_init_from_model(llamamodel, llama_ctx_params); if(load_guidance) { guidance_ctx = llama_init_from_model(llamamodel, llama_ctx_params); } if (llama_ctx_v4 == NULL) { fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, kcpp_data->model_filename.c_str()); return ModelLoadResult::FAIL; } //we use a threadpool, greatly speeds up qwen3moe tg ggml_threadpool_params threadpool1_params, threadpool2_params; ggml_threadpool_params_init(&threadpool1_params,kcpp_data->n_threads); ggml_threadpool_params_init(&threadpool2_params,kcpp_data->n_blasthreads); if(inputs.highpriority) { threadpool1_params.prio = GGML_SCHED_PRIO_HIGH; threadpool2_params.prio = GGML_SCHED_PRIO_HIGH; } printf("Threadpool set to %d threads and %d blasthreads...\n", kcpp_data->n_threads,kcpp_data->n_blasthreads); struct ggml_threadpool * threadpool1 = ggml_threadpool_new(&threadpool1_params); struct ggml_threadpool * threadpool2 = ggml_threadpool_new(&threadpool2_params); if (!threadpool1 || !threadpool2) { fprintf(stderr, "%s: error: failed to create threadpool.\n", __func__); return ModelLoadResult::FAIL; } llama_attach_threadpool(llama_ctx_v4, threadpool1, threadpool2); std::vector loras; std::vector lorascales; if (lora_filename != "") { printf("\nAttempting to apply LORA adapter: %s\n", lora_filename.c_str()); auto adapter = llama_adapter_lora_init(llamamodel, lora_filename.c_str()); if (adapter 
== nullptr) { fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__); return ModelLoadResult::FAIL; } loras.push_back(adapter); lorascales.push_back(inputs.lora_multiplier); llama_set_adapters_lora(llama_ctx_v4, loras.data(), loras.size(), lorascales.data()); } if(mmproj_filename != "" && file_format==FileFormat::GGUF_GENERIC) { printf("\nAttempting to apply Multimodal Projector: %s\n", mmproj_filename.c_str()); #if defined(GGML_USE_METAL) if(file_format_meta.model_architecture == llm_arch::LLM_ARCH_QWEN2VL || file_format_meta.model_architecture == llm_arch::LLM_ARCH_GEMMA3) { set_clip_uses_gpu(false); printf("Clip will use CPU for this model!\n"); } #endif clip_flash_attn_type clip_fa = (kcpp_data->flash_attn?CLIP_FLASH_ATTN_TYPE_ENABLED:CLIP_FLASH_ATTN_TYPE_DISABLED); //kcpp: disabled in 1.102.2 as some headsizes break on turing #if defined(GGML_USE_CUDA) clip_fa = CLIP_FLASH_ATTN_TYPE_DISABLED; //kcpp: disabled in 1.102.2 as some headsizes break on turing #endif if(inputs.mmproj_cpu) { set_clip_uses_gpu(false); printf("Clip forced to use CPU!\n"); clip_fa = (kcpp_data->flash_attn?CLIP_FLASH_ATTN_TYPE_ENABLED:CLIP_FLASH_ATTN_TYPE_DISABLED); //however if using CPU, fa is fine } clip_context_params ctx_clip_params { /* use_gpu */ true, /* flash_attn_type */ clip_fa, /* image_min_tokens */ -1, /* image_max_tokens */ -1, }; clip_init_result cres = clip_init(mmproj_filename.c_str(), ctx_clip_params); clp_ctx_v = cres.ctx_v; clp_ctx_a = cres.ctx_a; if(clp_ctx_v == nullptr && clp_ctx_a == nullptr) { fprintf(stderr, "%s: error: failed to load mmproj model!\n", __func__); return ModelLoadResult::FAIL; } const int n_embd_llm = llama_model_n_embd_inp(llamamodel); int n_embd_clip_a = -1; int n_embd_clip_v = -1; if (clp_ctx_v) { n_embd_clip_v = clip_n_mmproj_embd(clp_ctx_v); if (n_embd_clip_v != n_embd_llm) { fprintf(stderr, "%s: mmproj vision embedding mismatch (%d and %d)! Make sure you use the correct mmproj file!\n", __func__,n_embd_clip_v, n_embd_llm); return ModelLoadResult::FAIL; } } if (clp_ctx_a) { n_embd_clip_a = clip_n_mmproj_embd(clp_ctx_a); if (n_embd_clip_a != n_embd_llm) { fprintf(stderr, "%s: mmproj audio embedding mismatch (%d and %d)! Make sure you use the correct mmproj file!\n", __func__,n_embd_clip_a, n_embd_llm); return ModelLoadResult::FAIL; } } if (clp_ctx_v && clp_ctx_a && n_embd_clip_v != n_embd_clip_a) { fprintf(stderr, "%s: mmproj embedding mismatch between Audio and Vision (%d and %d)! Make sure you use the correct mmproj file!\n", __func__,n_embd_clip_v, n_embd_clip_a); return ModelLoadResult::FAIL; } if(clp_ctx_v) { vision_multimodal_supported = true; } clp_img_data = clip_image_u8_init(); if(clp_ctx_a) //init audio { kcpp_init_audio_proj(clp_ctx_a); audio_multimodal_supported = true; } } const llama_vocab * tmpvocab = llama_model_get_vocab(llamamodel); n_vocab = llama_vocab_n_tokens(tmpvocab); if(draftmodel_filename !="" && file_format==FileFormat::GGUF_GENERIC) { if(llama_model_is_recurrent(llamamodel) || llama_model_is_hybrid(llamamodel)) { printf("Error: Speculative decoding cannot be used with Recurrent models!\n"); } else if(clp_ctx_v!=nullptr || clp_ctx_a!=nullptr) { printf("Error: Speculative decoding cannot be used with multimodal projectors!\n"); } else { printf("\nAttempting to load draft model for speculative decoding. It will be fully offloaded if possible. 
Vocab must match the main model.\n"); speculative_chunk_amt = inputs.draft_amount; speculative_decoding_setup(draftmodel_filename, model_params, llama_ctx_params, n_vocab, inputs.draft_gpusplit, inputs.draft_gpulayers); } } //we cannot really trust the add bos in vocab. old models don't set it. // instead, we EXPLICITY need to find the add_bos_token key==false to automatically set it off. if(!llamamodel->vocab.get_add_bos() && add_bos_token && file_format_meta.explicitly_no_bos && file_format_meta.model_architecture!=llm_arch::LLM_ARCH_GEMMA4) //gemma4 MUST have bos even if meta says no { printf("\nThis architecture has explicitly disabled the BOS token - if you need it, you must add it manually.\n"); add_bos_token = false; } if (file_format == FileFormat::GGUF_GENERIC && (file_format_meta.model_architecture == llm_arch::LLM_ARCH_GLM4 || file_format_meta.model_architecture == llm_arch::LLM_ARCH_GLM4_MOE || file_format_meta.model_architecture == llm_arch::LLM_ARCH_DEEPSEEK2)) { std::string temp = gpttype_get_chat_template(); if (temp.find("[gMASK]") != std::string::npos) { printf("GLM-4 will have no automatic BOS token.\n"); add_bos_token = false; } } printf("Starting model warm up, please wait a moment...\n"); //warmup at least 33 tokens to trigger batch std::vector tmp; for (int i = 1; i <= 33; ++i) { tmp.push_back(i); } llama_memory_clear(llama_get_memory(llama_ctx_v4),true); auto er = llama_decode(llama_ctx_v4, llama_batch_get_one(tmp.data(), tmp.size())); if(er!=0) { printf("\nModel Warmup Failed! (code:%d)\n",er); } tmp = {1}; llama_memory_clear(llama_get_memory(llama_ctx_v4),true); er = llama_decode(llama_ctx_v4, llama_batch_get_one(tmp.data(), tmp.size())); if(er!=0) { printf("\nModel Warmup Failed! (code:%d)\n",er); } return ModelLoadResult::SUCCESS; } else if (file_format == FileFormat::RWKV_1 || file_format==FileFormat::RWKV_2) { //start loading the models first bool useWorldTokenizer = false; if (file_format == FileFormat::RWKV_1) { rwkv_ctx_v2 = rwkv_v2_init_from_file(kcpp_data->model_filename.c_str(), kcpp_data->n_threads); } else //rwkv_2 { rwkv_ctx_v3 = rwkv_init_from_file(kcpp_data->model_filename.c_str(), kcpp_data->n_threads); if(inputs.gpulayers>0) { rwkv_gpu_offload_layers(rwkv_ctx_v3,inputs.gpulayers); } const struct rwkv_file_header & header = rwkv_ctx_v3->instance->model.header; const size_t n_vocab = header.n_vocab; printf("\nDetected Vocab: %zu",n_vocab); if(n_vocab>60000) { printf("\nUsing WORLD TOKENIZER"); useWorldTokenizer = true; } } std::string word; if(useWorldTokenizer) { read_rwkv_world_vocab(); } else { read_rwkv_vocab(); } int vocabsiz = rwkv_vocab.size(); for (int i = 0; i < vocabsiz; i++) { uint32_t len; word = rwkv_vocab[i]; vocab.token_to_id[word] = i; vocab.id_to_token[i] = word; } printf("\nRWKV Vocab: %u\n", vocabsiz); logits.resize(vocabsiz); n_vocab = vocab.id_to_token.size(); //handled separately if (file_format == FileFormat::RWKV_1) { //setup buffers for rwkv state auto padding = 512u; auto statebufsiz = rwkv_v2_get_state_buffer_element_count(rwkv_ctx_v2) * sizeof(float) + padding; auto logitbufsiz = rwkv_v2_get_logits_buffer_element_count(rwkv_ctx_v2) * sizeof(float) + padding; printf("\nRWKV old Init: State Buffer:%lu, Logit Buffer:%lu\n", statebufsiz, logitbufsiz); rwkv_ctx_v2->state_out = (float *)malloc(statebufsiz); rwkv_ctx_v2->logits_out = (float *)malloc(logitbufsiz); rwkv_ctx_v2->state_in = nullptr; bool testeval = rwkv_v2_eval(rwkv_ctx_v2, 0, rwkv_ctx_v2->state_in, rwkv_ctx_v2->state_out, rwkv_ctx_v2->logits_out); if (!testeval) { 
printf("\nError: RWKV old Init Eval Failed!\n"); } memcpy(logits.data(), rwkv_ctx_v2->logits_out, sizeof(float) * vocabsiz); if (rwkv_ctx_v2 == NULL) { return ModelLoadResult::FAIL; } return ModelLoadResult::SUCCESS; } else { //setup buffers for rwkv state auto padding = 512u; auto statebufsiz = rwkv_get_state_buffer_element_count(rwkv_ctx_v3) * sizeof(float) + padding; auto logitbufsiz = rwkv_get_logits_buffer_element_count(rwkv_ctx_v3) * sizeof(float) + padding; printf("\nRWKV Init: State Buffer:%lu, Logit Buffer:%lu\n", statebufsiz, logitbufsiz); rwkv_ctx_v3->state_out = (float *)malloc(statebufsiz); rwkv_ctx_v3->logits_out = (float *)malloc(logitbufsiz); rwkv_ctx_v3->state_in = nullptr; bool testeval = rwkv_eval(rwkv_ctx_v3, kcpp_data->n_threads, 0, rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, rwkv_ctx_v3->logits_out); if (!testeval) { printf("\nError: RWKV Init Eval Failed!\n"); } memcpy(logits.data(), rwkv_ctx_v3->logits_out, sizeof(float) * vocabsiz); if (rwkv_ctx_v3 == NULL) { return ModelLoadResult::FAIL; } return ModelLoadResult::SUCCESS; } } else if (file_format == FileFormat::GPT2_1) { ModelLoadResult res = legacy_gpt2_model_load(kcpp_data->model_filename, gpt2_ctx_v1, vocab, file_format); if(res==ModelLoadResult::FAIL) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_data->model_filename.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) { printf("\nTensor Transposition Detected! Retrying GPT-2 model loading..."); return res; } n_vocab = gpt2_ctx_v1.hparams.n_vocab; // determine the required inference memory per token: legacy_gpt2_eval(gpt2_ctx_v1, kcpp_data->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format); return ModelLoadResult::SUCCESS; } else if (file_format == FileFormat::GPT2_2 || file_format==FileFormat::GPT2_3 || file_format==FileFormat::GPT2_4) { if(file_format==FileFormat::GPT2_4) { ModelLoadResult res = gpt2_model_load(kcpp_data->model_filename, gpt2_ctx_v3, vocab, file_format, inputs.gpulayers); if(res==ModelLoadResult::FAIL) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_data->model_filename.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) { printf("\nTensor Transposition Detected! Retrying GPT-2 model loading..."); return res; } n_vocab = gpt2_ctx_v3.hparams.n_vocab; // determine the required inference memory per token: gpt2_eval(gpt2_ctx_v3, kcpp_data->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, v3_use_scratch); return ModelLoadResult::SUCCESS; } else { //newer format has bit unshuffling SetQuantsUnshuffled(file_format == FileFormat::GPT2_3); ModelLoadResult res = gpt2_v2_model_load(kcpp_data->model_filename, gpt2_ctx_v2, vocab, file_format, inputs.gpulayers); if(res==ModelLoadResult::FAIL) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_data->model_filename.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) { printf("\nTensor Transposition Detected! 
Retrying GPT-2 model loading..."); return res; } n_vocab = gpt2_ctx_v2.hparams.n_vocab; // determine the required inference memory per token: gpt2_v2_eval(gpt2_ctx_v2, kcpp_data->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format); return ModelLoadResult::SUCCESS; } } else if (file_format == FileFormat::GPTJ_1 || file_format == FileFormat::GPTJ_2) { ModelLoadResult res = legacy_gptj_model_load(kcpp_data->model_filename, gptj_ctx_v1, vocab, file_format); if(res==ModelLoadResult::FAIL) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_data->model_filename.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) { printf("\nTensor Transposition Detected! Retrying GPT-J model loading..."); return res; } n_vocab = gptj_ctx_v1.hparams.n_vocab; // determine the required inference memory per token: legacy_gptj_eval(gptj_ctx_v1, kcpp_data->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format); //if the logits are NAN or duplicated, it means the model is incompatible if(logits.size()>0 && IsNanCheck(logits[0])) { printf("\nBad Logits detected! Retrying GPT-J model loading..."); ggml_v1_free(gptj_ctx_v1.ctx); return ModelLoadResult::RETRY_LOAD; } return ModelLoadResult::SUCCESS; } else if(file_format == FileFormat::GPTJ_3 || file_format == FileFormat::GPTJ_4 || file_format == FileFormat::GPTJ_5) { if(file_format == FileFormat::GPTJ_5) { ModelLoadResult loadresult = gptj_model_load(kcpp_data->model_filename, gptj_ctx_v3, vocab, inputs.gpulayers); if (loadresult == ModelLoadResult::FAIL) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_data->model_filename.c_str()); return loadresult; } else if (loadresult == ModelLoadResult::RETRY_LOAD) { printf("\nTensor Transposition Detected! Retrying GPT-J model loading..."); return loadresult; } n_vocab = gptj_ctx_v3.hparams.n_vocab; // determine the required inference memory per token: gptj_eval(gptj_ctx_v3, kcpp_data->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, v3_use_scratch); //if the logits are NAN or duplicated, it means the model is incompatible std::vector oldlogits(logits); //this is another hack because they change the library - we run the eval through the model //twice and compare logits. if they give the same logits for different inputs, model is broken gptj_eval(gptj_ctx_v3, kcpp_data->n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token, v3_use_scratch); if(logits.size()>0 && (IsNanCheck(logits[0]) || LogitsDuplicated(oldlogits,logits))) { printf("\nBad Logits detected! Retrying GPT-J model loading..."); ggml_v3_free(gptj_ctx_v3.ctx); return ModelLoadResult::RETRY_LOAD; } return ModelLoadResult::SUCCESS; } else { //newer format has bit unshuffling SetQuantsUnshuffled(file_format == FileFormat::GPTJ_4); ModelLoadResult loadresult = gptj_v2_model_load(kcpp_data->model_filename, gptj_ctx_v2, vocab, inputs.gpulayers); if (loadresult == ModelLoadResult::FAIL) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_data->model_filename.c_str()); return loadresult; } else if (loadresult == ModelLoadResult::RETRY_LOAD) { printf("\nTensor Transposition Detected! 
Retrying GPT-J model loading..."); return loadresult; } n_vocab = gptj_ctx_v2.hparams.n_vocab; // determine the required inference memory per token: gptj_v2_eval(gptj_ctx_v2, kcpp_data->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); //if the logits are NAN or duplicated, it means the model is incompatible std::vector oldlogits(logits); //this is another hack because they change the library - we run the eval through the model //twice and compare logits. if they give the same logits for different inputs, model is broken gptj_v2_eval(gptj_ctx_v2, kcpp_data->n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token); if(logits.size()>0 && (IsNanCheck(logits[0]) || LogitsDuplicated(oldlogits,logits))) { printf("\nBad Logits detected! Retrying GPT-J model loading..."); ggml_v2_free(gptj_ctx_v2.ctx); return ModelLoadResult::RETRY_LOAD; } return ModelLoadResult::SUCCESS; } } else if(file_format==FileFormat::NEOX_1 || file_format==FileFormat::NEOX_2 || file_format==FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5|| file_format==FileFormat::NEOX_6|| file_format==FileFormat::NEOX_7) { if(file_format==FileFormat::NEOX_6|| file_format==FileFormat::NEOX_7) { ModelLoadResult res = gpt_neox_model_load(kcpp_data->model_filename, neox_ctx_v3, vocab, file_format, inputs.gpulayers); if(res==ModelLoadResult::FAIL) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_data->model_filename.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) { printf("\nIncorrect Tensor Size Detected! Retrying GPT-NeoX model loading..."); return res; } n_vocab = neox_ctx_v3.hparams.n_vocab; // determine the required inference memory per token: gpt_neox_eval(neox_ctx_v3, kcpp_data->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, v3_use_scratch); return ModelLoadResult::SUCCESS; } else { //newer format has bit unshuffling SetQuantsUnshuffled(file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5); ModelLoadResult res = gpt_neox_v2_model_load(kcpp_data->model_filename, neox_ctx_v2, vocab, file_format); if(res==ModelLoadResult::FAIL) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_data->model_filename.c_str()); return res; } else if(res==ModelLoadResult::RETRY_LOAD) { printf("\nIncorrect Tensor Size Detected! Retrying GPT-NeoX model loading..."); return res; } n_vocab = neox_ctx_v2.hparams.n_vocab; // determine the required inference memory per token: gpt_neox_v2_eval(neox_ctx_v2, kcpp_data->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); if(logits.size()>0 && file_format==FileFormat::NEOX_2 && !IsNanCheck(logits[0])) { //run the black magic eval to determine if it's redpajama. VERY UGLY HACK! std::vector test_embd = ::gpt_tokenize(vocab, "1 2 3 4 5 6 7"); auto orig_par_res = neox_ctx_v2.hparams.par_res; neox_ctx_v2.hparams.par_res = 0; //test with residual false gpt_neox_v2_eval(neox_ctx_v2, kcpp_data->n_threads, 0, test_embd, logits, mem_per_token); neox_ctx_v2.hparams.par_res = orig_par_res; int topid = std::max_element(logits.begin(),logits.end())-logits.begin(); std::string predicted = vocab.id_to_token[topid].c_str(); auto findresult = predicted.find("8"); if(findresult != std::string::npos && findresult<2) { printf("\n---\nOld RedPajama NeoX Detected! Switching to new format! 
(use_parallel_residual=False)\n"); ggml_v2_free(neox_ctx_v2.ctx); return ModelLoadResult::RETRY_LOAD; } } return ModelLoadResult::SUCCESS; } } else if(file_format==FileFormat::MPT_1) { bool res = mpt_model_load(kcpp_data->model_filename, mpt_ctx_v3, vocab, inputs.gpulayers); if(res==false) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, kcpp_data->model_filename.c_str()); return ModelLoadResult::FAIL; } n_vocab = mpt_ctx_v3.hparams.n_vocab; // determine the required inference memory per token: mpt_eval(mpt_ctx_v3, kcpp_data->n_threads, 0, { 0, 1, 2, 3 }, logits, false, mem_per_token, v3_use_scratch); return ModelLoadResult::SUCCESS; } else { printf("\nUnknown Model, cannot load.\n"); return ModelLoadResult::FAIL; } } bool gpttype_generate_abort() { if(kcpp_data==nullptr) { printf("\nWarning: KCPP text generation not initialized!\n"); } early_abort = true; return true; } std::string gpttype_get_chat_template() { if(kcpp_data==nullptr) { printf("\nWarning: KCPP text generation not initialized!\n"); return ""; } if(file_format!=FileFormat::GGUF_GENERIC || !llama_ctx_v4) { return ""; } // copied from examples/server/utils.hpp::llama_get_chat_template std::string template_key = "tokenizer.chat_template"; // call with NULL buffer to get the total size of the string int32_t res = llama_model_meta_val_str(llama_get_model(llama_ctx_v4), template_key.c_str(), NULL, 0); if (res < 0) { return ""; } std::vector model_template(res + 1, 0); llama_model_meta_val_str(llama_get_model(llama_ctx_v4), template_key.c_str(), model_template.data(), model_template.size()); return std::string(model_template.data(), model_template.size() - 1); } std::vector gpttype_get_token_arr(const std::string & input, bool addbos) { std::vector toks; if(kcpp_data==nullptr) { printf("\nWarning: KCPP text generation not initialized!\n"); return toks; } if(debugmode==1 && !is_quiet) { printf("\nFileFormat: %d, Tokenizing: %s",file_format ,input.c_str()); } TokenizeString(input, toks, file_format,addbos); int tokcount = toks.size(); if(debugmode==1 && !is_quiet) { printf("\nTokens Counted: %d\n",tokcount); } return toks; } std::string gpttype_detokenize(const std::vector & inputids, bool render_special) { if(kcpp_data==nullptr) { printf("\nWarning: KCPP text generation not initialized!\n"); return ""; } std::string output = ""; for (auto eid : inputids) { if(eid<0 || eid>=n_vocab) { continue; } std::string tokenizedstr = FileFormatTokenizeID(eid, file_format, render_special); output += tokenizedstr; } return output; } const std::string & gpttype_get_pending_output() { if(kcpp_data==nullptr) { printf("\nWarning: KCPP text generation not initialized!\n"); return concat_output_reader_copy_poll; } concat_output_mtx.lock(); concat_output_reader_copy_poll = concat_output; concat_output_mtx.unlock(); return concat_output_reader_copy_poll; } const std::vector gpttype_get_top_picks_data() { return top_picks_history; } bool VecContainsIntVal(const std::vector & vec, const int val) { for (const auto &matched : vec) { if (val == matched) { return true; } } return false; } int GetThreadsToUse(bool blasmode) { if (blasmode) { #if defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) return kcpp_data->n_blasthreads; #else return std::min(kcpp_data->n_blasthreads, 4); #endif } return kcpp_data->n_threads; } //this function prepares the clip embds for llava. 
it's only needed when images change static void PrepareMediaEmbds(const int nctx, const std::vector & media_intro) { bool vision_on = (clp_ctx_v != nullptr && clp_img_data != nullptr); bool audio_on = (clp_ctx_a != nullptr); if (vision_on || audio_on) { int introsize = media_intro.size(); last_media_mem.clear(); for(int i=0;i media_data_buffer = kcpp_base64_decode(media_obj); if(!media_objects[i].is_audio && vision_on) { //images if (!clip_image_load_from_bytes(media_data_buffer.data(), media_data_buffer.size(), clp_img_data, vision_max_res)) { //failed to load image printf("\nError: Clip image %d failed to load!",i); } else { if(debugmode==1 && !is_quiet) { printf("\nCreating clip image embed..."); } media_chunk chunk; if (!llava_image_embed_make_with_clip_img(clp_ctx_v, kcpp_data->n_threads, clp_img_data, &chunk.clp_img_embd, &chunk.clp_image_tokens, &chunk.nx, &chunk.ny)) { printf("\nError: Clip image %d failed to create embd!",i); } if(debugmode==1 && !is_quiet) { printf("\nVision Clip Embed %i used Tokens: %d",i,chunk.clp_image_tokens); } int cliptokensneeded = chunk.clp_image_tokens; if(cliptokensneeded>0 && cliptokensneeded < nctx) { int tokcnt = (chunk.clp_image_tokens + media_objects[i].chunk_start_seq.size() + media_objects[i].chunk_end_seq.size()); if(i==0) { tokcnt += introsize; } for(int n=0;n pcmf32; int samplerate = clip_get_hparams(clp_ctx_a)->audio_sample_rate; bool ok = kcpp_decode_audio_from_buf(media_data_buffer.data(), media_data_buffer.size(), samplerate, pcmf32); if (!ok) { printf("\nError: Clip audio %d failed to convert!",i); continue; } std::vector mel_spec_chunks; ok = audio_preproc->preprocess(pcmf32.data(), pcmf32.size(), mel_spec_chunks); if (!ok) { printf("\nError: Clip audio %d failed to load!",i); continue; } // consider each mel_spec as a separate audio chunk int total_chunk_tokens = 0; for (auto & mel_spec : mel_spec_chunks) { media_chunk chunk; bool ok = audio_embd_make_with_clip_img(clp_ctx_a, kcpp_data->n_threads, mel_spec, &chunk.clp_img_embd, &chunk.clp_image_tokens); if (!ok) { printf("\nError: Clip audio chunk in %d failed to make embd!",i); } else { if(debugmode==1 && !is_quiet) { printf("\nAudio Clip %i Embed Chunk used Tokens: %d",i,chunk.clp_image_tokens); } total_chunk_tokens += chunk.clp_image_tokens; media_objects[i].mediachunks.push_back(chunk); } } int cliptokensneeded = total_chunk_tokens; if(cliptokensneeded>0 && cliptokensneeded < nctx) { int tokcnt = (cliptokensneeded + media_objects[i].chunk_start_seq.size() + media_objects[i].chunk_end_seq.size()); if(i==0) { tokcnt += introsize; } for(int n=0;n tmp; TokenizeString(stopper, tmp, file_format, false); if(tmp.size()==1) //tokenizes to exactly 1 special token { int specialid = tmp[0]; std::string tokenizedstr = FileFormatTokenizeID(specialid, file_format); if(tokenizedstr=="") //must NOT have a text representation { special_stop_sequence.push_back(specialid); } } } } //handle custom token bans and antislop phrase banning banned_phrases.clear(); delayed_generated_tokens_limit = 0; antislop_banned_token_ids.clear(); banned_tokens.clear(); for(int x=0;x toks; TokenizeString(word, toks, file_format, false); int tokcount = toks.size(); if(tokcount==0) { continue; } if(tokcount==1 && word.length()<12) //only use banned tokens for single characters, we can assume that means less than 12 chars usually { banned_tokens.push_back(word); } else { tokcount += 3; //add some extra buffer delayed_generated_tokens_limit = (tokcount > delayed_generated_tokens_limit ? 
tokcount : delayed_generated_tokens_limit); banned_phrases.push_back(word); } } } banned_token_ids.clear(); toolcall_prevented_ids.clear(); if(banned_tokens.size()>0) { if(debugmode==1 && !is_quiet) { printf("\nBanning %zu single character sequences...",banned_tokens.size()); } for(int v=0;v0) { printf("\nBanned a total of %zu phrases, with max token count of %d.\n",banned_phrases.size(),delayed_generated_tokens_limit); } logit_biases.clear(); for(int x=0;x= 0 && t_id < n_vocab && bias!=0) { logit_biases.push_back(inputs.logit_biases[x]); } } std::string addedmemory = inputs.memory; std::string negative_prompt = inputs.negative_prompt; //clear previous run llava embd memory, just-in-time free for(int i=0;iprompt = inputs.prompt; kcpp_data->seed = inputs.seed; kcpp_data->n_predict = inputs.max_length; kcpp_data->top_k = inputs.top_k; kcpp_data->top_p = inputs.top_p; kcpp_data->min_p = inputs.min_p; kcpp_data->typical_p = inputs.typical_p; kcpp_data->tfs_z = inputs.tfs; kcpp_data->nsigma = inputs.nsigma; kcpp_data->temp = inputs.temperature; kcpp_data->repeat_last_n = inputs.rep_pen_range; kcpp_data->rep_pen_slope = inputs.rep_pen_slope; kcpp_data->repeat_penalty = inputs.rep_pen; kcpp_data->presence_penalty = inputs.presence_penalty; kcpp_data->mirostat = inputs.mirostat; kcpp_data->mirostat_eta = inputs.mirostat_eta; kcpp_data->mirostat_tau = inputs.mirostat_tau; kcpp_data->dry_multiplier = inputs.dry_multiplier; kcpp_data->dry_base = inputs.dry_base; kcpp_data->dry_allowed_length = inputs.dry_allowed_length; kcpp_data->dry_penalty_last_n = inputs.dry_penalty_last_n; kcpp_data->xtc_threshold = inputs.xtc_threshold; kcpp_data->xtc_probability = inputs.xtc_probability; kcpp_data->dynatemp_range = inputs.dynatemp_range; kcpp_data->dynatemp_exponent = inputs.dynatemp_exponent; kcpp_data->n_ctx = inputs.max_context_length; kcpp_data->smoothing_factor = inputs.smoothing_factor; kcpp_data->smoothing_curve = inputs.smoothing_curve; kcpp_data->adaptive_target = inputs.adaptive_target; kcpp_data->adaptive_decay = inputs.adaptive_decay; adaptive_p_weighted_sum = 0; adaptive_p_total_weight = 0; if(kcpp_data->adaptive_target > 0.0f && kcpp_data->adaptive_decay<1.0f) { adaptive_p_weighted_sum = kcpp_data->adaptive_target / (1.0f - kcpp_data->adaptive_decay); adaptive_p_total_weight = 1.0f / (1.0f - kcpp_data->adaptive_decay); } // Parse dry sequence breakers / restart sequences kcpp_data->dry_sequence_breakers.clear(); dry_sequence_breakers.clear(); if (kcpp_data->dry_multiplier > 0) { for (int x = 0; x < inputs.dry_sequence_breakers_len; ++x) { std::string word = inputs.dry_sequence_breakers[x]; if (word != "") { kcpp_data->dry_sequence_breakers.push_back(word); } } if (kcpp_data->dry_sequence_breakers.size() > 0) { // Restrict the maximum length of sequences used as sequence breakers. There are // very few use cases for a long sequence breaker, and limiting the max length // prevents a potential denial of service attack in which long repetitive sequence // breakers could result in slow DRY sampling with a suitably crafted context. 
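// As a rough guide to why breaker length is capped (standard DRY formulation, not an exact trace of
// this implementation): once a repeated suffix grows past dry_allowed_length, the logit penalty is
// roughly dry_multiplier * dry_base^(match_len - dry_allowed_length). With multiplier 0.8, base 1.75
// and allowed length 2, a 6 token repeat already costs about 0.8 * 1.75^4 ~= 7.5 logits, and sequence
// breakers are the markers that stop such matches from extending across structural boundaries.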
const int MAX_CHAR_LEN = 40; const int MAX_SEQ_LEN = 20; if (debugmode == 1 && !is_quiet) { printf("\nProcessing %zu dry break strings...", kcpp_data->dry_sequence_breakers.size()); } for (auto sequence_break : kcpp_data->dry_sequence_breakers) { if (sequence_break.size() > MAX_CHAR_LEN) { sequence_break.resize(MAX_CHAR_LEN); } GetOverlappingTokenSequences(sequence_break, dry_sequence_breakers, MAX_SEQ_LEN); } if (debugmode == 1 && !is_quiet) { int trivial = 0, non_trivial = 0; for (const auto &seq : dry_sequence_breakers) { if (seq.second.empty()) { ++trivial; } else { ++non_trivial; } } printf("\nFound a total of %zu restart heads, %d trivial, %d non-trivial.\n", dry_sequence_breakers.size(), trivial, non_trivial); } } } //need to add a cursed hack to improve coherency for GLM4, by ensuring injection for gmask, sop and an extra space //any complaints please direct them to henky //deepseek2 is actually used for glm 4.7 flash if (file_format == FileFormat::GGUF_GENERIC && (file_format_meta.model_architecture == llm_arch::LLM_ARCH_GLM4 || file_format_meta.model_architecture == llm_arch::LLM_ARCH_GLM4_MOE || file_format_meta.model_architecture == llm_arch::LLM_ARCH_DEEPSEEK2)) { std::string temp = gpttype_get_chat_template(); if (temp.find("[gMASK]") != std::string::npos) { if (addedmemory == "") { if (!kcpp_data->prompt.empty() && kcpp_data->prompt.rfind("[gMASK]", 0) == 0) { //check startswith kcpp_data->prompt.erase(0, 7); } if (!kcpp_data->prompt.empty() && kcpp_data->prompt.rfind("<sop>", 0) == 0) { //check startswith kcpp_data->prompt.erase(0, 5); } if (!kcpp_data->prompt.empty() && kcpp_data->prompt[0] == ' ') { // check for leading space kcpp_data->prompt.erase(0, 1); } addedmemory = "[gMASK]<sop> "; } else { if (!addedmemory.empty() && addedmemory.rfind("[gMASK]", 0) == 0) { //check startswith addedmemory.erase(0, 7); } if (!addedmemory.empty() && addedmemory.rfind("<sop>", 0) == 0) { //check startswith addedmemory.erase(0, 5); } if (!addedmemory.empty() && addedmemory[0] == ' ') { // check for leading space addedmemory.erase(0, 1); } addedmemory = "[gMASK]<sop> " + addedmemory; } } } // Round two, gemma4 boogaloo // If it breaks your stuff you can blame me again (Or thank me because you can actually use gemma 31B stable now).
- Henk // For the record, the GLM4 one didn't break anyone and everyone forgot GLM4 needed this :D if (file_format == FileFormat::GGUF_GENERIC && (file_format_meta.model_architecture == llm_arch::LLM_ARCH_GEMMA4)) { std::string temp = gpttype_get_chat_template(); if (temp.find("<|channel>thought") != std::string::npos) { const std::string channel_open = "<|channel>"; const std::string channel_close = ""; const std::string channel_prefix = channel_open + channel_close; const bool has_open = kcpp_data->prompt.find(channel_open) != std::string::npos; const bool has_close = kcpp_data->prompt.find(channel_close) != std::string::npos; // If neither opening nor closing tag is present anywhere, prepend both if (!has_open && !has_close) { kcpp_data->prompt = channel_prefix + kcpp_data->prompt; } } } bool stream_sse = inputs.stream_sse; bool allow_regular_prints = (!is_quiet && debugmode!=-1); std::string grammarstr = inputs.grammar; bool grammar_retain_state = inputs.grammar_retain_state; if(grammar_retain_state) { if(grammarstr=="" || current_grammar!=grammarstr) //if grammar is identical, retain state { load_grammar(grammarstr); } } else { load_grammar(grammarstr); } current_grammar = grammarstr; if (kcpp_data->repeat_last_n < 1) { kcpp_data->repeat_last_n = 1; } if (kcpp_data->rep_pen_slope > 1 || kcpp_data->rep_pen_slope<=0) { kcpp_data->rep_pen_slope = 1; } if (kcpp_data->top_k < 1) { kcpp_data->top_k = n_vocab; // all tokens in the vocabulary should be considered if top k is disabled } if (kcpp_data->seed <= 0 || kcpp_data->seed==0xFFFFFFFF) { kcpp_data->seed = (((uint32_t)time(NULL)) % 1000000u); if(debugmode==1 && !is_quiet) { printf("\nUsing Seed: %d",kcpp_data->seed); } } // tokenize the prompt std::vector embd_inp; std::vector embd_inp_mem; //for storing added memory std::vector media_intro; //added before media list std::vector guidance_embd; //holds the guidance prompt bool media_embds_built = false; int32_t nctx = kcpp_data->n_ctx; TokenizeString(kcpp_data->prompt, embd_inp, file_format, add_bos_token); TokenizeString("\nAttached Media:\n", media_intro, file_format, true); if(media_composite_image_signature=="") { last_media_mem.clear(); } if(media_data_changed) { PrepareMediaEmbds(nctx, media_intro); media_embds_built = true; } if(addedmemory!="") { TokenizeString(addedmemory, embd_inp_mem, file_format, add_bos_token); } //truncate to front of the prompt if its too long if (embd_inp.size() + kcpp_data->n_predict > nctx) { //get bos token std::vector bos; TokenizeString("", bos, file_format, add_bos_token); int offset = embd_inp.size() - nctx + kcpp_data->n_predict; embd_inp = std::vector(embd_inp.begin() + offset, embd_inp.end()); //replace bos into front if exists if(bos.size()>0 && embd_inp.size()>0) { embd_inp[0] = bos[0]; } } if(last_media_mem.size()>0) //stick the llava mem before the added mem { if(last_media_mem.size() + kcpp_data->n_predict + 4 > nctx) { printf("\nWarning: Too many multimodal tokens, max context exceeded! 
They will be ignored!\n"); } else { std::vector bos; TokenizeString("", bos, file_format, add_bos_token); if(embd_inp_mem.size()>0) //remove existing bos if exists { if (bos.size()>0 && !embd_inp_mem.empty() && bos[0]==embd_inp_mem[0]) { embd_inp_mem.erase(embd_inp_mem.begin()); } } //append llava dummy tokens embd_inp_mem.insert(embd_inp_mem.begin(), last_media_mem.begin(), last_media_mem.end()); if (bos.size() > 0 && embd_inp_mem.size() > 0) { embd_inp_mem.insert(embd_inp_mem.begin(), bos[0]); //insert bos at front } //shorten memory if needed if (embd_inp_mem.size() + kcpp_data->n_predict + 4 > nctx) { int limit = nctx - (kcpp_data->n_predict + 4); if (embd_inp_mem.size() > limit) { embd_inp_mem.resize(limit); } } } } std::vector negprompt_tokens; int guidance_n_past = 0; if(guidance_ctx) { llama_memory_clear(llama_get_memory(guidance_ctx),true); //prepare negative prompt if(negative_prompt!="" && inputs.guidance_scale!=1.0f) { TokenizeString(negative_prompt+"\n", negprompt_tokens, file_format, add_bos_token); } } //added special memory, overwrite if needed if (embd_inp_mem.size() + negprompt_tokens.size() > 0) { //remove bos token from prompt, it'll be taken from memory std::vector bos; TokenizeString("", bos, file_format, add_bos_token); if (bos.size()>0 && !embd_inp.empty() && bos[0]==embd_inp[0]) { //strip away bos if exists embd_inp.erase(embd_inp.begin()); } //shorten memory if needed if (embd_inp_mem.size() > 0 && embd_inp_mem.size() + kcpp_data->n_predict + 4 > nctx) { int offset = embd_inp_mem.size() - nctx + kcpp_data->n_predict + 4; embd_inp_mem = std::vector(embd_inp_mem.begin() + offset, embd_inp_mem.end()); //replace bos into front if exists if(bos.size()>0 && embd_inp_mem.size()>0) { embd_inp_mem[0] = bos[0]; } } //shorten main prompt by trimming the front if needed int addmemtokens = embd_inp_mem.size() + negprompt_tokens.size() + 1; int totalsize = (addmemtokens + embd_inp.size() + kcpp_data->n_predict); if(totalsize > nctx) { int excess = totalsize - nctx; if (embd_inp.size() >= excess) { embd_inp.erase(embd_inp.begin(), embd_inp.begin() + excess); } else { embd_inp.clear(); } } //stick memory to front of prompt embd_inp.insert(embd_inp.begin(), embd_inp_mem.begin(), embd_inp_mem.end()); if(add_bos_token && embd_inp.size()>0 && bos.size()>0 && bos[0]!=embd_inp[0]) { embd_inp.insert(embd_inp.begin(), bos[0]); //insert bos at front, if added } } //prepare negative prompt if(guidance_ctx && negprompt_tokens.size()>0 && inputs.guidance_scale!=1.0f) { guidance_embd = embd_inp; //clone main prompt std::vector bos; TokenizeString("", bos, file_format, add_bos_token); if (bos.size()>0 && !guidance_embd.empty() && bos[0]==guidance_embd[0]) { guidance_embd.erase(guidance_embd.begin()); } // Insert at the beginning of everything. size is already handled guidance_embd.insert(guidance_embd.begin(), negprompt_tokens.begin(), negprompt_tokens.end()); //eval the guidance prompt printf("\nPreparing Negative Prompt (%zu tokens)", guidance_embd.size()); kcpp_embd_batch batch = kcpp_embd_batch(guidance_embd, 0, use_mrope, false); auto er = llama_decode(guidance_ctx, batch.batch); if(er!=0) { printf("\nProcess Negative Prompt Failed! 
(code:%d)\n",er); } guidance_n_past += guidance_embd.size(); } //determine how much npast we have to rewind from the current state std::vector embd; int last_n_size = kcpp_data->repeat_last_n; last_n_tokens.resize(last_n_size); std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0); n_past = 0; if (debugmode==1 && !is_quiet) { std::string outstr = ""; printf("\n\n[Debug: Dump %zu Raw Input Tokens]\n",embd_inp.size()); outstr += get_tok_vec_str(embd_inp); printf("%s\n", RemoveBell(outstr).c_str()); } bool is_recurrent = false; if(file_format==FileFormat::GGUF_GENERIC) { const llama_model * mdl = llama_get_model(llama_ctx_v4); if(llama_model_is_recurrent(mdl) || llama_model_is_hybrid(mdl)) { is_recurrent = true; } } bool blank_prompt = (addedmemory=="" && kcpp_data->prompt==""); //smart cache logic if(kcpp_data->smartcache && file_format==FileFormat::GGUF_GENERIC) { bool shiftable = true; if(!kcpp_data->use_contextshift || is_recurrent) { shiftable = false; } //we handle recurrent models differently since they require a full subset match if(is_recurrent) { bool curr_usable = FullyContainedPrefix(current_context_tokens,embd_inp); if(!curr_usable) { //see if we have any other usable contexts out there int bestslot = -1; int bestlen = 0; int identical_slot = get_identical_existing_slot(); //see if the slot already exists // printf("\n\nEMBD_INPUT: %d\n",embd_inp.size()); // for(int x=0;xbestlen) { bestlen = target_len; bestslot = i; } } if(bestslot!=-1) //found a good slot to load { int oldest_slot = get_oldest_slot(bestslot); if(oldest_slot!=bestslot) { if(current_context_tokens.size() > 32) //do not save tiny contexts { if(identical_slot==-1) { printf("\n[SmartCache RNN Match of %d tokens in slot %d. Saving into slot %d and switching...]\n",bestlen,bestslot,oldest_slot); gpttype_save_state_kv(oldest_slot); } else { printf("\n[SmartCache RNN Match of %d tokens in slot %d. Already saved in slot %d, switching...]\n",bestlen,bestslot,identical_slot); touch_slot(identical_slot); } } else { printf("\n[SmartCache RNN Match of %d tokens in slot %d. Switching...]\n",bestlen,bestslot); } gpttype_load_state_kv(bestslot); } } else { if(current_context_tokens.size() > 32) //do not save tiny contexts { if(identical_slot==-1) { int oldest_slot = get_oldest_slot(-1); printf("\n[SmartCache RNN No Match, Saving into slot %d...]\n",oldest_slot); gpttype_save_state_kv(oldest_slot); } else { printf("\n[SmartCache RNN No Match, Already saved in slot %d]\n",identical_slot); touch_slot(identical_slot); } } } } } else if(!(shiftable && CanContextShift(current_context_tokens, embd_inp, inputs.max_length, nctx))) //If CanBeShifted is true, do nothing. Allow shift as normal. { // If CanBeShifted is false, calculate prefix similarity with current_context_tokens of current context // If similarity > similarity_threshold, do nothing. Allow fast forward as normal. float similarity = ComputePrefixMatchPercent(current_context_tokens,embd_inp); const float similarity_threshold = 0.7f; if(similarity < similarity_threshold) { // Otherwise, for each of the currently used kv state slots, calculate ComputePrefixMatch and CanBeShifted // If similarity to any of them > similarity_threshold or CanBeShifted, save current slot and switch to that slot. // Whenever loading or saving current slot, simply tag the slot with a timestamp. When running out of slots after all 3 are used, delete the oldest timestamped slot. // Slot loading and saving completely reuses gpttype_load_state_kv and gpttype_save_state_kv, nothing else is needed. 
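// A rough sketch of the slot-swap decision below (illustrative only; the authoritative logic is the code that follows):
//   for each savestate slot s:
//       sim = ComputePrefixMatchPercent(savestates[s].savestate_context_tokens, embd_inp)
//       if sim > similarity_threshold, or that saved context can simply be shifted onto the new prompt:
//           snapshot the live KV into the oldest slot (unless an identical snapshot already exists), then load slot s
//   if no slot qualifies, just snapshot the live KV into the oldest slot and continue with the current context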
bool foundswap = false; int identical_slot = get_identical_existing_slot(); //see if a slot already exists with identical data to current for(int i=0;i similarity_threshold || (shiftable && CanContextShift(savestates[i].savestate_context_tokens, embd_inp, inputs.max_length, nctx))) { //found a match. save to the oldest slot thats not the one we are loading int oldest_slot = get_oldest_slot(i); if(oldest_slot!=i) { if(current_context_tokens.size() > 32) //do not save tiny contexts { if(identical_slot==-1) { printf("\n[SmartCache Match of %.2f in slot %d. Saving into slot %d and switching...]\n",similaritybeat,i,oldest_slot); gpttype_save_state_kv(oldest_slot); } else { printf("\n[SmartCache Match of %.2f in slot %d. Already saved in slot %d, switching...]\n",similaritybeat,i,identical_slot); touch_slot(identical_slot); } } else { printf("\n[SmartCache Match of %.2f in slot %d. Switching...]\n",similaritybeat,i); } gpttype_load_state_kv(i); foundswap = true; break; } } } if(!foundswap) //could not match anything, just save kv and continue { if(current_context_tokens.size() > 32) //do not save tiny contexts { if(identical_slot==-1) { int oldest_slot = get_oldest_slot(-1); printf("\n[SmartCache No Match, Saving into slot %d...]\n",oldest_slot); gpttype_save_state_kv(oldest_slot); } else { printf("\n[SmartCache No Match, Already saved in slot %d]\n",identical_slot); touch_slot(identical_slot); } } } } } } if (file_format == FileFormat::RWKV_1 || file_format==FileFormat::RWKV_2 || is_recurrent) { if(!blank_prompt) { if(kcpp_data->use_fastforward) { ContextFastForward(current_context_tokens, embd_inp, n_past, last_n_tokens, nctx, smartcontext, false, true, 0); } } if(is_recurrent) { if(n_past==0) { llama_memory_clear(llama_get_memory(llama_ctx_v4),true); if(draft_ctx) { llama_memory_clear(llama_get_memory(draft_ctx),true); } } else { if(current_context_tokens.size()>0 && last_n_tokens.size()>0) { int maxedpos = llama_memory_seq_pos_max(llama_get_memory(llama_ctx_v4),0); if(maxedpos+2==n_past) { //kcpp: a very dirty hack for rnn models. this happens because the very last token of the last turn //does not actually get processed but is still added to current_context_tokens. if the instruct start tag starts with that same token //it might get wrongly fast forwarded and we will get an off by 1 error. 
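//concretely: the memory's max position ends up at n_past-2, so below we pop that unprocessed token from
//last_n_tokens/current_context_tokens, rewind n_past by one, and push it back onto embd_inp so it actually gets evaluated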
//todo: figure out a better way to solve this rubbish int tail = last_n_tokens[last_n_tokens.size()-1]; last_n_tokens.pop_back(); current_context_tokens.pop_back(); n_past -=1; embd_inp.insert(embd_inp.begin(), 1, tail); } else if(maxedpos==n_past) { n_past += 1; } //it is generally preferable to not have the embd_inp array empty unless doing so would cause an error // if(embd_inp.size()==0 && current_context_tokens.size()>0) // { // embd_inp.push_back(current_context_tokens[current_context_tokens.size()-1]); // current_context_tokens.pop_back(); // n_past -= 1; // } } } } } else { bool triggersc = kcpp_data->use_smartcontext; if(!blank_prompt) //special case for blank prompts, no fast forward or shifts { if(kcpp_data->use_fastforward && kcpp_data->use_contextshift && (file_format == FileFormat::GGUF_GENERIC)) { DoContextShifting(llama_ctx_v4, draft_ctx, current_context_tokens, embd_inp, inputs.max_length, nctx, false); triggersc = false; } if(kcpp_data->use_fastforward) { ContextFastForward(current_context_tokens, embd_inp, n_past, last_n_tokens, nctx, smartcontext, triggersc, false, 4); } } if(file_format == FileFormat::GGUF_GENERIC) { if(n_past==0) //force full clear { llama_memory_clear(llama_get_memory(llama_ctx_v4),true); } else { llama_memory_seq_rm(llama_get_memory(llama_ctx_v4), 0, n_past, -1); } if(draft_ctx) { llama_memory_seq_rm(llama_get_memory(draft_ctx), 0, n_past, -1); } } } bool blasmode = (embd_inp.size() >= 32 && kcpp_cpu_has_blas() && kcpp_data->n_batch>=32); if(current_context_tokens.size()>n_past) { current_context_tokens.resize(n_past); } remaining_tokens = kcpp_data->n_predict; int input_consumed = 0; std::mt19937 rng(kcpp_data->seed); //do some reservation so we don't have to realloc generated_tokens.reserve(remaining_tokens+16); //prepare sampler order std::vector sampler_order; if(inputs.sampler_len<=0) //list by value { sampler_order = { KCPP_SAMPLER_REP_PEN, KCPP_SAMPLER_TOP_K, KCPP_SAMPLER_TOP_A, KCPP_SAMPLER_TFS, KCPP_SAMPLER_TYP, KCPP_SAMPLER_TOP_P, KCPP_SAMPLER_TEMP }; } else { for(int i=0;istate_in = nullptr; } else { rwkv_ctx_v3->state_in = nullptr; } } else { if (file_format == FileFormat::RWKV_1) { rwkv_ctx_v2->state_in = rwkv_ctx_v2->state_out; } else { rwkv_ctx_v3->state_in = rwkv_ctx_v3->state_out; } //if it's empty, push in the final previous token if(embd_inp.size()==0 && current_context_tokens.size()>0) { embd_inp.push_back(current_context_tokens[current_context_tokens.size()-1]); current_context_tokens.pop_back(); } } } if(n_vocab<=0) { printf("\nWarning! n_vocab is invalid, maybe bad format!"); } if(allow_regular_prints) { printf("\n"); } if (debugmode==1 && !is_quiet) { std::string outstr = ""; // printf("\n[Debug: Dump Forwarded Input Tokens]\n"); // outstr += get_tok_vec_str(embd_inp); // outstr += "\n"; outstr += "[Debug: embd_inp="+std::to_string(embd_inp.size())+" n_past="+std::to_string(n_past)+" Context Size = " + std::to_string(current_context_tokens.size()) + "]"; // outstr += "\n"; // outstr += get_tok_vec_str(current_context_tokens); printf("%s\n\n", RemoveBell(outstr).c_str()); } while (remaining_tokens > 0 && !early_abort) { gpt_vocab::id id = 0; // predict unsigned int embdsize = embd.size(); //print progress if (!startedsampling && allow_regular_prints) { printf("\rProcessing Prompt%s (%d / %zu tokens)", (blasmode ? 
" [BATCH]" : ""), input_consumed, embd_inp.size()); } fflush(stdout); if (embdsize > 0) { bool evalres = false; if (file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2) { evalres = (llama_v2_eval(llama_ctx_v2, embd.data(), embdsize, n_past, GetThreadsToUse(blasmode))==0); } else if(file_format == FileFormat::GGJT_3) { evalres = (llama_v3_eval(llama_ctx_v3, embd.data(), embdsize, n_past, GetThreadsToUse(blasmode))==0); } else if(file_format == FileFormat::GGUF_GENERIC) { if(guidance_ctx && negprompt_tokens.size()>0 && inputs.guidance_scale!=1.0f && embd.size()==1 && startedsampling) { //eval for negative prompt kcpp_embd_batch gbatch = kcpp_embd_batch(embd, guidance_n_past, use_mrope, false); auto er = llama_decode(guidance_ctx, gbatch.batch); if(er!=0) { printf("\nGenerate with Negative Prompt Failed! (code:%d)\n",er); } guidance_n_past += 1; } if(embd.size()!=1 || draft_ctx==nullptr || remaining_tokens<=speculative_chunk_amt || grammar!=nullptr || startedsampling==false) //for large batch, or if no draft model, PP/TG as usual { draft_used = false; kcpp_embd_batch batch = kcpp_embd_batch(embd, n_past, use_mrope, false); int32_t decode_status = -1; bool skipdecodelater = false; //if running rnn model in smartcache mode, save progress a little bit before the final PP is done //this helps solve token boundary mutation issues if(draft_ctx==nullptr && embd.size()>1 && !startedsampling && input_consumed==embd_inp.size() && input_consumed>64) { if(kcpp_data->smartcache && is_recurrent && file_format==FileFormat::GGUF_GENERIC && current_context_tokens.size() > 32) { if(embd.size()<=48) { //directly snapshot for a small batch smartcache_quick_snapshot(); rnn_snapshot_taken = true; } else { skipdecodelater = true; //decode until nearly done, then snapshot and decode the last 32 std::vector> parts = split_big_vector_in_two(embd,32); int temp_past = n_past; evalres = true; for(int p=0;p chunk = parts[p]; kcpp_embd_batch smallbatch = kcpp_embd_batch(chunk, temp_past, use_mrope, false); decode_status = llama_decode(llama_ctx_v4, smallbatch.batch); if(p==0 && decode_status==1) { skipdecodelater = false; break; //big pp failed } evalres = (evalres && (decode_status==0)); temp_past += chunk.size(); } } } } if(!skipdecodelater) { decode_status = llama_decode(llama_ctx_v4, batch.batch); if(decode_status==1 && embd.size()>128) { printf("Couldn't find a big KV slot. Retry with smaller batch size of 128...\n"); std::vector> parts = split_big_vector(embd,128); int temp_past = n_past; evalres = true; for(int p=0;p chunk = parts[p]; kcpp_embd_batch smallbatch = kcpp_embd_batch(chunk, temp_past, use_mrope, false); int32_t decode_status2 = llama_decode(llama_ctx_v4, smallbatch.batch); if(debugmode==1 && !is_quiet) { printf("Retry chunk: %zu at %d... 
status: %s\n",chunk.size(),temp_past,(decode_status2==0?"ok":"fail")); } evalres = (evalres && (decode_status2==0)); temp_past += chunk.size(); } } else { evalres = (decode_status==0); } } if(draft_ctx) { evalres = (evalres && (llama_decode(draft_ctx, batch.batch)==0)); } } else { //individual tokens AND speculative is used (generation) draft_used = true; draft_results = speculative_decoding_eval_chunk(draft_ctx, llama_ctx_v4, embd, n_vocab, n_past); evalres = draft_results.draft_success; if(debugmode==1 && !is_quiet) { std::string draftedtoks = get_tok_vec_str(draft_results.draftids); printf("\nDrafted %d Tokens: [%s]\n",speculative_chunk_amt,draftedtoks.c_str()); } } } else if(file_format==FileFormat::RWKV_1 || file_format==FileFormat::RWKV_2) { if (file_format == FileFormat::RWKV_1) { evalres = rwkv_v2_eval(rwkv_ctx_v2, embd[0], rwkv_ctx_v2->state_in, rwkv_ctx_v2->state_out, rwkv_ctx_v2->logits_out); memcpy(logits.data(), rwkv_ctx_v2->logits_out, sizeof(float) * rwkv_vocab.size()); rwkv_ctx_v2->state_in = rwkv_ctx_v2->state_out; } else { if(embd.size()>1) { evalres = rwkv_eval_sequence(rwkv_ctx_v3, GetThreadsToUse(blasmode), (uint32_t*)embd.data(), embd.size(), rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, rwkv_ctx_v3->logits_out); } else { bool ignoreLogits = (!startedsampling && ((int)embd_inp.size() > input_consumed + 2)); evalres = rwkv_eval(rwkv_ctx_v3, GetThreadsToUse(blasmode), embd[0], rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, ignoreLogits?nullptr:rwkv_ctx_v3->logits_out); } memcpy(logits.data(), rwkv_ctx_v3->logits_out, sizeof(float) * rwkv_vocab.size()); rwkv_ctx_v3->state_in = rwkv_ctx_v3->state_out; } } else if(file_format==FileFormat::GPT2_1) { evalres = legacy_gpt2_eval(gpt2_ctx_v1, GetThreadsToUse(blasmode), n_past, embd, logits, mem_per_token, file_format); } else if(file_format==FileFormat::GPT2_2 || file_format==FileFormat::GPT2_3) { evalres = gpt2_v2_eval(gpt2_ctx_v2, GetThreadsToUse(blasmode), n_past, embd, logits, mem_per_token, file_format); } else if(file_format==FileFormat::GPT2_4) { evalres = gpt2_eval(gpt2_ctx_v3, GetThreadsToUse(blasmode), n_past, embd, logits, mem_per_token, v3_use_scratch); } else if(file_format==FileFormat::NEOX_1 || file_format == FileFormat::NEOX_2 || file_format == FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5) { evalres = gpt_neox_v2_eval(neox_ctx_v2, GetThreadsToUse(blasmode), n_past, embd, logits, mem_per_token); } else if(file_format==FileFormat::NEOX_6|| file_format==FileFormat::NEOX_7) { evalres = gpt_neox_eval(neox_ctx_v3, GetThreadsToUse(blasmode), n_past, embd, logits, mem_per_token, v3_use_scratch); } else if(file_format==FileFormat::GPTJ_1 || file_format==FileFormat::GPTJ_2) { evalres = legacy_gptj_eval(gptj_ctx_v1, GetThreadsToUse(blasmode), n_past, embd, logits, mem_per_token, file_format); } else if(file_format==FileFormat::GPTJ_3 || file_format==FileFormat::GPTJ_4) { evalres = gptj_v2_eval(gptj_ctx_v2, GetThreadsToUse(blasmode), n_past, embd, logits, mem_per_token); } else if(file_format==FileFormat::GPTJ_5) { evalres = gptj_eval(gptj_ctx_v3, GetThreadsToUse(blasmode), n_past, embd, logits, mem_per_token, v3_use_scratch); } else if(file_format==FileFormat::MPT_1) { evalres = mpt_eval(mpt_ctx_v3, GetThreadsToUse(blasmode), n_past, embd, logits, false, mem_per_token, v3_use_scratch); } else { printf("\nCannot find eval function\n"); } if (!evalres) { fprintf(stderr, "\nFailed to predict at token position %d! 
Check your context buffer sizes!\n",n_past); media_composite_image_signature = ""; //force invalidate output.text = nullptr; output.status = 0; output.prompt_tokens = output.completion_tokens = 0; last_stop_reason = stop_reason::ERROR_ENCOUNTERED; output.stopreason = last_stop_reason; generation_finished = true; return output; } firstdecodedone = true; } n_past += embd.size(); embd.clear(); if (!early_abort && (int)embd_inp.size() <= input_consumed) //if decoding was aborted, DO NOT perform any sampling { // out of user input, sample next token const float top_k = kcpp_data->top_k; const float top_p = kcpp_data->top_p; const float min_p = kcpp_data->min_p; const float temp = kcpp_data->temp; const float top_a = inputs.top_a; const float repeat_penalty = kcpp_data->repeat_penalty; const float presence_penalty = kcpp_data->presence_penalty; const float typical_p = kcpp_data->typical_p; const float tfs_z = kcpp_data->tfs_z; const float nsigma = kcpp_data->nsigma; const float dynatemp_range = kcpp_data->dynatemp_range; const float dynatemp_exponent = kcpp_data->dynatemp_exponent; const float smoothing_factor = kcpp_data->smoothing_factor; const float smoothing_curve = kcpp_data->smoothing_curve; const float adaptive_target = kcpp_data->adaptive_target; const float adaptive_decay = kcpp_data->adaptive_decay; if (!startedsampling) { startedsampling = true; time1 = timer_check(); timer_start(); if(allow_regular_prints) { printf("\n"); } //if running rnn model in smartcache mode, save progress before each gen if(kcpp_data->smartcache && is_recurrent && file_format==FileFormat::GGUF_GENERIC && current_context_tokens.size() > 32) { if(!rnn_snapshot_taken) { smartcache_quick_snapshot(); } else { //snapshot to specific slot only if(rnn_reusable_slot_idx!=-1) { smartcache_quick_snapshot(rnn_reusable_slot_idx); } else { rnn_reusable_slot_idx = smartcache_quick_snapshot(); } } } } const std::vector eog_tokens = GetEogIDs(file_format,n_vocab); float * logitsPtr; float lowestLogit = 0; int btsize = banned_token_ids.size(); int tcpreventsize = toolcall_prevented_ids.size(); //sample pending logits. usually only 1, unless speculative decoding int logits_to_sample = 1; int logits_sampled = 0; bool abort_draft = false; if(draft_used) { logits_to_sample = draft_results.drafted_amount; } while(logits_sampled0 && !abort_draft && !early_abort) { if(logits_sampled>0) { //this is not the first loop, so we need to increment some things n_past += 1; } if(file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2 || file_format == FileFormat::GGJT_3 || file_format == FileFormat::GGUF_GENERIC) { if(file_format == FileFormat::GGUF_GENERIC) { if(draft_used) { logitsPtr = draft_results.actual_logits[logits_sampled]; } else { logitsPtr = llama_get_logits(llama_ctx_v4); } } else if(file_format == FileFormat::GGJT_3) { logitsPtr = llama_v3_get_logits(llama_ctx_v3); } else { logitsPtr = llama_v2_get_logits(llama_ctx_v2); } lowestLogit = LowestLogit(logitsPtr,n_vocab); } else { logitsPtr = logits.data(); //legacy rwkv, neox, gptj etc lowestLogit = LowestLogit(logits); } if(!firstdecodedone && current_context_tokens.size()>0) { if(loaded_latest_logits.size()>0) { if(debugmode==1 && !is_quiet) { printf("\nLoading %d saved logits...\n",loaded_latest_logits.size()); } //first decode was not done. this can happen when reloading from a perfectly matched state. 
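//(in that case the KV cache was restored directly from a savestate and nothing has been decoded this turn,
// so llama_get_logits would not hold logits for the final context token)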
//to prevent a catastrophic failure, we must prepare emergency logits for usage logitsPtr = loaded_latest_logits.data(); lowestLogit = LowestLogit(logitsPtr,n_vocab); } else { printf("\nNo cached logits and we need them, emergency fallback with degraded quality...\n"); embd.clear(); embd.push_back(current_context_tokens[current_context_tokens.size()-1]); break; } } //if adaptive p sampling is used, we need to cache the original probabilities std::vector original_candidates; if(adaptive_target > 0.0f) { original_candidates.reserve(n_vocab); for (llama_token token_id = 0; token_id < n_vocab; token_id++) { original_candidates.emplace_back(llama_token_data{token_id, logitsPtr[token_id], 0.0f}); } llama_token_data_array original_candidates_p = { original_candidates.data(), original_candidates.size(), false }; sample_softmax(&original_candidates_p,false); } if(file_format == FileFormat::GGUF_GENERIC && guidance_ctx && negprompt_tokens.size()>0 && inputs.guidance_scale!=1.0f) { sample_guidance(llama_ctx_v4, guidance_ctx, n_vocab, inputs.guidance_scale); } //handle token bans if (!inputs.allow_eos_token && !inputs.bypass_eos_token) { // set the logit of the eos token to very low to avoid sampling it for(int i=0;i0) { for(int t=0;tn_predict - remaining_tokens)<3); if(tcpreventsize>0 && tcpreventtoks && std::count(concat_output.begin(), concat_output.end(), '[')<=1) { for(int t=0;t& bans = antislop_banned_token_ids[n_past]; for(int t=0;trep_pen_slope, presence_penalty, top_k, top_a, top_p, min_p, typical_p, tfs_z, nsigma, temp, rng, kcpp_data->mirostat, kcpp_data->mirostat_tau, kcpp_data->mirostat_eta, kcpp_data->dry_multiplier, kcpp_data->dry_base, kcpp_data->dry_allowed_length, kcpp_data->dry_penalty_last_n, kcpp_data->xtc_threshold, kcpp_data->xtc_probability, sampler_order, grammar, dynatemp_range, dynatemp_exponent, smoothing_factor, smoothing_curve, adaptive_target); if (adaptive_target > 0.0f) { float original_prob = original_candidates[id].p; adaptive_p_update_history(original_prob, adaptive_p_weighted_sum, adaptive_p_total_weight, adaptive_decay); } if(draft_used) { int32_t draftedid = draft_results.draftids[logits_sampled]; if(debugmode==1 && !is_quiet) { std::string drafttok = FileFormatTokenizeID(draftedid, file_format, true); std::string realtok = FileFormatTokenizeID(id, file_format, true); printf("(Draft %d/%d): Predicted=%d (%s), Actual=%d (%s) [%s]\n",(logits_sampled+1),logits_to_sample,draftedid,drafttok.c_str(),id,realtok.c_str(),(draftedid==id?"PASS":"FAIL")); } if(draftedid!=id) //draft mismatch, abort { draft_failures += 1; abort_draft = true; } else { draft_successes += 1; } } if (grammar != nullptr) { grammar_accept_token(file_format, n_vocab, grammar, id); } if (!last_n_tokens.empty()) { last_n_tokens.erase(last_n_tokens.begin()); } last_n_tokens.push_back(id); current_context_tokens.push_back(id); // add it to the context embd.clear(); embd.push_back(id); // decrement remaining sampling budget --remaining_tokens; for (auto eid : embd) { std::string tokenizedstr = FileFormatTokenizeID(eid, file_format, inputs.render_special); bool found_eog = std::find(eog_tokens.begin(), eog_tokens.end(), eid) != eog_tokens.end(); if(!inputs.render_special && (found_eog || VecContainsIntVal(special_stop_sequence,id))) //extra filter to avoid unwanted special tokens { tokenizedstr = ""; //prevent render } delayed_generated_tokens.push_back(tokenizedstr); while(delayed_generated_tokens.size() > delayed_generated_tokens_limit && delayed_generated_tokens.size() > 0) { 
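//drain the oldest delayed token into the visible output; concat_output is guarded by the mutex because it may be read concurrently while streaming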
generated_tokens.push_back(delayed_generated_tokens[0]); concat_output_mtx.lock(); concat_output += delayed_generated_tokens[0]; concat_output_mtx.unlock(); delayed_generated_tokens.pop_front(); } } if (startedsampling && allow_regular_prints) { printf("\rGenerating (%d / %d tokens)", (kcpp_data->n_predict - remaining_tokens), kcpp_data->n_predict); } if(debugmode==1 && !is_quiet && top_picks_history.size()>0) { printf(" ["); bool firstloop = true; TopPicksData toppick = top_picks_history[top_picks_history.size()-1]; std::string topstr = toppick.selected_token; ::utreplace(topstr, "\n", "\\n"); printf("(%s <%d> %.2f%%)", RemoveBell(topstr).c_str(), toppick.selected_tokenid, toppick.selected_probability*100); int maxtoshow = (toppick.tokenid.size()>4?4:toppick.tokenid.size()); //hardcode limit even if we have more logprobs_max for (int i=0;i 0) { std::string scanstr = ""; for (int i = 0; i < delayed_generated_tokens.size(); ++i) { scanstr += delayed_generated_tokens[i]; } scanstr = toLowerCase(scanstr); for (const auto &matched : banned_phrases) { std::string matched_lower = toLowerCase(matched); if (scanstr.find(matched_lower) != std::string::npos) { //find the position in the string that contains all necessary tokens std::string checkstr = ""; int rewind_amt = 0; for (int i = delayed_generated_tokens.size() - 1; i >= 0; --i) { checkstr = delayed_generated_tokens[i] + checkstr; ++rewind_amt; if (toLowerCase(checkstr).find(matched_lower) != std::string::npos) { break; } } if (rewind_amt > 0 && (current_context_tokens.size() - rewind_amt) > 0) { int last_tok = current_context_tokens[current_context_tokens.size() - rewind_amt]; bool rwok = ContextRewind(embd, current_context_tokens, n_past, last_n_tokens, rewind_amt); //immediately terminate drafting if used abort_draft = true; if(rwok) { delayed_generated_tokens.resize(delayed_generated_tokens.size() - rewind_amt); // Check if the key exists int banindex = n_past+1; if (antislop_banned_token_ids.find(banindex) == antislop_banned_token_ids.end()) { antislop_banned_token_ids[banindex] = std::vector(); } std::vector& current_ids = antislop_banned_token_ids[banindex]; current_ids.push_back(last_tok); if (allow_regular_prints && debugmode == 1) { auto match_clean = matched; replace_all(match_clean, "\n", "\\n"); printf("\n(Banned Phrase Detected: %s - Add ID %d to banlist at index %d, and rewinding %d tokens)\n", match_clean.c_str(), last_tok, banindex, rewind_amt); } } break; } } } } if(!early_abort) { bool found_eog = std::find(eog_tokens.begin(), eog_tokens.end(), id) != eog_tokens.end(); if(!inputs.bypass_eos_token && inputs.allow_eos_token && found_eog) { if(allow_regular_prints) { printf("\n(EOS token triggered! ID:%d)",id); } early_abort = true; last_stop_reason = stop_reason::EOS_TOKEN_HIT; } } if(!early_abort) { for (const auto &matched : special_stop_sequence) { if(id==matched) { if(allow_regular_prints) { printf("\n(Special Stop Token Triggered! 
ID:%d)",matched); } early_abort = true; last_stop_reason = stop_reason::EOS_TOKEN_HIT; break; } } } if(!early_abort) { for (const auto &matched : stop_sequence) { if (concat_output.find(matched) != std::string::npos) { early_abort = true; if(allow_regular_prints) { auto match_clean = matched; replace_all(match_clean, "\n", "\\n"); printf("\n(Stop sequence triggered: %s)", match_clean.c_str()); } last_stop_reason = stop_reason::CUSTOM_STOPPER; break; } } } logits_sampled += 1; } //if we have somehow skipped ahead (e.g drafting), ensure that all tokens after npast are purged if (file_format == FileFormat::GGUF_GENERIC && draft_used) { llama_memory_seq_rm(llama_get_memory(llama_ctx_v4), 0, n_past, -1); if (draft_ctx) { llama_memory_seq_rm(llama_get_memory(draft_ctx), 0, n_past, -1); } } fflush(stdout); } else if(!early_abort) //do not ingest prompt if aborted! { // some user input remains from prompt or interaction, forward it to processing while ((int)embd_inp.size() > input_consumed) { int currtoken = embd_inp[input_consumed]; if(currtoken==MEDIA_TOKEN_IDENTIFIER_A || currtoken==MEDIA_TOKEN_IDENTIFIER_B) //special llava token hit { if(!media_embds_built) //this should never happen! however, handle it anyway { PrepareMediaEmbds(nctx, media_intro); media_embds_built = true; printf("\nSomehow media embeds was not prepared (maybe no fast forward), rebuilding it...\n"); } //if partial batch, dispatch existing first if(embd.size()>0) { break; } else { //batch is empty, do image processing int llavatokenscounted = 0; int llavatokensevaled = 0; int introsize = media_intro.size(); while(input_consumed < embd_inp.size() && (embd_inp[input_consumed]==MEDIA_TOKEN_IDENTIFIER_A || embd_inp[input_consumed]==MEDIA_TOKEN_IDENTIFIER_B)) { if (!last_n_tokens.empty()) { last_n_tokens.erase(last_n_tokens.begin()); } last_n_tokens.push_back(currtoken); current_context_tokens.push_back(currtoken); ++input_consumed; ++llavatokenscounted; } for(int i=0;i0 && i==0) { //added at the start of everything kcpp_embd_batch batch = kcpp_embd_batch(media_intro, n_past, use_mrope, false); auto evr = llama_decode(llama_ctx_v4, batch.batch); if(evr!=0) { printf("\nError when appending media intro: %d\n",evr); } else { printf("\rProcessing Media Intro (%d tokens)",introsize); } n_past += introsize; llavatokensevaled += introsize; } int start_size = media_objects[i].chunk_start_seq.size(); if (start_size > 0) { //add a separator between each image kcpp_embd_batch batch = kcpp_embd_batch(media_objects[i].chunk_start_seq, n_past, use_mrope, false); auto evr = llama_decode(llama_ctx_v4, batch.batch); if(evr!=0) { printf("\nError when appending media separator: %d\n",evr); } else { printf("\rProcessing Media Start Separator (%d tokens)",start_size); } n_past += start_size; llavatokensevaled += start_size; } for(int j=0;jn_batch,&n_past,is2d); llavatokensevaled += chunk.clp_image_tokens; if(!err) { media_composite_image_signature = ""; //force invalidate fprintf(stderr, "\nFailed to eval media tokens at %d!\n",n_past); output.text = nullptr; output.status = 0; output.prompt_tokens = output.completion_tokens = 0; last_stop_reason = stop_reason::ERROR_ENCOUNTERED; output.stopreason = last_stop_reason; generation_finished = true; return output; } } int end_size = media_objects[i].chunk_end_seq.size(); if (end_size > 0) { //add a separator between each image kcpp_embd_batch batch = kcpp_embd_batch(media_objects[i].chunk_end_seq, n_past, use_mrope, false); auto evr = llama_decode(llama_ctx_v4, batch.batch); if(evr!=0) { printf("\nError 
when appending media separator: %d\n",evr); } else { printf("\rProcessing Media End Separator (%d tokens)",end_size); } n_past += end_size; llavatokensevaled += end_size; } } if(llavatokenscounted!=llavatokensevaled) { media_composite_image_signature = ""; //force invalidate fprintf(stderr, "\nMedia tokens mismatch at %d! (%d vs %d tokens)\n",n_past,llavatokenscounted,llavatokensevaled); output.text = nullptr; output.status = 0; output.prompt_tokens = output.completion_tokens = 0; last_stop_reason = stop_reason::ERROR_ENCOUNTERED; output.stopreason = last_stop_reason; generation_finished = true; return output; } } } else { embd.push_back(currtoken); if (!last_n_tokens.empty()) { last_n_tokens.erase(last_n_tokens.begin()); } last_n_tokens.push_back(currtoken); current_context_tokens.push_back(currtoken); ++input_consumed; if ((int)embd.size() >= kcpp_data->n_batch) { break; } } } } } //flush any remaining delayed tokens while(delayed_generated_tokens.size() > 0) { generated_tokens.push_back(delayed_generated_tokens[0]); concat_output_mtx.lock(); concat_output += delayed_generated_tokens[0]; concat_output_mtx.unlock(); delayed_generated_tokens.pop_front(); } //if running rnn model in smartcache mode, save progress after each gen // if(kcpp_data->smartcache && is_recurrent && file_format==FileFormat::GGUF_GENERIC && current_context_tokens.size() > 32) // { // smartcache_quick_snapshot(); // } if(debugmode==1 && !is_quiet && file_format == FileFormat::GGUF_GENERIC) { printf("\n"); llama_perf_context_print(llama_ctx_v4); } time2 = timer_check(); float pt1 = (time1*1000.0/(embd_inp.size()==0?1:embd_inp.size())); float ts1 = (pt1>0?(1000.0/pt1):0); int realnpredict = kcpp_data->n_predict-remaining_tokens; float pt2 = (time2*1000.0/(realnpredict<=0?1:realnpredict)); float ts2 = (pt2>0?(1000.0/pt2):0); float tottime = (time1 + time2); float tokens_per_second = tottime>0?(realnpredict <= 0 ? 0 : realnpredict / tottime):0; if(debugmode==1) { printf("\n[%s] CtxLimit:%d/%d, Amt:%d/%d, Init:%.2fs, Process:%.2fs (%.1fms/T = %.2fT/s), Generate:%.2fs (%.1fms/T = %.2fT/s), Total:%.2fs (%.2fT/s)",get_timestamp_str().c_str(),(int)current_context_tokens.size(),(int)nctx, realnpredict, kcpp_data->n_predict, time0, time1, pt1, ts1, time2, pt2, ts2, (time1 + time2), tokens_per_second); } else { printf("\n[%s] CtxLimit:%d/%d, Amt:%d/%d, Init:%.2fs, Process:%.2fs (%.2fT/s), Generate:%.2fs (%.2fT/s), Total:%.2fs",get_timestamp_str().c_str(),(int)current_context_tokens.size(),(int)nctx, realnpredict, kcpp_data->n_predict, time0, time1, ts1, time2, ts2, (time1 + time2)); } if(debugmode==1 && !is_quiet && (draft_successes+draft_failures)>0) { printf("\n(Draft Results - Success:%d, Failure:%d)",draft_successes,draft_failures); } if(check_slowness && ts2<2.0f) { check_slowness = false; if(!is_quiet) { printf("\n======\nNote: Your generation speed appears rather slow. 
You can try relaunching KoboldCpp with the high priority toggle (or --highpriority) to see if it helps.\n======\n"); } } fflush(stdout); output.status = 1; int finaltokcount = (int)current_context_tokens.size()-realnpredict; output.prompt_tokens = (finaltokcount<0?0:finaltokcount); output.completion_tokens = realnpredict; output.stopreason = last_stop_reason; last_eval_time = pt2; last_process_time = pt1; last_token_count = realnpredict; last_input_count = (finaltokcount<0?0:finaltokcount); last_seed = kcpp_data->seed; last_draft_failed = draft_failures; last_draft_success = draft_successes; total_gens += 1; concat_output_mtx.lock(); concat_output_reader_copy_res = concat_output; concat_output_mtx.unlock(); output.text = concat_output_reader_copy_res.c_str(); generation_finished = true; return output; } size_t gpttype_calc_new_state_kv() { if(kcpp_data==nullptr) { return 0; } if(file_format == FileFormat::GGUF_GENERIC) { size_t s1 = llama_state_get_size(llama_ctx_v4); if(draft_ctx) { size_t s2 = llama_state_get_size(draft_ctx); s1 += s2; } return s1; } return 0; } size_t gpttype_calc_old_state_kv(int slot) { return savestates[slot].current_savestate_size + savestates[slot].current_draft_savestate_size; } size_t gpttype_calc_old_state_tokencount(int slot) { return savestates[slot].savestate_context_tokens.size(); } size_t gpttype_calc_new_state_tokencount() { return current_context_tokens.size(); } size_t gpttype_save_state_kv(int slot) { if(kcpp_data==nullptr) { return 0; } if(file_format == FileFormat::GGUF_GENERIC) { size_t totalbytes = 0; if (!savestates[slot].current_savestate_buffer.empty()) { //JIT free savestates[slot].current_savestate_buffer.clear(); savestates[slot].current_draft_savestate_buffer.clear(); savestates[slot].savestate_context_tokens.clear(); savestates[slot].latest_logits.clear(); savestates[slot].current_savestate_size = 0; savestates[slot].current_draft_savestate_size = 0; savestates[slot].media_signature = ""; } size_t newsize = llama_state_get_size(llama_ctx_v4); try { if (savestates[slot].current_savestate_buffer.capacity() < newsize + 512) { savestates[slot].current_savestate_buffer = std::vector(newsize + 512); // add some padding. 
May throw std::bad_alloc } else { savestates[slot].current_savestate_buffer.resize(newsize + 512); } } catch (const std::bad_alloc&) { fprintf(stderr, "KV Save State: Failed to allocate %zu bytes.\n", newsize + 512); return 0; } auto res = llama_state_get_data(llama_ctx_v4, savestates[slot].current_savestate_buffer.data(), newsize); if (res > 0) { totalbytes += res; savestates[slot].current_savestate_size = newsize; savestates[slot].savestate_context_tokens = current_context_tokens; savestates[slot].media_signature = media_composite_image_signature; float * lgptr = llama_get_logits(llama_ctx_v4); savestates[slot].latest_logits.assign(lgptr,lgptr+n_vocab); int maxedpos = llama_memory_seq_pos_max(llama_get_memory(llama_ctx_v4),0); //kcpp: so maxedpos appears to always be equal to ctx tokens - 2, if savestate_ctx_tokens > maxedpos + 2 then trim excess if(maxedpos > 0 && savestates[slot].savestate_context_tokens.size() > maxedpos + 2) { //dirty hack for the memory actually being off, correct the state if(debugmode==1 && !is_quiet) { printf("\nSaveState inconsistency fix, trimming from %d to %d\n",savestates[slot].savestate_context_tokens.size(),maxedpos+2); } while(savestates[slot].savestate_context_tokens.size() > maxedpos+2) { savestates[slot].savestate_context_tokens.pop_back(); } } touch_slot(slot); printf("\nKV Save State %d: Created SaveState of %zu tokens, costing %zu MB.\n",slot,savestates[slot].savestate_context_tokens.size(),savestates[slot].current_savestate_size/(1024*1024)); } if(draft_ctx) { size_t newsize2 = llama_state_get_size(draft_ctx); try { if (savestates[slot].current_draft_savestate_buffer.capacity() < newsize2 + 512) { savestates[slot].current_draft_savestate_buffer = std::vector(newsize2 + 512); } else { savestates[slot].current_draft_savestate_buffer.resize(newsize2 + 512); } } catch (const std::bad_alloc&) { fprintf(stderr, "KV Save State: Failed to allocate %zu bytes.\n", newsize2 + 512); return 0; } auto res2 = llama_state_get_data(draft_ctx, savestates[slot].current_draft_savestate_buffer.data(), newsize2); if (res2 > 0) { totalbytes += res2; savestates[slot].current_draft_savestate_size = newsize2; printf("\nKV Save State %d: Created DraftSaveState of %zu tokens, costing %zu MB.\n",slot,current_context_tokens.size(),savestates[slot].current_draft_savestate_size/(1024*1024)); } } return totalbytes; } return 0; } bool gpttype_load_state_kv(int slot) { if(kcpp_data==nullptr) { return false; } if(file_format == FileFormat::GGUF_GENERIC) { if (savestates[slot].current_savestate_buffer.empty()) { return false; } llama_memory_clear(llama_get_memory(llama_ctx_v4),true); auto res = llama_state_set_data(llama_ctx_v4, savestates[slot].current_savestate_buffer.data(), savestates[slot].current_savestate_size); if(res > 0) { current_context_tokens = savestates[slot].savestate_context_tokens; loaded_latest_logits = savestates[slot].latest_logits; printf("\nKV Load SaveState %d: Restored KV with %zu tokens.\n", slot,current_context_tokens.size()); if(draft_ctx && savestates[slot].current_draft_savestate_size>0) { llama_memory_clear(llama_get_memory(draft_ctx),true); auto res2 = llama_state_set_data(draft_ctx, savestates[slot].current_draft_savestate_buffer.data(), savestates[slot].current_draft_savestate_size); printf("\nKV Load DraftSaveState %d: Restored KV with %zu tokens.\n", slot,current_context_tokens.size()); } touch_slot(slot); } return (res > 0); } return false; } bool gpttype_clear_state_kv(bool shrink) { if(kcpp_data==nullptr) { return false; } if(file_format == 
FileFormat::GGUF_GENERIC) { for(int slot=0;slot0) { savestates[slot].current_draft_savestate_buffer.clear(); if(shrink) { savestates[slot].current_draft_savestate_buffer.shrink_to_fit(); } savestates[slot].current_draft_savestate_size = 0; } savestates[slot].last_used = 0; } } return true; } return false; } void touch_slot(int slot) //update the slot's last used time and nothing else { auto timenow = std::chrono::system_clock::now(); auto timestamp = std::chrono::duration_cast(timenow.time_since_epoch()).count(); savestates[slot].last_used = timestamp; } int get_identical_existing_slot() //returns slot number of slot containing exactly the same data, or -1 if nothing { int64_t slotage = INT64_MAX; // Initialize with maximum possible value int slotid = -1; int currctxsize = current_context_tokens.size(); for(int i=0;i