Merge commit '2fa51c19b0' into concedo_experimental

# Conflicts:
#	.github/actions/windows-setup-cuda/action.yml
#	.github/workflows/build-linux-cross.yml
#	.github/workflows/release.yml
#	README.md
#	docs/build-riscv64-spacemit.md
#	examples/model-conversion/logits.cpp
#	ggml/CMakeLists.txt
#	ggml/src/ggml-cpu/CMakeLists.txt
#	models/templates/Kimi-K2-Instruct.jinja
#	models/templates/Kimi-K2-Thinking.jinja
#	tests/test-chat.cpp
#	tools/server/README.md
This commit is contained in:
Concedo 2025-12-11 23:04:48 +08:00
commit 278e45becf
21 changed files with 584 additions and 214 deletions

View file

@@ -1733,6 +1733,10 @@ void llama_model::load_hparams(llama_model_loader & ml) {
}
ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, false);
// (optional) temperature tuning - used by mistral-large
ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_SCALE, hparams.f_attn_temp_scale, false);
ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_LENGTH, hparams.n_attn_temp_floor_scale, false);
switch (hparams.n_layer) {
case 27: type = LLM_TYPE_16B; break;
case 60: type = LLM_TYPE_236B; break;