Merge commit '2fa51c19b0' into concedo_experimental

# Conflicts:
#	.github/actions/windows-setup-cuda/action.yml
#	.github/workflows/build-linux-cross.yml
#	.github/workflows/release.yml
#	README.md
#	docs/build-riscv64-spacemit.md
#	examples/model-conversion/logits.cpp
#	ggml/CMakeLists.txt
#	ggml/src/ggml-cpu/CMakeLists.txt
#	models/templates/Kimi-K2-Instruct.jinja
#	models/templates/Kimi-K2-Thinking.jinja
#	tests/test-chat.cpp
#	tools/server/README.md
This commit is contained in:
Concedo 2025-12-11 23:04:48 +08:00
commit 278e45becf
21 changed files with 584 additions and 214 deletions

View file

@@ -251,7 +251,10 @@ llama_context::llama_context(
LLAMA_LOG_DEBUG("%s: backend_ptrs.size() = %zu\n", __func__, backend_ptrs.size());
const size_t max_nodes = this->graph_max_nodes();
const uint32_t n_seqs = cparams.n_seq_max;
const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
const size_t max_nodes = this->graph_max_nodes(n_tokens);
LLAMA_LOG_DEBUG("%s: max_nodes = %zu\n", __func__, max_nodes);
@@ -309,9 +312,6 @@ llama_context::llama_context(
cross.v_embd.clear();
const uint32_t n_seqs = cparams.n_seq_max;
const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
// avoid reserving graphs with zero outputs - assume one output per sequence
n_outputs = n_seqs;
@@ -1396,9 +1396,9 @@ void llama_context::output_reorder() {
// graph
//
uint32_t llama_context::graph_max_nodes() const {
uint32_t llama_context::graph_max_nodes(uint32_t n_tokens) const {
if (model.arch == LLM_ARCH_QWEN3NEXT) {
return std::max<uint32_t>(8192u, 32u*model.n_tensors());
return std::max<uint32_t>(n_tokens * 40, 32u * model.n_tensors());
}
return std::max<uint32_t>(1024u, 8u*model.n_tensors());
}