Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	.ecrc
#	CMakePresets.json
#	ci/run.sh
#	docs/backend/SYCL.md
#	ggml/src/CMakeLists.txt
#	src/llama.cpp
#	tests/test-backend-ops.cpp
#	tests/test-sampling.cpp
commit b2c1ff7a13
30 changed files with 7666 additions and 6889 deletions
@@ -511,6 +511,9 @@ extern "C" {
     // to the decoder to start generating output sequence. For other models, it returns -1.
     LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model);
 
+    // Returns true if the model is recurrent (like Mamba, RWKV, etc.)
+    LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
+
     // Returns 0 on success
     LLAMA_API uint32_t llama_model_quantize(
             const char * fname_inp,
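For context, a minimal usage sketch, not part of this commit: it assumes the standard llama.cpp loading API (llama_backend_init, llama_model_default_params, llama_load_model_from_file), and the model path "model.gguf" is a placeholder. It shows how the newly added llama_model_is_recurrent() and the existing llama_model_decoder_start_token() can be queried on a loaded model.

// build against llama.cpp; "model.gguf" is a hypothetical path
#include <stdio.h>
#include "llama.h"

int main(void) {
    llama_backend_init();

    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // Added in this change: true for recurrent architectures (Mamba, RWKV, etc.)
    if (llama_model_is_recurrent(model)) {
        printf("model is recurrent\n");
    }

    // Decoder start token for encoder-decoder models; -1 for other models
    llama_token dst = llama_model_decoder_start_token(model);
    printf("decoder start token: %d\n", dst);

    llama_free_model(model);
    llama_backend_free();
    return 0;
}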