mirror of https://github.com/LostRuins/koboldcpp.git
synced 2025-09-14 02:49:41 +00:00
Support diffusion models: Add Dream 7B (#14644)
* Support diffusion models: Add Dream 7B
* Move diffusion to examples
* Move stuff to examples. Add patch to not use kv-cache
* Address review comments
* Make sampling fast
* llama: remove diffusion functions
* Add basic timings + cleanup
* More cleanup
* Review comments: better formatting, use LOG instead of std::cerr, re-use batch, use ubatch instead of max_length
* fixup!
* Review: move everything to diffusion-cli for now
This commit is contained in:
parent 64978340b0
commit ab14019821
13 changed files with 804 additions and 0 deletions
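Dream 7B generates text by iterative denoising rather than left-to-right decoding: the sequence starts out fully masked and the model repeatedly predicts the masked positions, committing the most confident ones each step. The following is only a conceptual sketch of that loop, not the diffusion-cli code added by this commit; predict(), the per-step commit schedule, and the token/confidence values are illustrative stand-ins.

// Conceptual sketch of mask-based diffusion decoding (illustrative only).
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

using token_t = int32_t;

struct prediction { int pos; token_t tok; float conf; };

// Dummy stand-in for a model forward pass: proposes token 42 for every
// still-masked position, with a made-up confidence that decays by position.
static std::vector<prediction> predict(const std::vector<token_t> & seq, token_t mask_id) {
    std::vector<prediction> out;
    for (int i = 0; i < (int) seq.size(); ++i) {
        if (seq[i] == mask_id) {
            out.push_back({ i, 42, 1.0f / (float) (1 + i) });
        }
    }
    return out;
}

static std::vector<token_t> diffusion_decode(int length, token_t mask_id, int n_steps) {
    std::vector<token_t> seq(length, mask_id);      // start from an all-mask sequence
    for (int step = 0; step < n_steps; ++step) {
        std::vector<prediction> preds = predict(seq, mask_id);
        if (preds.empty()) {
            break;                                  // everything already unmasked
        }
        // commit an even share of the remaining masked positions per step,
        // most confident predictions first
        std::sort(preds.begin(), preds.end(),
                  [](const prediction & a, const prediction & b) { return a.conf > b.conf; });
        const size_t n_commit = std::max<size_t>(1, preds.size() / (size_t) (n_steps - step));
        for (size_t i = 0; i < n_commit; ++i) {
            seq[preds[i].pos] = preds[i].tok;
        }
    }
    return seq;
}

int main() {
    const token_t mask_id = -1;                     // placeholder mask token id
    for (token_t t : diffusion_decode(16, mask_id, 4)) {
        printf("%d ", t);
    }
    printf("\n");
    return 0;
}

Such a loop needs the vocabulary to expose its mask token, which is what the vocab changes below add.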
@@ -3354,6 +3354,10 @@ llama_token llama_vocab::token_fim_sep() const {
     return pimpl->special_fim_sep_id;
 }
 
+llama_token llama_vocab::token_mask() const {
+    return pimpl->special_mask_id;
+}
+
 bool llama_vocab::get_add_space_prefix() const {
     return pimpl->add_space_prefix;
 }
@@ -3594,6 +3598,10 @@ llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab) {
     return vocab->token_fim_sep();
 }
 
+llama_token llama_vocab_mask(const struct llama_vocab* vocab) {
+    return vocab->token_mask();
+}
+
 // deprecated
 const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token) {
     return llama_vocab_get_text(vocab, token);
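The hunks above expose the vocab's mask token through both the internal accessor llama_vocab::token_mask() and the public C function llama_vocab_mask(), mirroring the existing special-token getters. Below is a minimal usage sketch, assuming a GGUF model whose vocabulary defines a mask token; the file name is a placeholder, and the check against LLAMA_TOKEN_NULL assumes the new accessor follows the same convention as the other special-token functions when the token is undefined.

// Minimal sketch: query the new mask-token accessor from the public C API.
#include "llama.h"
#include <cstdio>

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    // placeholder path: any model whose vocab defines a mask token (e.g. a Dream-style checkpoint)
    llama_model * model = llama_model_load_from_file("dream-7b.gguf", mparams);
    if (model == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    const llama_vocab * vocab = llama_model_get_vocab(model);

    // new in this commit: fetch the vocab's special mask token id
    const llama_token id_mask = llama_vocab_mask(vocab);
    if (id_mask == LLAMA_TOKEN_NULL) {
        // assumption: a vocab without a mask token reports LLAMA_TOKEN_NULL here
        fprintf(stderr, "vocab defines no mask token\n");
    } else {
        printf("mask token: %d '%s'\n", id_mask, llama_vocab_get_text(vocab, id_mask));
    }

    llama_model_free(model);
    llama_backend_free();
    return 0;
}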