Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	README.md
#	examples/gbnf-validator/gbnf-validator.cpp
#	examples/llava/clip.cpp
#	examples/run/README.md
#	examples/run/run.cpp
#	examples/server/README.md
#	ggml/src/ggml-cpu/CMakeLists.txt
#	src/llama.cpp
#	tests/test-grammar-integration.cpp
#	tests/test-llama-grammar.cpp
Commit 4c56b7cada by Concedo, 2024-12-21 09:41:49 +08:00
19 changed files with 550 additions and 226 deletions


@@ -627,7 +627,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params) {
             params.ctx_shift = false;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT"));
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT"));
     add_opt(common_arg(
         {"--chunks"}, "N",
         string_format("max number of chunks to process (default: %d, -1 = all)", params.n_chunks),
@@ -2207,5 +2207,17 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER}));
+    // model-specific
+    add_opt(common_arg(
+        {"--tts-oute-default"},
+        string_format("use default OuteTTS models (note: can download weights from the internet)"),
+        [](common_params & params) {
+            params.hf_repo = "OuteAI/OuteTTS-0.2-500M-GGUF";
+            params.hf_file = "OuteTTS-0.2-500M-Q8_0.gguf";
+            params.vocoder.hf_repo = "ggml-org/WavTokenizer";
+            params.vocoder.hf_file = "WavTokenizer-Large-75-F16.gguf";
+        }
+    ).set_examples({LLAMA_EXAMPLE_TTS}));
     return ctx_arg;
 }


@@ -2200,6 +2200,15 @@ class Phi3MiniModel(Model):
     model_arch = gguf.MODEL_ARCH.PHI3

     def set_vocab(self):
+        # Phi-4 model uses GPT2Tokenizer
+        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
+        if tokenizer_config_file.is_file():
+            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
+                tokenizer_config_json = json.load(f)
+                tokenizer_class = tokenizer_config_json['tokenizer_class']
+                if tokenizer_class == 'GPT2Tokenizer':
+                    return self._set_vocab_gpt2()
+
         from sentencepiece import SentencePieceProcessor

         tokenizer_path = self.dir_model / 'tokenizer.model'
@@ -2316,7 +2325,11 @@ class Phi3MiniModel(Model):
         self.gguf_writer.add_rope_dimension_count(rope_dims)
         self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
         self.gguf_writer.add_file_type(self.ftype)
-        self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"]))
+        sliding_window = self.hparams.get("sliding_window")
+        # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models
+        if sliding_window is None:
+            sliding_window = 0
+        self.gguf_writer.add_sliding_window(sliding_window)

     def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
         n_embd = self.find_hparam(["hidden_size", "n_embd"])
@@ -2615,7 +2628,7 @@ class InternLM2Model(Model):
         return [(self.map_tensor_name(name), data_torch)]

-@Model.register("BertModel", "CamembertModel", "RobertaModel")
+@Model.register("BertModel", "CamembertModel")
 class BertModel(Model):
     model_arch = gguf.MODEL_ARCH.BERT
@@ -2688,6 +2701,51 @@ class BertModel(Model):
         return [(self.map_tensor_name(name), data_torch)]

+@Model.register("RobertaModel")
+class RobertaModel(BertModel):
+    model_arch = gguf.MODEL_ARCH.BERT
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        # we need the pad_token_id to know how to chop down position_embd matrix
+        if (pad_token_id := self.hparams.get("pad_token_id")) is not None:
+            self._position_offset = 1 + pad_token_id
+            if "max_position_embeddings" in self.hparams:
+                self.hparams["max_position_embeddings"] -= self._position_offset
+        else:
+            self._position_offset = None
+
+    def set_vocab(self):
+        """Support BPE tokenizers for roberta models"""
+        bpe_tok_path = self.dir_model / "tokenizer.json"
+        if bpe_tok_path.exists():
+            self._set_vocab_gpt2()
+            self.gguf_writer.add_add_bos_token(True)
+            self.gguf_writer.add_add_eos_token(True)
+
+            # we need this to validate the size of the token_type embeddings
+            # though currently we are passing all zeros to the token_type embeddings
+            # "Sequence A" or "Sequence B"
+            self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1))
+        else:
+            return super().set_vocab()
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        # if name starts with "roberta.", remove the prefix
+        # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main
+        if name.startswith("roberta."):
+            name = name[8:]
+
+        # position embeddings start at pad_token_id + 1, so just chop down the weight tensor
+        if name == "embeddings.position_embeddings.weight":
+            if self._position_offset is not None:
+                data_torch = data_torch[self._position_offset:,:]
+
+        return super().modify_tensors(data_torch, name, bid)
+
 @Model.register("NomicBertModel")
 class NomicBertModel(BertModel):
     model_arch = gguf.MODEL_ARCH.NOMIC_BERT
@@ -3007,6 +3065,9 @@ class Rwkv6Model(Model):
         if new_name.endswith("time_mix_w2.weight"):
             data_torch = data_torch.permute(0, 2, 1)

+        if new_name.endswith("time_mix_decay.weight") or "lerp" in new_name:
+            data_torch = data_torch.squeeze()
+
         rescale_every_n_layers = self.hparams["rescale_every"]
         if rescale_every_n_layers > 0:
             if new_name.endswith("time_mix_output.weight") or new_name.endswith("channel_mix_value.weight"):
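In short, the converter now routes Phi-4 checkpoints, which ship a GPT2-style BPE tokenizer instead of a sentencepiece model, through _set_vocab_gpt2. A minimal standalone sketch of just that detection step, assuming only a local model directory; the path is illustrative and not part of the commit:

    import json
    from pathlib import Path

    def uses_gpt2_tokenizer(model_dir: str) -> bool:
        # mirrors the check added to Phi3MiniModel.set_vocab above
        cfg = Path(model_dir) / "tokenizer_config.json"
        if not cfg.is_file():
            return False
        with open(cfg, "r", encoding="utf-8") as f:
            tokenizer_config = json.load(f)
        return tokenizer_config.get("tokenizer_class") == "GPT2Tokenizer"

    print(uses_gpt2_tokenizer("./phi-4"))  # hypothetical local checkout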

Binary file not shown.


@@ -93,6 +93,7 @@ struct slot_params {
     std::vector<std::string> antiprompt;

     bool timings_per_token = false;
+    bool post_sampling_probs = false;
     bool ignore_eos = false;

     struct common_params_sampling sampling;
@@ -151,6 +152,7 @@ struct slot_params {
             {"speculative.n_min", speculative.n_min},
             {"speculative.p_min", speculative.p_min},
             {"timings_per_token", timings_per_token},
+            {"post_sampling_probs", post_sampling_probs},
         };
     }
 };
@@ -231,6 +233,7 @@ struct server_task {
         params.sampling.seed     = json_value(data, "seed",     defaults.sampling.seed);
         params.sampling.n_probs  = json_value(data, "n_probs",  defaults.sampling.n_probs);
         params.sampling.min_keep = json_value(data, "min_keep", defaults.sampling.min_keep);
+        params.post_sampling_probs = json_value(data, "post_sampling_probs", defaults.post_sampling_probs);

         params.speculative.n_min = json_value(data, "speculative.n_min", defaults.speculative.n_min);
         params.speculative.n_max = json_value(data, "speculative.n_max", defaults.speculative.n_max);
@@ -436,36 +439,67 @@ inline std::string stop_type_to_str(stop_type type) {
 struct completion_token_output {
     llama_token tok;
+    float prob;
     std::string text_to_send;
-    struct token_prob {
+    struct prob_info {
         llama_token tok;
-        std::string tok_str;
+        std::string txt;
         float prob;
     };
-    std::vector<token_prob> probs;
+    std::vector<prob_info> probs;

-    json to_json() const {
+    json to_json(bool post_sampling_probs) const {
         json probs_for_token = json::array();
         for (const auto & p : probs) {
+            std::string txt(p.txt);
+            txt.resize(validate_utf8(txt));
             probs_for_token.push_back(json {
-                {"tok_str", p.tok_str},
-                {"prob",    p.prob},
+                {"id",    p.tok},
+                {"token", txt},
+                {"bytes", str_to_bytes(p.txt)},
+                {
+                    post_sampling_probs ? "prob" : "logprob",
+                    post_sampling_probs ? p.prob : logarithm(p.prob)
+                },
             });
         }
         return probs_for_token;
     }

-    static json probs_vector_to_json(const std::vector<completion_token_output> & probs) {
+    static json probs_vector_to_json(const std::vector<completion_token_output> & probs, bool post_sampling_probs) {
         json out = json::array();
-        for (const auto & prob : probs) {
-            const std::string tok_str = prob.text_to_send;
+        for (const auto & p : probs) {
+            std::string txt(p.text_to_send);
+            txt.resize(validate_utf8(txt));
             out.push_back(json {
-                {"content", tok_str},
-                {"probs",   prob.to_json()},
+                {"id",    p.tok},
+                {"token", txt},
+                {"bytes", str_to_bytes(p.text_to_send)},
+                {
+                    post_sampling_probs ? "prob" : "logprob",
+                    post_sampling_probs ? p.prob : logarithm(p.prob)
+                },
+                {
+                    post_sampling_probs ? "top_probs" : "top_logprobs",
+                    p.to_json(post_sampling_probs)
+                },
             });
         }
         return out;
     }
+
+    static float logarithm(float x) {
+        // nlohmann::json converts -inf to null, so we need to prevent that
+        return x == 0.0f ? std::numeric_limits<float>::lowest() : std::log(x);
+    }
+
+    static std::vector<unsigned char> str_to_bytes(const std::string & str) {
+        std::vector<unsigned char> bytes;
+        for (unsigned char c : str) {
+            bytes.push_back(c);
+        }
+        return bytes;
+    }
 };

 struct server_task_result_cmpl_final : server_task_result {
@@ -486,6 +520,7 @@ struct server_task_result_cmpl_final : server_task_result {
     std::string stopping_word;
     stop_type stop = STOP_TYPE_NONE;

+    bool post_sampling_probs;
     std::vector<completion_token_output> probs_output;

     slot_params generation_params;
@@ -530,8 +565,8 @@ struct server_task_result_cmpl_final : server_task_result {
             {"tokens_cached",    n_tokens_cached},
             {"timings",          timings.to_json()},
         };
-        if (!probs_output.empty()) {
-            res["completion_probabilities"] = completion_token_output::probs_vector_to_json(probs_output);
+        if (!stream && !probs_output.empty()) {
+            res["completion_probabilities"] = completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs);
         }
         return res;
     }
@@ -542,19 +577,25 @@ struct server_task_result_cmpl_final : server_task_result {
             finish_reason = "stop";
         }

-        json choices = json::array({json{
+        json choice = json{
             {"finish_reason", finish_reason},
             {"index", 0},
             {"message", json {
                 {"content", content},
                 {"role", "assistant"}
             }
-        }}});
+        }};
+
+        if (!stream && probs_output.size() > 0) {
+            choice["logprobs"] = json{
+                {"content", completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs)},
+            };
+        }

         std::time_t t = std::time(0);

         json res = json {
-            {"choices", choices},
+            {"choices", json::array({choice})},
             {"created", t},
             {"model", oaicompat_model},
             {"object", "chat.completion"},
@@ -584,12 +625,14 @@ struct server_task_result_cmpl_final : server_task_result {
             finish_reason = "stop";
         }

-        json choices = json::array({json{{"finish_reason", finish_reason},
+        json choice = json{
+            {"finish_reason", finish_reason},
             {"index", 0},
-            {"delta", json::object()}}});
+            {"delta", json::object()}
+        };

         json ret = json {
-            {"choices", choices},
+            {"choices", json::array({choice})},
             {"created", t},
             {"id", oaicompat_cmpl_id},
             {"model", oaicompat_model},
@@ -618,7 +661,8 @@ struct server_task_result_cmpl_partial : server_task_result {
     int32_t n_decoded;
     int32_t n_prompt_tokens;

-    std::vector<completion_token_output> probs_output;
+    bool post_sampling_probs;
+    completion_token_output prob_output;
     result_timings timings;

     // OAI-compat fields
@@ -655,8 +699,8 @@ struct server_task_result_cmpl_partial : server_task_result {
         if (timings.prompt_n > 0) {
             res.push_back({"timings", timings.to_json()});
         }
-        if (!probs_output.empty()) {
-            res["completion_probabilities"] = completion_token_output::probs_vector_to_json(probs_output);
+        if (!prob_output.probs.empty()) {
+            res["completion_probabilities"] = completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs);
         }
         return res;
     }
@@ -708,6 +752,14 @@ struct server_task_result_cmpl_partial : server_task_result {
             }});
         }

+        GGML_ASSERT(choices.size() >= 1);
+
+        if (prob_output.probs.size() > 0) {
+            choices[0]["logprobs"] = json{
+                {"content", completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs)},
+            };
+        }
+
         json ret = json {
             {"choices", choices},
             {"created", t},
@@ -1001,7 +1053,6 @@ struct server_slot {
     // stats
     size_t n_sent_text = 0; // number of sent text character
-    size_t n_sent_token_probs = 0;

     int64_t t_start_process_prompt;
     int64_t t_start_generation;
@@ -1023,7 +1074,6 @@ struct server_slot {
         stopping_word = "";
         n_past        = 0;
         n_sent_text   = 0;
-        n_sent_token_probs = 0;
         task_type     = SERVER_TASK_TYPE_COMPLETION;

         generated_tokens.clear();
@@ -1764,7 +1814,7 @@ struct server_context {
     bool process_token(completion_token_output & result, server_slot & slot) {
         // remember which tokens were sampled - used for repetition penalties during sampling
-        const std::string token_str = common_token_to_piece(ctx, result.tok, params_base.special);
+        const std::string token_str = result.text_to_send;
         slot.sampled = result.tok;

         slot.generated_text += token_str;
@@ -1774,26 +1824,7 @@ struct server_context {
         slot.has_next_token = true;

         // check if there is incomplete UTF-8 character at the end
-        bool incomplete = false;
-        for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i) {
-            unsigned char c = slot.generated_text[slot.generated_text.size() - i];
-            if ((c & 0xC0) == 0x80) {
-                // continuation byte: 10xxxxxx
-                continue;
-            }
-            if ((c & 0xE0) == 0xC0) {
-                // 2-byte character: 110xxxxx ...
-                incomplete = i < 2;
-            } else if ((c & 0xF0) == 0xE0) {
-                // 3-byte character: 1110xxxx ...
-                incomplete = i < 3;
-            } else if ((c & 0xF8) == 0xF0) {
-                // 4-byte character: 11110xxx ...
-                incomplete = i < 4;
-            }
-            // else 1-byte character or invalid byte
-            break;
-        }
+        bool incomplete = validate_utf8(slot.generated_text) < slot.generated_text.size();

         // search stop word and delete it
         if (!incomplete) {
@@ -1923,6 +1954,55 @@ struct server_context {
         return slot.has_next_token; // continue
     }

+    void populate_token_probs(const server_slot & slot, completion_token_output & result, bool post_sampling, bool special, int idx) {
+        size_t n_probs = slot.params.sampling.n_probs;
+        size_t n_vocab = llama_n_vocab(llama_get_model(ctx));
+        if (post_sampling) {
+            const auto * cur_p = common_sampler_get_candidates(slot.smpl);
+            const size_t max_probs = cur_p->size;
+
+            // set probability for sampled token
+            for (size_t i = 0; i < max_probs; i++) {
+                if (cur_p->data[i].id == result.tok) {
+                    result.prob = cur_p->data[i].p;
+                    break;
+                }
+            }
+
+            // set probability for top n_probs tokens
+            result.probs.reserve(max_probs);
+            for (size_t i = 0; i < std::min(max_probs, n_probs); i++) {
+                result.probs.push_back({
+                    cur_p->data[i].id,
+                    common_detokenize(ctx, {cur_p->data[i].id}, special),
+                    cur_p->data[i].p
+                });
+            }
+        } else {
+            // TODO: optimize this with min-p optimization
+            std::vector<llama_token_data> cur = get_token_probabilities(ctx, idx);
+
+            // set probability for sampled token
+            for (size_t i = 0; i < n_vocab; i++) {
+                // set probability for sampled token
+                if (cur[i].id == result.tok) {
+                    result.prob = cur[i].p;
+                    break;
+                }
+            }
+
+            // set probability for top n_probs tokens
+            result.probs.reserve(n_probs);
+            for (size_t i = 0; i < std::min(n_vocab, n_probs); i++) {
+                result.probs.push_back({
+                    cur[i].id,
+                    common_detokenize(ctx, {cur[i].id}, special),
+                    cur[i].p
+                });
+            }
+        }
+    }
+
     void send_error(const server_task & task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) {
         send_error(task.id, error, type);
     }
@@ -1952,6 +2032,7 @@ struct server_context {
         res->n_decoded       = slot.n_decoded;
         res->n_prompt_tokens = slot.n_prompt_tokens;
+        res->post_sampling_probs = slot.params.post_sampling_probs;

         res->verbose   = slot.params.verbose;
         res->oaicompat = slot.params.oaicompat;
@@ -1961,17 +2042,7 @@ struct server_context {
         // populate res.probs_output
         if (slot.params.sampling.n_probs > 0) {
-            const llama_tokens to_send_toks = common_tokenize(ctx, tkn.text_to_send, false);
-
-            const size_t probs_pos      = std::min(slot.n_sent_token_probs,                       slot.generated_token_probs.size());
-            const size_t probs_stop_pos = std::min(slot.n_sent_token_probs + to_send_toks.size(), slot.generated_token_probs.size());
-
-            std::vector<completion_token_output> probs_output;
-            if (probs_pos < probs_stop_pos) {
-                res->probs_output = std::vector<completion_token_output>(
-                        slot.generated_token_probs.begin() + probs_pos,
-                        slot.generated_token_probs.begin() + probs_stop_pos);
-            }
+            res->prob_output = tkn; // copy the token probs
         }

         // populate timings if this is final response or timings_per_token is enabled
@@ -2000,6 +2071,7 @@ struct server_context {
         res->has_new_line  = slot.has_new_line;
         res->stopping_word = slot.stopping_word;
         res->stop          = slot.stop;
+        res->post_sampling_probs = slot.params.post_sampling_probs;

         res->verbose = slot.params.verbose;
         res->stream  = slot.params.stream;
@@ -2796,7 +2868,9 @@ struct server_context {
                     continue; // continue loop of slots
                 }

-                llama_token id = common_sampler_sample(slot.smpl, ctx, slot.i_batch - i);
+                const int tok_idx = slot.i_batch - i;
+
+                llama_token id = common_sampler_sample(slot.smpl, ctx, tok_idx);

                 slot.i_batch = -1;
@@ -2816,16 +2890,11 @@ struct server_context {
                 completion_token_output result;
                 result.tok = id;
+                result.text_to_send = common_token_to_piece(ctx, result.tok, params_base.special);
+                result.prob = 1.0f; // TODO: set it here instead of doing inside populate_token_probs

-                const auto * cur_p = common_sampler_get_candidates(slot.smpl);
-
-                for (size_t i = 0; i < (size_t) slot.params.sampling.n_probs; ++i) {
-                    auto tok_id = cur_p->data[i].id;
-                    result.probs.push_back({
-                        tok_id,
-                        tokens_to_output_formatted_string(ctx, tok_id),
-                        i >= cur_p->size ? 0.0f : cur_p->data[i].p,
-                    });
+                if (slot.params.sampling.n_probs > 0) {
+                    populate_token_probs(slot, result, slot.params.post_sampling_probs, params_base.special, tok_idx);
                 }

                 if (!process_token(result, slot)) {
@@ -2910,6 +2979,10 @@ struct server_context {
                 completion_token_output result;
                 result.tok = ids[i];
+                result.text_to_send = common_token_to_piece(ctx, result.tok, params_base.special);
+                result.prob = 1.0f; // set later
+                // TODO: set result.probs

                 if (!process_token(result, slot)) {
                     // release slot because of stop condition
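Taken together, the server changes above replace the old per-token "tok_str"/"prob" pairs with OpenAI-style records and make the detail level switchable via post_sampling_probs. A hedged client-side sketch of the non-streaming /completion response shape; host, prompt and parameter values are illustrative only:

    import requests

    res = requests.post("http://localhost:8080/completion", json={
        "prompt": "I believe the meaning of life is",
        "n_predict": 4,
        "n_probs": 3,                  # top-3 candidates per generated token
        "post_sampling_probs": False,  # False -> logprob/top_logprobs, True -> prob/top_probs
    })
    res.raise_for_status()

    for tok in res.json()["completion_probabilities"]:
        # each record carries the token id, UTF-8-safe text, raw bytes and a log-probability
        print(tok["id"], repr(tok["token"]), tok["logprob"])
        for cand in tok["top_logprobs"]:
            print("    candidate:", cand["id"], repr(cand["token"]), cand["logprob"])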


@@ -92,7 +92,6 @@ def test_chat_completion_with_openai_library():
         seed=42,
         temperature=0.8,
     )
-    print(res)
     assert res.choices[0].finish_reason == "length"
     assert res.choices[0].message.content is not None
     assert match_regex("(Suddenly)+", res.choices[0].message.content)
@@ -163,3 +162,64 @@ def test_chat_completion_with_timings_per_token():
         assert "predicted_per_second" in data["timings"]
         assert "predicted_n" in data["timings"]
         assert data["timings"]["predicted_n"] <= 10
+
+
+def test_logprobs():
+    global server
+    server.start()
+    client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}")
+    res = client.chat.completions.create(
+        model="gpt-3.5-turbo-instruct",
+        temperature=0.0,
+        messages=[
+            {"role": "system", "content": "Book"},
+            {"role": "user", "content": "What is the best book"},
+        ],
+        max_tokens=5,
+        logprobs=True,
+        top_logprobs=10,
+    )
+    output_text = res.choices[0].message.content
+    aggregated_text = ''
+    assert res.choices[0].logprobs is not None
+    assert res.choices[0].logprobs.content is not None
+    for token in res.choices[0].logprobs.content:
+        aggregated_text += token.token
+        assert token.logprob <= 0.0
+        assert token.bytes is not None
+        assert len(token.top_logprobs) > 0
+    assert aggregated_text == output_text
+
+
+def test_logprobs_stream():
+    global server
+    server.start()
+    client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}")
+    res = client.chat.completions.create(
+        model="gpt-3.5-turbo-instruct",
+        temperature=0.0,
+        messages=[
+            {"role": "system", "content": "Book"},
+            {"role": "user", "content": "What is the best book"},
+        ],
+        max_tokens=5,
+        logprobs=True,
+        top_logprobs=10,
+        stream=True,
+    )
+    output_text = ''
+    aggregated_text = ''
+    for data in res:
+        choice = data.choices[0]
+        if choice.finish_reason is None:
+            if choice.delta.content:
+                output_text += choice.delta.content
+            assert choice.logprobs is not None
+            assert choice.logprobs.content is not None
+            for token in choice.logprobs.content:
+                aggregated_text += token.token
+                assert token.logprob <= 0.0
+                assert token.bytes is not None
+                assert token.top_logprobs is not None
+                assert len(token.top_logprobs) > 0
+    assert aggregated_text == output_text


@@ -270,9 +270,68 @@ def test_n_probs():
     assert "completion_probabilities" in res.body
     assert len(res.body["completion_probabilities"]) == 5
     for tok in res.body["completion_probabilities"]:
-        assert "probs" in tok
-        assert len(tok["probs"]) == 10
-        for prob in tok["probs"]:
-            assert "prob" in prob
-            assert "tok_str" in prob
-            assert 0.0 <= prob["prob"] <= 1.0
+        assert "id" in tok and tok["id"] > 0
+        assert "token" in tok and type(tok["token"]) == str
+        assert "logprob" in tok and tok["logprob"] <= 0.0
+        assert "bytes" in tok and type(tok["bytes"]) == list
+        assert len(tok["top_logprobs"]) == 10
+        for prob in tok["top_logprobs"]:
+            assert "id" in prob and prob["id"] > 0
+            assert "token" in prob and type(prob["token"]) == str
+            assert "logprob" in prob and prob["logprob"] <= 0.0
+            assert "bytes" in prob and type(prob["bytes"]) == list
+
+
+def test_n_probs_stream():
+    global server
+    server.start()
+    res = server.make_stream_request("POST", "/completion", data={
+        "prompt": "I believe the meaning of life is",
+        "n_probs": 10,
+        "temperature": 0.0,
+        "n_predict": 5,
+        "stream": True,
+    })
+    for data in res:
+        if data["stop"] == False:
+            assert "completion_probabilities" in data
+            assert len(data["completion_probabilities"]) == 1
+            for tok in data["completion_probabilities"]:
+                assert "id" in tok and tok["id"] > 0
+                assert "token" in tok and type(tok["token"]) == str
+                assert "logprob" in tok and tok["logprob"] <= 0.0
+                assert "bytes" in tok and type(tok["bytes"]) == list
+                assert len(tok["top_logprobs"]) == 10
+                for prob in tok["top_logprobs"]:
+                    assert "id" in prob and prob["id"] > 0
+                    assert "token" in prob and type(prob["token"]) == str
+                    assert "logprob" in prob and prob["logprob"] <= 0.0
+                    assert "bytes" in prob and type(prob["bytes"]) == list
+
+
+def test_n_probs_post_sampling():
+    global server
+    server.start()
+    res = server.make_request("POST", "/completion", data={
+        "prompt": "I believe the meaning of life is",
+        "n_probs": 10,
+        "temperature": 0.0,
+        "n_predict": 5,
+        "post_sampling_probs": True,
+    })
+    assert res.status_code == 200
+    assert "completion_probabilities" in res.body
+    assert len(res.body["completion_probabilities"]) == 5
+    for tok in res.body["completion_probabilities"]:
+        assert "id" in tok and tok["id"] > 0
+        assert "token" in tok and type(tok["token"]) == str
+        assert "prob" in tok and 0.0 < tok["prob"] <= 1.0
+        assert "bytes" in tok and type(tok["bytes"]) == list
+        assert len(tok["top_probs"]) == 10
+        for prob in tok["top_probs"]:
+            assert "id" in prob and prob["id"] > 0
+            assert "token" in prob and type(prob["token"]) == str
+            assert "prob" in prob and 0.0 <= prob["prob"] <= 1.0
+            assert "bytes" in prob and type(prob["bytes"]) == list
+        # because the test model usually output token with either 100% or 0% probability, we need to check all the top_probs
+        assert any(prob["prob"] == 1.0 for prob in tok["top_probs"])


@@ -50,6 +50,8 @@ def test_embedding_multiple():
 @pytest.mark.parametrize(
     "input,is_multi_prompt",
     [
+        # do not crash on empty input
+        ("", False),
         # single prompt
         ("string", False),
         ([12, 34, 56], False),
@@ -103,6 +105,7 @@ def test_embedding_pooling_none_oai():

     # /v1/embeddings does not support pooling type 'none'
     assert res.status_code == 400
+    assert "error" in res.body


 def test_embedding_openai_library_single():


@@ -171,6 +171,36 @@ static std::vector<llama_tokens> tokenize_input_prompts(llama_context * ctx, con
     return result;
 }

+// return the last index of character that can form a valid string
+// if the last character is potentially cut in half, return the index before the cut
+// if validate_utf8(text) == text.size(), then the whole text is valid utf8
+static size_t validate_utf8(const std::string& text) {
+    size_t len = text.size();
+    if (len == 0) return 0;
+
+    // Check the last few bytes to see if a multi-byte character is cut off
+    for (size_t i = 1; i <= 4 && i <= len; ++i) {
+        unsigned char c = text[len - i];
+        // Check for start of a multi-byte sequence from the end
+        if ((c & 0xE0) == 0xC0) {
+            // 2-byte character start: 110xxxxx
+            // Needs at least 2 bytes
+            if (i < 2) return len - i;
+        } else if ((c & 0xF0) == 0xE0) {
+            // 3-byte character start: 1110xxxx
+            // Needs at least 3 bytes
+            if (i < 3) return len - i;
+        } else if ((c & 0xF8) == 0xF0) {
+            // 4-byte character start: 11110xxx
+            // Needs at least 4 bytes
+            if (i < 4) return len - i;
+        }
+    }
+
+    // If no cut-off multi-byte character is found, return full length
+    return len;
+}
+
 //
 // template utils
 //
@@ -671,3 +701,33 @@ static json format_logit_bias(const std::vector<llama_logit_bias> & logit_bias)
 static std::string safe_json_to_str(json data) {
     return data.dump(-1, ' ', false, json::error_handler_t::replace);
 }
+
+static std::vector<llama_token_data> get_token_probabilities(llama_context * ctx, int idx) {
+    std::vector<llama_token_data> cur;
+    const auto * logits = llama_get_logits_ith(ctx, idx);
+    const int n_vocab = llama_n_vocab(llama_get_model(ctx));
+
+    cur.resize(n_vocab);
+    for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
+        cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
+    }
+
+    // sort tokens by logits
+    std::sort(cur.begin(), cur.end(), [](const llama_token_data & a, const llama_token_data & b) {
+        return a.logit > b.logit;
+    });
+
+    // apply softmax
+    float max_l = cur[0].logit;
+    float cum_sum = 0.0f;
+    for (size_t i = 0; i < cur.size(); ++i) {
+        float p = expf(cur[i].logit - max_l);
+        cur[i].p = p;
+        cum_sum += p;
+    }
+    for (size_t i = 0; i < cur.size(); ++i) {
+        cur[i].p /= cum_sum;
+    }
+
+    return cur;
+}
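The validate_utf8 helper above is what lets the server truncate streamed token text at a safe character boundary instead of re-scanning the whole generated string. The same boundary check in Python, as a rough illustration only (it operates on bytes rather than std::string; everything else mirrors the C++):

    def validate_utf8(data: bytes) -> int:
        # length of the longest prefix that does not end in a truncated multi-byte sequence
        n = len(data)
        for i in range(1, min(4, n) + 1):
            c = data[n - i]
            if c & 0xE0 == 0xC0 and i < 2:   # 110xxxxx: 2-byte lead, needs 1 more byte
                return n - i
            if c & 0xF0 == 0xE0 and i < 3:   # 1110xxxx: 3-byte lead, needs 2 more bytes
                return n - i
            if c & 0xF8 == 0xF0 and i < 4:   # 11110xxx: 4-byte lead, needs 3 more bytes
                return n - i
        return n

    assert validate_utf8("héllo".encode()) == 6       # complete string: keep everything
    assert validate_utf8("héllo".encode()[:2]) == 1   # "h" plus half of "é": keep only "h"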


@@ -13,7 +13,7 @@ import hljs from './highlight-config';
 import daisyuiThemes from 'daisyui/src/theming/themes';

 // ponyfill for missing ReadableStream asyncIterator on Safari
-import { asyncIterator } from "@sec-ant/readable-stream/ponyfill/asyncIterator";
+import { asyncIterator } from '@sec-ant/readable-stream/ponyfill/asyncIterator';

 const isDev = import.meta.env.MODE === 'development';

@@ -22,7 +22,22 @@ const isString = (x) => !!x.toLowerCase;
 const isBoolean = (x) => x === true || x === false;
 const isNumeric = (n) => !isString(n) && !isNaN(n) && !isBoolean(n);
 const escapeAttr = (str) => str.replace(/>/g, '&gt;').replace(/"/g, '&quot;');
-const copyStr = (str) => navigator.clipboard.writeText(str);
+const copyStr = (textToCopy) => {
+  // Navigator clipboard api needs a secure context (https)
+  if (navigator.clipboard && window.isSecureContext) {
+    navigator.clipboard.writeText(textToCopy);
+  } else {
+    // Use the 'out of viewport hidden text area' trick
+    const textArea = document.createElement('textarea');
+    textArea.value = textToCopy;
+    // Move textarea out of the viewport so it's not visible
+    textArea.style.position = 'absolute';
+    textArea.style.left = '-999999px';
+    document.body.prepend(textArea);
+    textArea.select();
+    document.execCommand('copy');
+  }
+};

 // constants
 const BASE_URL = isDev
@@ -130,9 +145,9 @@ const VueMarkdown = defineComponent(
     };
     window.copyStr = copyStr;
     const content = computed(() => md.value.render(props.source));
-    return () => h("div", { innerHTML: content.value });
+    return () => h('div', { innerHTML: content.value });
   },
-  { props: ["source"] }
+  { props: ['source'] }
 );

 // input field to be used by settings modal


@@ -579,21 +579,21 @@ static void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c
 #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
     if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
-        const block_q4_0x4 * b_ptr = (const block_q4_0x4 *)vx;
+        const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx;

         for (int c = 0; c < nc; c += ncols_interleaved) {
-            const block_q8_0 * a_ptr = (const block_q8_0 *)vy;
+            const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
             float32x4_t acc = vdupq_n_f32(0);
             for (int b = 0; b < nb; b++) {
-                int8x16_t b0 = vld1q_s8((const int8_t *)b_ptr->qs);
-                int8x16_t b1 = vld1q_s8((const int8_t *)b_ptr->qs + 16);
-                int8x16_t b2 = vld1q_s8((const int8_t *)b_ptr->qs + 32);
-                int8x16_t b3 = vld1q_s8((const int8_t *)b_ptr->qs + 48);
-                float16x4_t bd = vld1_f16((const __fp16 *)b_ptr->d);
+                int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs);
+                int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16);
+                int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32);
+                int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48);
+                float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d);

                 int8x16_t a0 = vld1q_s8(a_ptr->qs);
                 int8x16_t a1 = vld1q_s8(a_ptr->qs + qk/2);
-                float16x4_t ad = vld1_dup_f16((const __fp16 *)&a_ptr->d);
+                float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d);

                 int32x4_t ret = vdupq_n_s32(0);
@@ -662,72 +662,52 @@ static void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c
     UNUSED(ncols_interleaved);
     UNUSED(blocklen);

-#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
-    if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) {
-        const void * b_ptr = vx;
-        const void * a_ptr = vy;
-        float * res_ptr = s;
-
-        __asm__ __volatile__(
-            "movi v2.16b, #0x4\n"
-            "movi v1.16b, #0xf0\n"
-            "add %x[b_ptr], %x[b_ptr], #0x8\n"
-            "1:"  // Column loop
-            "add x23, %x[a_ptr], #0x2\n"
-            "movi v0.16b, #0x0\n"
-            "mov x22, %x[nb]\n"
-            "2:"  // Block loop
-            "ldr q31, [%x[b_ptr], #0x0]\n"
-            "ldr q30, [%x[b_ptr], #0x10]\n"
-            "mov x21, x23\n"
-            "movi v29.4s, #0x0\n"
-            "ldr q28, [%x[b_ptr], #0x20]\n"
-            "ldr q27, [%x[b_ptr], #0x30]\n"
-            "movi v26.4s, #0x0\n"
-            "sub x20, x23, #0x2\n"
-            "ld1r { v25.8h }, [x20]\n"
-            "ldr q24, [%x[b_ptr], #-0x8]\n"
-            "sub x22, x22, #0x1\n"
-            "add x23, x23, #0x22\n"
-            "ld1r { v23.2d }, [x21], #0x8\n"
-            "sshl v22.16b, v31.16b, v2.16b\n"
-            "sshl v16.16b, v30.16b, v2.16b\n"
-            "add %x[b_ptr], %x[b_ptr], #0x48\n"
-            "ld1r { v21.2d }, [x21], #0x8\n"
-            "sshl v20.16b, v28.16b, v2.16b\n"
-            "sshl v19.16b, v27.16b, v2.16b\n"
-            "ld1r { v18.2d }, [x21], #0x8\n"
-            "ld1r { v17.2d }, [x21], #0x8\n"
-            "and v31.16b, v31.16b, v1.16b\n"
-            "and v30.16b, v30.16b, v1.16b\n"
-            ".inst 0x4e9796dd // sdot v29.4s, v22.16b, v23.16b\n"
-            ".inst 0x4e97961a // sdot v26.4s, v16.16b, v23.16b\n"
-            "and v28.16b, v28.16b, v1.16b\n"
-            "and v27.16b, v27.16b, v1.16b\n"
-            "fcvtl v25.4s, v25.4h\n"
-            "fcvtl v16.4s, v24.4h\n"
-            ".inst 0x4e95969d // sdot v29.4s, v20.16b, v21.16b\n"
-            ".inst 0x4e95967a // sdot v26.4s, v19.16b, v21.16b\n"
-            "fmul v16.4s, v16.4s, v25.4s\n"
-            ".inst 0x4e9297fd // sdot v29.4s, v31.16b, v18.16b\n"
-            ".inst 0x4e9297da // sdot v26.4s, v30.16b, v18.16b\n"
-            ".inst 0x4e91979d // sdot v29.4s, v28.16b, v17.16b\n"
-            ".inst 0x4e91977a // sdot v26.4s, v27.16b, v17.16b\n"
-            "addp v29.4s, v29.4s, v26.4s\n"
-            "scvtf v29.4s, v29.4s, #0x4\n"
-            "fmla v0.4s, v29.4s, v16.4s\n"
-            "cbnz x22, 2b\n"
-            "sub %x[nc], %x[nc], #0x4\n"
-            "str q0, [%x[res_ptr], #0x0]\n"
-            "add %x[res_ptr], %x[res_ptr], #0x10\n"
-            "cbnz %x[nc], 1b\n"
-            : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] "+&r" (nc)
-            : [a_ptr] "r" (a_ptr), [nb] "r" (nb)
-            : "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23"
-        );
+#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+    if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
+        const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx;
+
+        for (int c = 0; c < nc; c += ncols_interleaved) {
+            const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
+            float32x4_t acc = vdupq_n_f32(0);
+            for (int b = 0; b < nb; b++) {
+                int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs);
+                int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16);
+                int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32);
+                int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48);
+                float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d);
+
+                int8x16_t a0 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs);
+                int8x16_t a1 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 1);
+                int8x16_t a2 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 2);
+                int8x16_t a3 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 3);
+                float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d);
+
+                int32x4_t ret0 = vdupq_n_s32(0);
+                int32x4_t ret1 = vdupq_n_s32(0);
+
+                ret0 = vdotq_s32(ret0, b0 << 4, a0);
+                ret1 = vdotq_s32(ret1, b1 << 4, a0);
+                ret0 = vdotq_s32(ret0, b2 << 4, a1);
+                ret1 = vdotq_s32(ret1, b3 << 4, a1);
+
+                ret0 = vdotq_s32(ret0, b0 & 0xf0U, a2);
+                ret1 = vdotq_s32(ret1, b1 & 0xf0U, a2);
+                ret0 = vdotq_s32(ret0, b2 & 0xf0U, a3);
+                ret1 = vdotq_s32(ret1, b3 & 0xf0U, a3);
+
+                int32x4_t ret = vpaddq_s32(ret0, ret1);
+
+                acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4),
+                        vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd)));
+                a_ptr++;
+                b_ptr++;
+            }
+            vst1q_f32(s, acc);
+            s += ncols_interleaved;
+        }
         return;
     }
-#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
     float sumf[4];
     int sumi;


@@ -11,6 +11,8 @@
 //

 #include "common.hpp"
+
+#include "ggml-backend-impl.h"
 #include "ggml-impl.h"

 int get_current_device_id() {
@@ -65,9 +67,9 @@ void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *sr
                           const ggml_sycl_op_flatten_t op) try {

     const bool use_src1 = src1 != nullptr;
-    GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
-    GGML_ASSERT(              dst->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
+    if(use_src1)
+        GGML_ASSERT(strcmp(src1->buffer->buft->iface.get_name(src1->buffer->buft), GGML_SYCL_NAME "_Split") != 0);
+    GGML_ASSERT(strcmp(dst->buffer->buft->iface.get_name(dst->buffer->buft), GGML_SYCL_NAME "_Split") != 0);

     // dd = data device
     float * src0_ddf = (float *) src0->data;


@@ -26,7 +26,11 @@
 #define GGML_COMMON_DECL_SYCL
 #define GGML_COMMON_IMPL_SYCL
+/* suppress warning spam */
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnested-anon-types"
 #include "ggml-common.h"
+#pragma clang diagnostic pop

 void* ggml_sycl_host_malloc(size_t size);
 void  ggml_sycl_host_free(void* ptr);


@@ -288,10 +288,8 @@ ggml_backend_sycl_buffer_init_tensor(ggml_backend_buffer_t buffer,
                                      ggml_tensor *tensor) try {
     ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context;

-    if (tensor->view_src != NULL && tensor->view_offs == 0) {
+    if (tensor->view_src != NULL) {
         assert(tensor->view_src->buffer->buft == buffer->buft);
-        tensor->backend = tensor->view_src->backend;
-        tensor->extra = tensor->view_src->extra;
         return;
     }
@@ -539,7 +537,7 @@ ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device) {
     auto dev_count = ggml_backend_sycl_get_device_count();

     if (device>=dev_count or device<0) {
-        printf("ggml_backend_sycl_buffer_type error: device_index:%d is out of range [0, %d], miss to call ggml_backend_sycl_set_single_device()\n",
+        GGML_LOG_ERROR("ggml_backend_sycl_buffer_type error: device_index:%d is out of range [0, %d], miss to call ggml_backend_sycl_set_single_device()\n",
                device, dev_count-1);
         GGML_ASSERT(device<dev_count);
     }
@@ -567,7 +565,7 @@ ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(ggml_backend_sycl_conte
     int device = ctx->device;
     if (device>=ggml_sycl_info().device_count or device<0) {
-        printf("ggml_backend_sycl_buffer_type error: device_index:%d is out of range [0, %d], miss to call ggml_backend_sycl_set_single_device()\n",
+        GGML_LOG_ERROR("ggml_backend_sycl_buffer_type error: device_index:%d is out of range [0, %d], miss to call ggml_backend_sycl_set_single_device()\n",
                device, ggml_sycl_info().device_count-1);
         GGML_ASSERT(device<ggml_sycl_info().device_count);
     }
@@ -746,7 +744,7 @@ ggml_backend_sycl_split_buffer_init_tensor(ggml_backend_buffer_t buffer,
             size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
         }

-        // FIXME: do not crash if cudaMalloc fails
+        // FIXME: do not crash if SYCL Buffer alloc fails
         // currently, init_tensor cannot fail, it needs to be fixed in ggml-backend first
         ggml_sycl_set_device(i);
         const queue_ptr stream = ctx->streams[i];
@@ -788,7 +786,6 @@ ggml_backend_sycl_split_buffer_init_tensor(ggml_backend_buffer_t buffer,
                 CHECK_TRY_ERROR(extra->events[i][is] = new sycl::event()));
         }
     }
-    tensor->backend = GGML_BACKEND_TYPE_GPU_SPLIT;
     tensor->extra = extra;
 }
 catch (sycl::exception const &exc) {
@@ -2349,12 +2346,22 @@ static dpct::err0 ggml_sycl_cpy_tensor_2d(void *dst,
     dpct::memcpy_direction kind;
     char * src_ptr;
-    if (src->backend == GGML_BACKEND_TYPE_CPU) {
+    if (ggml_backend_buffer_is_host(src->buffer)) {
         kind = dpct::host_to_device;
+        //GGML_SYCL_DEBUG("%s: Host buffer type src tensor\n", __func__);
         src_ptr = (char *) src->data;
         // GGML_SYCL_DEBUG("ggml_sycl_cpy_tensor_2d GGML_BACKEND_TYPE_CPU src_ptr %p\n", src_ptr);
-    } else if (src->backend == GGML_BACKEND_TYPE_GPU || src->backend == GGML_BACKEND_TYPE_GPU_SPLIT) {
-        GGML_ASSERT(src->backend != GGML_BACKEND_TYPE_GPU_SPLIT || (i1_low == 0 && i1_high == src->ne[1]));
+    } else if (ggml_backend_buffer_is_sycl(src->buffer)) {
+        // If buffer is a SYCL buffer
+        //GGML_SYCL_DEBUG("%s: SYCL buffer type src tensor\n", __func__);
+        kind = dpct::device_to_device;
+        src_ptr = (char *) src->data;
+    } else if (ggml_backend_buffer_is_sycl_split(src->buffer)) {
+        /*
+        If buffer is a SYCL split buffer
+        */
+        //GGML_SYCL_DEBUG("%s: Split buffer type src tensor\n", __func__);
+        GGML_ASSERT(i1_low == 0 && i1_high == src->ne[1]);
         kind = dpct::device_to_device;
         ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src->extra;
         int id;
@@ -2857,8 +2864,8 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
     const int nb2 = dst->nb[2];
     const int nb3 = dst->nb[3];

-    GGML_ASSERT(dst->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
-    GGML_ASSERT(src1->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
+    GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(dst->buffer));
+    GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src1->buffer));
     GGML_ASSERT(src1->type == GGML_TYPE_F32 || (src1->ne[2] == 1 && src1->ne[3] == 1));

     GGML_ASSERT(ne12 >= ne02 && ne12 % ne02 == 0);
@@ -2878,7 +2885,7 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
     int64_t src1_padded_col_size = GGML_PAD(ne10, MATRIX_ROW_PADDING);

-    const bool split = src0->backend == GGML_BACKEND_TYPE_GPU_SPLIT;
+    const bool split = ggml_backend_buffer_is_sycl_split(src0->buffer);
     GGML_ASSERT(!(split && ne02 > 1));
     GGML_ASSERT(!(split && ne03 > 1));
     GGML_ASSERT(!(split && ne02 < ne12));
@@ -3198,7 +3205,7 @@ static void ggml_sycl_mul_mat_vec_p021(ggml_backend_sycl_context & ctx, const gg
                                        const ggml_tensor *src1,
                                        ggml_tensor *dst) try {
     GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
-    GGML_ASSERT(src0->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
+    GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src0->buffer));
     GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation
     GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // 0213 permutation
     GGML_ASSERT(src0->type == GGML_TYPE_F16);
@@ -3231,7 +3238,7 @@ static void ggml_sycl_mul_mat_vec_nc(ggml_backend_sycl_context & ctx, const ggml
     GGML_ASSERT(!ggml_is_transposed(src0));
     GGML_ASSERT(!ggml_is_transposed(src1));
     GGML_ASSERT(!ggml_is_permuted(src0));
-    GGML_ASSERT(src0->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
+    GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src0->buffer));
     GGML_ASSERT(src0->type == GGML_TYPE_F16);
     GGML_ASSERT(src1->type == GGML_TYPE_F32);
@@ -3293,7 +3300,7 @@ static void ggml_sycl_mul_mat_batched_sycl(ggml_backend_sycl_context & ctx,
                                            ggml_tensor *dst) try {
     GGML_ASSERT(!ggml_is_transposed(src0));
     GGML_ASSERT(!ggml_is_transposed(src1));
-    GGML_ASSERT(src0->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
+    GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src0->buffer));
     GGML_ASSERT(src0->type == GGML_TYPE_F16);

     GGML_TENSOR_BINARY_OP_LOCALS
@@ -4638,10 +4645,9 @@ static ggml_backend_dev_t ggml_backend_sycl_reg_get_device(ggml_backend_reg_t re
 static void *ggml_backend_sycl_reg_get_proc_address(ggml_backend_reg_t reg, const char *name) {
     GGML_UNUSED(reg);

-    // TODO: update to the current function signature
-    //if (strcmp(name, "ggml_backend_split_buffer_type") == 0) {
-    //    return (void *)ggml_backend_sycl_split_buffer_type;
-    //}
+    if (strcmp(name, "ggml_backend_split_buffer_type") == 0) {
+        return (void *)ggml_backend_sycl_split_buffer_type;
+    }

     // SYCL doesn't support registering host memory, left here for reference
     // "ggml_backend_register_host_buffer"


@@ -1754,7 +1754,7 @@ static void grammar_accept_token(FileFormat file_format, int32_t n_vocab, struct
     const auto & code_points = decoded.first;
     for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
         auto prev_stacks = grammar->stacks;
-        llama_grammar_accept(grammar->rules, prev_stacks, *it, grammar->stacks);
+        llama_grammar_accept(grammar, *it);
     }
     grammar->partial_utf8 = decoded.second;
     GGML_ASSERT(!grammar->stacks.empty());


@@ -56,7 +56,7 @@ maxhordelen = 400
 modelbusy = threading.Lock()
 requestsinqueue = 0
 defaultport = 5001
-KcppVersion = "1.80"
+KcppVersion = "1.80.1"
 showdebug = True
 guimode = False
 showsamplerwarning = True


@@ -822,15 +822,11 @@ llama_grammar_stacks & llama_grammar_get_stacks(struct llama_grammar * grammar)
     return grammar->stacks;
 }

-void llama_grammar_accept(
-        const llama_grammar_rules & rules,
-        const llama_grammar_stacks & stacks,
-        const uint32_t chr,
-        llama_grammar_stacks & stacks_new) {
-    stacks_new.clear();
-    stacks_new.reserve(stacks.size());
+void llama_grammar_accept(struct llama_grammar * grammar, uint32_t chr) {
+    llama_grammar_stacks stacks_new;
+    stacks_new.reserve(grammar->stacks.size());

-    for (const auto & stack : stacks) {
+    for (const auto & stack : grammar->stacks) {
         if (stack.empty()) {
             continue;
         }
@@ -844,9 +840,11 @@ void llama_grammar_accept(
             if (!llama_grammar_is_end_of_sequence(pos)) {
                 new_stack.push_back(pos);
             }
-            llama_grammar_advance_stack(rules, new_stack, stacks_new);
+            llama_grammar_advance_stack(grammar->rules, new_stack, stacks_new);
         }
     }
+
+    grammar->stacks = std::move(stacks_new);
 }

 llama_grammar_candidates llama_grammar_reject_candidates_for_stack(
@@ -1051,7 +1049,12 @@ void llama_grammar_free_impl(struct llama_grammar * grammar) {
 }

 struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & grammar) {
-    llama_grammar * result = new llama_grammar { grammar.vocab, grammar.rules, grammar.stacks, grammar.partial_utf8, };
+    llama_grammar * result = new llama_grammar {
+        grammar.vocab,
+        grammar.rules,
+        grammar.stacks,
+        grammar.partial_utf8,
+    };

     // redirect elements in stacks to point to new rules
     for (size_t is = 0; is < result->stacks.size(); is++) {
@@ -1126,11 +1129,8 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token
     const auto decoded = decode_utf8(piece, grammar.partial_utf8);
     const auto & code_points = decoded.first;

-    llama_grammar_stacks stacks_new;
-
     for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
-        llama_grammar_accept(grammar.rules, grammar.stacks, *it, stacks_new);
-        grammar.stacks = std::move(stacks_new);
+        llama_grammar_accept(&grammar, *it);
     }

     grammar.partial_utf8 = decoded.second;


@@ -58,6 +58,7 @@ using llama_grammar_rules = std::vector<llama_grammar_rule>;
 using llama_grammar_stacks = std::vector<llama_grammar_stack>;
 using llama_grammar_candidates = std::vector<llama_grammar_candidate>;

+// TODO: remove, needed for tests atm
 const llama_grammar_rules & llama_grammar_get_rules (const struct llama_grammar * grammar);
       llama_grammar_stacks & llama_grammar_get_stacks(      struct llama_grammar * grammar);
@@ -65,11 +66,7 @@ const llama_grammar_rules & llama_grammar_get_rules (const struct llama_grammar
 // be positioned at a character range (see `llama_grammar_advance_stack`), and
 // produces the N possible stacks if the given char is accepted at those
 // positions
-void llama_grammar_accept(
-        const llama_grammar_rules & rules,
-        const llama_grammar_stacks & stacks,
-        uint32_t chr,
-        llama_grammar_stacks & stacks_new);
+void llama_grammar_accept(struct llama_grammar * grammar, uint32_t chr);

 std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
         const llama_grammar_rules & rules,


@@ -134,7 +134,6 @@ static void zeros(std::ofstream & file, size_t n) {
     }
 }

-static bool phi3swa_warning_showed = false; //to warn when old phi3 model has no SWA
 static bool old_mixtral_warning_showed = false;
 static int clblast_offload_fallback_layers = 0;
 static int layer_name_to_number(std::string inputString)
@@ -6633,7 +6632,8 @@ static void llm_load_vocab(
                 tokenizer_pre == "jina-v1-en" ||
                 tokenizer_pre == "jina-v2-es" ||
                 tokenizer_pre == "jina-v2-de" ||
-                tokenizer_pre == "jina-v2-code") {
+                tokenizer_pre == "jina-v2-code" ||
+                tokenizer_pre == "roberta-bpe") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
             } else if (
                 tokenizer_pre == "refact") {
@@ -13486,21 +13486,13 @@ struct llm_build_context {
         struct ggml_tensor * inp_pos = build_inp_pos();

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask_swa;
-        if(hparams.n_swa==0)
-        {
-            if(!phi3swa_warning_showed)
-            {
-                phi3swa_warning_showed = true;
-                printf("\nWarning: PHI3 model did not contain sliding window!!!\nSWA is disabled. Model may need a new quant.\n");
-            }
-            KQ_mask_swa = build_inp_KQ_mask();
-        }
-        else
-        {
-            KQ_mask_swa = build_inp_KQ_mask_swa();
-        }
+        struct ggml_tensor * KQ_mask = nullptr;
+        if (hparams.n_swa == 0) {
+            // Phi-4 doesn't use sliding window attention
+            KQ_mask = build_inp_KQ_mask();
+        } else {
+            KQ_mask = build_inp_KQ_mask_swa();
+        }

         for (int il = 0; il < n_layer; ++il) {
             auto residual = inpL;
@@ -13558,7 +13550,7 @@ struct llm_build_context {
                 cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                         model.layers[il].wo, model.layers[il].bo,
-                        Kcur, Vcur, Qcur, KQ_mask_swa, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
             }

             if (il == n_layer - 1) {


@@ -634,7 +634,7 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
         HANDCRAFTED_KV_BAD_KEY_SIZE,
         HANDCRAFTED_KV_BAD_TYPE,
-        HANDCRAFTED_KV_BAD_VALUE_SIZE,
+        // HANDCRAFTED_KV_BAD_VALUE_SIZE, // FIXME sanitizer limit
         // HANDCRAFTED_FILE_TYPE_DUPLICATE_KEY, // FIXME
         HANDCRAFTED_KV_SUCCESS,