Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	.github/workflows/bench.yml
#	.github/workflows/build.yml
#	.github/workflows/python-check-requirements.yml
#	README.md
#	docs/backend/SYCL.md
#	flake.lock
#	ggml/CMakeLists.txt
#	ggml/src/kompute-shaders/op_rope_f16.comp
#	ggml/src/kompute-shaders/op_rope_f32.comp
#	ggml/src/kompute-shaders/rope_common.comp
Author: Concedo
Date:   2024-08-14 22:25:43 +08:00
Commit: e8de0af3ec

18 changed files with 1326 additions and 101 deletions


@@ -369,6 +369,9 @@ namespace grammar_parser {
         }
         // Validate the state to ensure that all rules are defined
         for (const auto & rule : state.rules) {
+            if (rule.empty()) {
+                throw std::runtime_error("Undefined rule");
+            }
             for (const auto & elem : rule) {
                 if (elem.type == LLAMA_GRETYPE_RULE_REF) {
                     // Ensure that the rule at that location exists
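For intuition, the added guard makes an empty rule body a hard error before any reference checking happens. A minimal sketch of the same validation idea, using hypothetical stand-in types rather than the project's actual ones:

```cpp
#include <cstddef>
#include <stdexcept>
#include <vector>

// Hypothetical element type standing in for the parser's grammar element.
struct elem {
    bool        is_rule_ref;
    std::size_t rule_id;
};

// The two checks the patched loop performs: no rule may be empty, and every
// rule reference must point at a rule that actually exists.
void validate_rules(const std::vector<std::vector<elem>> & rules) {
    for (const auto & rule : rules) {
        if (rule.empty()) {
            throw std::runtime_error("Undefined rule");
        }
        for (const auto & e : rule) {
            if (e.is_rule_ref && e.rule_id >= rules.size()) {
                throw std::runtime_error("Rule reference out of range");
            }
        }
    }
}
```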


@@ -17,9 +17,9 @@ For example:
 
 ```bash
 ./bin/llama-export-lora \
-    -m open-llama-3b-v2-q8_0.gguf \
-    -o open-llama-3b-v2-q8_0-english2tokipona-chat.gguf \
-    --lora lora-open-llama-3b-v2-q8_0-english2tokipona-chat-LATEST.gguf
+    -m open-llama-3b-v2.gguf \
+    -o open-llama-3b-v2-english2tokipona-chat.gguf \
+    --lora lora-open-llama-3b-v2-english2tokipona-chat-LATEST.gguf
 ```
 
 Multiple LORA adapters can be applied by passing multiple `--lora FNAME` or `--lora-scaled FNAME S` command line parameters:


@@ -10,6 +10,12 @@
 
 static bool g_verbose = false;
 
+struct tensor_transformation {
+    struct ggml_tensor * in;
+    struct ggml_tensor * out;
+    bool is_copy;
+};
+
 static std::string get_kv_str(struct gguf_context * ctx_gguf, const std::string & key){
     int id = gguf_find_key(ctx_gguf, key.c_str());
     return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf, id));
@@ -198,8 +204,7 @@ struct lora_merge_ctx {
         }
 
         // mapping base tensor to out tensor (same shape with base, but different type)
-        // if out_tensor == nullptr, we only copy it
-        std::vector<std::pair<struct ggml_tensor *, struct ggml_tensor *>> base_to_out_tensors;
+        std::vector<tensor_transformation> trans;
         for (auto & it : base_model.tensors) {
             bool t_a = true;
             bool t_b = true;
@@ -212,14 +217,22 @@ struct lora_merge_ctx {
                 // only copy
                 struct ggml_tensor * cpy_tensor = ggml_dup_tensor(ctx_out_ggml, base_tensor);
                 ggml_set_name(cpy_tensor, base_tensor->name);
-                base_to_out_tensors.push_back(std::make_pair(cpy_tensor, nullptr));
+                trans.push_back({
+                    cpy_tensor,
+                    cpy_tensor,
+                    true,
+                });
                 gguf_add_tensor(ctx_out, cpy_tensor);
             } else if (t_a && t_b) {
                 // need merging
                 struct ggml_tensor * out_tensor = ggml_new_tensor(
                     ctx_out_ggml, get_out_tensor_type(base_tensor), GGML_MAX_DIMS, base_tensor->ne);
                 ggml_set_name(out_tensor, base_tensor->name);
-                base_to_out_tensors.push_back(std::make_pair(base_tensor, out_tensor));
+                trans.push_back({
+                    base_tensor,
+                    out_tensor,
+                    false,
+                });
                 gguf_add_tensor(ctx_out, out_tensor);
             } else {
                 throw std::runtime_error("tensor " + it.first + " missing either lora_a or lora_b");
@@ -234,12 +247,12 @@ struct lora_merge_ctx {
 
         // process base model tensors
         size_t n_merged = 0;
-        for (auto & it : base_to_out_tensors) {
-            if (it.second != nullptr) {
-                merge_tensor(it.first, it.second);
+        for (auto & it : trans) {
+            if (!it.is_copy) {
+                merge_tensor(it.in, it.out);
                 n_merged++;
             } else {
-                copy_tensor(it.first);
+                copy_tensor(it.in);
             }
         }
@@ -252,7 +265,7 @@ struct lora_merge_ctx {
         }
 
         printf("%s : merged %ld tensors with lora adapters\n", __func__, n_merged);
-        printf("%s : wrote %ld tensors to output file\n", __func__, base_to_out_tensors.size());
+        printf("%s : wrote %ld tensors to output file\n", __func__, trans.size());
     }
 
     void copy_tensor(struct ggml_tensor * base) {
@@ -285,6 +298,10 @@ struct lora_merge_ctx {
         for (size_t i = 0; i < adapters.size(); ++i) {
             auto t_a = adapters[i]->get_tensor(name_lora_a);
             auto t_b = adapters[i]->get_tensor(name_lora_b);
+            // TODO: add support for quantized lora
+            if (ggml_is_quantized(t_a->type) || ggml_is_quantized(t_b->type)) {
+                throw std::runtime_error("quantized LoRA adapters is not supported, please retry with f16 or f32");
+            }
             inp_a[i] = ggml_dup_tensor(ctx, t_a);
             inp_b[i] = ggml_dup_tensor(ctx, t_b);
         }
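The refactor swaps a `std::pair` whose `nullptr` second element implicitly meant "copy only" for a struct with an explicit `is_copy` flag. A compilable sketch of the before/after shape, using stand-in types (not the project's actual ones):

```cpp
#include <utility>
#include <vector>

struct tensor { int id; };  // stand-in for ggml_tensor

// Before: a pair where `second == nullptr` implicitly means "copy only".
void process_pairs(const std::vector<std::pair<tensor *, tensor *>> & v) {
    for (const auto & it : v) {
        if (it.second != nullptr) {
            // merge_tensor(it.first, it.second);
        } else {
            // copy_tensor(it.first);
        }
    }
}

// After: the intent is carried by a named flag instead of a sentinel.
struct transformation {
    tensor * in;
    tensor * out;
    bool     is_copy;
};

void process_trans(const std::vector<transformation> & v) {
    for (const auto & it : v) {
        if (!it.is_copy) {
            // merge_tensor(it.in, it.out);
        } else {
            // copy_tensor(it.in);
        }
    }
}
```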


@@ -2,4 +2,4 @@
 --extra-index-url https://download.pytorch.org/whl/cpu
 pillow~=10.2.0
 torch~=2.2.1
-torchvision==0.17.1
+torchvision~=0.17.1
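For reference, `~=0.17.1` is PEP 440 compatible-release notation, equivalent to `>=0.17.1, ==0.17.*`: the change lets patch releases of torchvision install, matching the style of the sibling `pillow` and `torch` pins.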


@@ -632,6 +632,7 @@ struct server_context {
 
     bool clean_kv_cache = true;
     bool add_bos_token  = true;
+    bool has_eos_token  = false;
 
     int32_t n_ctx; // total context for all clients / slots
@@ -694,7 +695,7 @@ struct server_context {
         n_ctx = llama_n_ctx(ctx);
 
         add_bos_token = llama_should_add_bos_token(model);
-        GGML_ASSERT(llama_add_eos_token(model) != 1);
+        has_eos_token = llama_add_eos_token(model) != 1;
 
         return true;
     }
@@ -754,13 +755,13 @@ struct server_context {
         default_generation_settings_for_props = get_formated_generation(slots.front());
         default_generation_settings_for_props["seed"] = -1;
 
-        // the update_slots() logic will always submit a maximum of n_batch tokens
+        // the update_slots() logic will always submit a maximum of n_batch or n_parallel tokens
         // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used)
         {
             const int32_t n_batch = llama_n_batch(ctx);
 
             // only a single seq_id per token is needed
-            batch = llama_batch_init(n_batch, 0, 1);
+            batch = llama_batch_init(std::max(n_batch, params.n_parallel), 0, 1);
         }
 
         metrics.init();
@@ -1032,7 +1033,7 @@ struct server_context {
         {
             slot.sparams.logit_bias.clear();
 
-            if (json_value(data, "ignore_eos", false)) {
+            if (json_value(data, "ignore_eos", false) && has_eos_token) {
                 slot.sparams.logit_bias[llama_token_eos(model)] = -INFINITY;
             }
@@ -1137,28 +1138,19 @@ struct server_context {
         if (!system_prompt.empty()) {
             system_tokens = ::llama_tokenize(ctx, system_prompt, true);
 
-            llama_batch_clear(batch);
+            const int32_t n_batch = llama_n_batch(ctx);
+            const int32_t n_tokens_prompt = system_tokens.size();
 
-            for (int i = 0; i < (int)system_tokens.size(); ++i) {
-                llama_batch_add(batch, system_tokens[i], i, { 0 }, false);
-            }
+            for (int32_t i = 0; i < n_tokens_prompt; i += n_batch) {
+                const int32_t n_tokens = std::min(n_batch, n_tokens_prompt - i);
 
-            const int32_t n_batch = llama_n_batch(ctx);
+                llama_batch_clear(batch);
 
-            for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
-                const int32_t n_tokens = std::min(params.n_batch, batch.n_tokens - i);
-                llama_batch batch_view = {
-                    n_tokens,
-                    batch.token    + i,
-                    nullptr,
-                    batch.pos      + i,
-                    batch.n_seq_id + i,
-                    batch.seq_id   + i,
-                    batch.logits   + i,
-                    0, 0, 0, // unused
-                };
+                for (int32_t j = 0; j < n_tokens; ++j) {
+                    llama_batch_add(batch, system_tokens[i + j], i + j, { 0 }, false);
+                }
 
-                if (llama_decode(ctx, batch_view) != 0) {
+                if (llama_decode(ctx, batch) != 0) {
                     LOG_ERROR("llama_decode() failed", {});
                     return;
                 }
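The rewritten loop is an instance of a general pattern: feeding an arbitrarily long token list to a decoder that accepts at most `n_batch` tokens per call, clearing and refilling one batch per chunk. A self-contained sketch of that pattern (names are illustrative, not the server's API):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Feed `tokens` to `decode` in chunks of at most n_batch tokens per call.
bool decode_in_chunks(const std::vector<int32_t> & tokens, int32_t n_batch,
                      bool (*decode)(const int32_t * chunk, int32_t n)) {
    const int32_t n_total = (int32_t) tokens.size();
    for (int32_t i = 0; i < n_total; i += n_batch) {
        // last chunk may be shorter than n_batch
        const int32_t n = std::min(n_batch, n_total - i);
        if (!decode(tokens.data() + i, n)) {
            return false; // mirrors the patch's early return when llama_decode() fails
        }
    }
    return true;
}
```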


@@ -244,6 +244,8 @@
 #define GGML_EXIT_SUCCESS 0
 #define GGML_EXIT_ABORTED 1
 
+#define GGML_ROPE_TYPE_NEOX 2
+
 #define GGUF_MAGIC "GGUF"
 
 #define GGUF_VERSION 3
@@ -1459,8 +1461,8 @@ extern "C" {
             struct ggml_tensor  * b);
 
     // rotary position embedding
-    // if mode & 1 == 1, skip n_past elements (NOT SUPPORTED)
-    // if mode & 2 == 1, GPT-NeoX style
+    // if (mode & 1) - skip n_past elements (NOT SUPPORTED)
+    // if (mode & GGML_ROPE_TYPE_NEOX) - GPT-NeoX style
     //
     // b is an int32 vector with size a->ne[2], it contains the positions
     GGML_API struct ggml_tensor * ggml_rope(
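The new constant makes the intent of the test explicit: the RoPE variant is selected by masking a bit in `mode`, not by comparing it to a bare literal, so other bits can coexist in the same bitmask. A minimal sketch of the test that the backend hunks below all adopt (the helper name is illustrative):

```cpp
#include <cstdio>

#define GGML_ROPE_TYPE_NEOX 2  // value from the hunk above

// Illustrative helper (not from the source): true iff the NeoX bit is set.
static bool rope_is_neox(int mode) {
    return (mode & GGML_ROPE_TYPE_NEOX) != 0;
}

int main() {
    std::printf("mode=0 -> %d\n", rope_is_neox(0) ? 1 : 0); // plain RoPE
    std::printf("mode=2 -> %d\n", rope_is_neox(2) ? 1 : 0); // GPT-NeoX style
    std::printf("mode=3 -> %d\n", rope_is_neox(3) ? 1 : 0); // bit test, not equality
    return 0;
}
```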


@@ -2881,7 +2881,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
     ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast,
                              beta_slow, corr_dims);
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     // init cos/sin cache
     ggml_cann_pool_alloc sin_allocator(


@@ -226,7 +226,7 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     memcpy(&beta_fast, (int32_t *) dst->op_params +  9, sizeof(float));
     memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     const int32_t * pos = (const int32_t *) src1_d;


@@ -2313,7 +2313,7 @@ static enum ggml_status ggml_metal_graph_compute(
     memcpy(&beta_fast, (int32_t *) dst->op_params +  9, sizeof(float));
     memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     id<MTLComputePipelineState> pipeline = nil;


@@ -226,7 +226,7 @@ void ggml_sycl_op_rope(
     memcpy(&beta_fast, (int32_t *) dst->op_params +  9, sizeof(float));
     memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     const int32_t * pos = (const int32_t *) src1_dd;


@@ -268,6 +268,10 @@ struct vk_subbuffer {
     vk_buffer buffer;
     uint64_t offset;
    uint64_t size;
+
+    operator vk::DescriptorBufferInfo() const {
+        return { buffer->buffer, offset, size };
+    }
 };
 
 struct vk_semaphore {
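The added conversion operator is what lets the call sites further down pass brace-initialized `vk_subbuffer` lists where `vk::DescriptorBufferInfo` values are expected. A compilable sketch of the idiom with stand-in types:

```cpp
#include <cstdint>
#include <vector>

// Stand-ins for the Vulkan handle and descriptor types.
struct raw_buffer      { std::uint64_t handle; };
struct descriptor_info { std::uint64_t buffer; std::uint64_t offset; std::uint64_t range; };

struct subbuffer {
    raw_buffer *  buffer;
    std::uint64_t offset;
    std::uint64_t size;

    // Implicit conversion: a subbuffer can appear anywhere a
    // descriptor_info is expected, including inside initializer lists.
    operator descriptor_info() const {
        return { buffer->handle, offset, size };
    }
};

int main() {
    raw_buffer b{42};
    // Each subbuffer converts on the fly while the vector is built.
    std::vector<descriptor_info> infos = { subbuffer{ &b, 0, 128 }, subbuffer{ &b, 128, 64 } };
    return infos.size() == 2 ? 0 : 1;
}
```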
@@ -1063,13 +1067,14 @@ static vk_subbuffer ggml_vk_subbuffer(vk_buffer& buf) {
 static void ggml_vk_sync_buffers(vk_context& ctx) {
     VK_LOG_DEBUG("ggml_vk_sync_buffers()");
 
-    const std::vector<vk::MemoryBarrier> mem_barriers{ { { vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite }, { vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite } } };
-
     ctx->s->buffer.pipelineBarrier(
         ctx->q->stage_flags,
         ctx->q->stage_flags,
         {},
-        mem_barriers,
+        { {
+          {vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite},
+          {vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite}
+        } },
         {},
         {}
     );
@@ -2420,28 +2425,23 @@ static vk_submission ggml_vk_begin_submission(vk_device& device, vk_queue& q, bo
     return s;
 }
 
-static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline, std::vector<vk_subbuffer>&& buffers, size_t push_constant_size, const void* push_constants, std::array<uint32_t, 3> elements) {
+static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context* ctx, vk_context& subctx, vk_pipeline& pipeline, std::initializer_list<vk::DescriptorBufferInfo> const& descriptor_buffer_infos, size_t push_constant_size, const void* push_constants, std::array<uint32_t, 3> elements) {
     const uint32_t wg0 = CEIL_DIV(elements[0], pipeline->wg_denoms[0]);
     const uint32_t wg1 = CEIL_DIV(elements[1], pipeline->wg_denoms[1]);
     const uint32_t wg2 = CEIL_DIV(elements[2], pipeline->wg_denoms[2]);
     VK_LOG_DEBUG("ggml_vk_dispatch_pipeline(" << pipeline->name << ", {";
-    for (auto& buffer : buffers) {
-        std::cerr << "(" << buffer.buffer << ", " << buffer.offset << ", " << buffer.size << "), ";
+    for (auto& buffer : descriptor_buffer_infos) {
+        std::cerr << "(" << buffer << ", " << buffer.offset << ", " << buffer.size << "), ";
     }
     std::cerr << "}, (" << wg0 << "," << wg1 << "," << wg2 << "))");
-    std::vector<vk::DescriptorBufferInfo> descriptor_buffer_infos;
-    std::vector<vk::WriteDescriptorSet> write_descriptor_sets;
     GGML_ASSERT(pipeline->descriptor_set_idx < pipeline->descriptor_sets.size());
-    GGML_ASSERT(buffers.size() == pipeline->parameter_count);
-    vk::DescriptorSet& descriptor_set = pipeline->descriptor_sets[pipeline->descriptor_set_idx++];
-    for (uint32_t i = 0; i < pipeline->parameter_count; i++) {
-        descriptor_buffer_infos.push_back({buffers[i].buffer->buffer, buffers[i].offset, buffers[i].size});
-    }
-    for (uint32_t i = 0; i < pipeline->parameter_count; i++) {
-        write_descriptor_sets.push_back({descriptor_set, i, 0, 1, vk::DescriptorType::eStorageBuffer, nullptr, &descriptor_buffer_infos[i]});
-    }
-    ctx->device->device.updateDescriptorSets(write_descriptor_sets, {});
+    GGML_ASSERT(descriptor_buffer_infos.size() == pipeline->parameter_count);
+
+    vk::DescriptorSet& descriptor_set = pipeline->descriptor_sets[pipeline->descriptor_set_idx++];
+    vk::WriteDescriptorSet write_descriptor_set{ descriptor_set, 0, 0, pipeline->parameter_count, vk::DescriptorType::eStorageBuffer, nullptr, descriptor_buffer_infos.begin() };
+    ctx->device->device.updateDescriptorSets({ write_descriptor_set }, {});
 
     subctx->s->buffer.pushConstants(pipeline->layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size, push_constants);
     subctx->s->buffer.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->pipeline);
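The descriptor update is also consolidated: instead of one `vk::WriteDescriptorSet` per binding, a single write with `descriptorCount == parameter_count` covers the whole contiguous info array. A rough sketch of that batching shape, with a stand-in for the driver call:

```cpp
#include <cstdio>
#include <vector>

struct info { int binding_data; };

// Stand-in for the driver call: writes `count` descriptors starting at `first`.
void update_descriptors(const info * first, unsigned count) {
    std::printf("one update call covering %u descriptors\n", count);
    (void) first;
}

int main() {
    std::vector<info> infos = { {1}, {2}, {3} };

    // Before: one call per descriptor.
    for (const info & i : infos) {
        update_descriptors(&i, 1);
    }

    // After: a single call over the contiguous array, as the patch does with
    // one vk::WriteDescriptorSet whose descriptorCount spans all bindings.
    update_descriptors(infos.data(), (unsigned) infos.size());
    return 0;
}
```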
@@ -3123,7 +3123,7 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub
     } else if (qx_needs_dequant) {
         const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
         ggml_vk_sync_buffers(subctx);
-        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0, { { d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, { d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
+        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, vk_subbuffer{ d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
     }
     if (y_non_contig) {
         ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
@@ -3312,7 +3312,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
     };
     ggml_vk_sync_buffers(subctx);
     ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
-        { { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 }, { d_D, d_buf_offset, d_sz * ne22 * ne23} },
+        { vk_subbuffer{ d_X, x_buf_offset, x_sz * ne02 * ne03 }, vk_subbuffer{ d_Y, y_buf_offset, y_sz * ne12 * ne13 }, vk_subbuffer{ d_D, d_buf_offset, d_sz * ne22 * ne23} },
         sizeof(vk_mat_vec_push_constants), &pc, { groups_x, (uint32_t)(ne12 * ne13), groups_z });
 }
@@ -3384,7 +3384,7 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c
     // compute
     const std::array<uint32_t, 6> pc = { (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
     ggml_vk_sync_buffers(subctx);
-    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, { { d_Qx, qx_buf_offset, qx_sz }, { d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, { d_D, d_buffer_offset, d_sz + d_shader_offset } }, 6 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
+    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset } }, 6 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
 }
 
 static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
@@ -3459,7 +3459,8 @@ static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_con
     // compute
     const std::array<uint32_t, 7> pc = { (uint32_t)ne00, (uint32_t)ne01, row_stride_x, channel_stride_x, (uint32_t)(ne12 / ne02), (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
     ggml_vk_sync_buffers(subctx);
-    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, { { d_Qx, qx_buf_offset, qx_sz }, { d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, { d_D, d_buffer_offset, d_sz + d_shader_offset } }, 7 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
+    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32,
+        { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset } }, 7 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
 }
 
 static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
@@ -3634,7 +3635,8 @@ static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context&
     } else if (qx_needs_dequant) {
         const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
         ggml_vk_sync_buffers(subctx);
-        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0, { { d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, { d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
+        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0,
+            { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, vk_subbuffer{ d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
     }
     if (y_non_contig) {
         ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
@@ -3834,7 +3836,8 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte
     };
     ggml_vk_sync_buffers(subctx);
     ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
-        { { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 }, { d_D, d_buf_offset, d_sz * ne22 * ne23}, { d_ids, ids_buf_offset, ids_sz } },
+        { vk_subbuffer{ d_X, x_buf_offset, x_sz * ne02 * ne03 },
+          vk_subbuffer{ d_Y, y_buf_offset, y_sz * ne12 * ne13 }, vk_subbuffer{ d_D, d_buf_offset, d_sz * ne22 * ne23}, vk_subbuffer{ d_ids, ids_buf_offset, ids_sz } },
         sizeof(vk_mat_vec_id_push_constants), &pc, { groups_x, (uint32_t)nei0, groups_z });
 }
@@ -4050,7 +4053,7 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
     case GGML_OP_ROPE:
         {
             const int mode = ((const int32_t *) dst->op_params)[2];
-            const bool is_neox = mode & 2;
+            const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
             if (is_neox) {
                 if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
@@ -4381,7 +4384,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
         }
 
         ggml_vk_sync_buffers(subctx);
-        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset, x_sz }, subbuf_y, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, subbuf_y, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
     } else if (op == GGML_OP_ROPE) {
         // Empty src2 is possible in rope, but the shader needs a buffer
         vk_subbuffer subbuf_z;
@@ -4392,20 +4395,20 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
         }
 
         ggml_vk_sync_buffers(subctx);
-        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset, x_sz }, { d_Y, y_buf_offset, y_sz }, subbuf_z, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, subbuf_z, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
     } else if (op == GGML_OP_IM2COL) {
         // im2col uses only src1 and dst buffers
         ggml_vk_sync_buffers(subctx);
-        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_Y, y_buf_offset, y_sz }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
     } else if (use_src2) {
         ggml_vk_sync_buffers(subctx);
-        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset, x_sz }, { d_Y, y_buf_offset, y_sz }, { d_Z, z_buf_offset, z_sz }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_Z, z_buf_offset, z_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
     } else if (use_src1) {
         ggml_vk_sync_buffers(subctx);
-        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset, x_sz }, { d_Y, y_buf_offset, y_sz }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
     } else {
         ggml_vk_sync_buffers(subctx);
-        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset, x_sz }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
+        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
     }
 } else {
     GGML_ASSERT(op != GGML_OP_SOFT_MAX);
@@ -4442,10 +4445,10 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
             if (use_src1) {
                 ggml_vk_sync_buffers(subctx);
-                ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset + x_offset, x_sz }, { d_Y, y_buf_offset + y_offset, y_sz }, { d_D, d_buf_offset + d_offset, d_sz } }, sizeof(PC), &pc, elements);
+                ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset + x_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset + y_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset + d_offset, d_sz } }, sizeof(PC), &pc, elements);
             } else {
                 ggml_vk_sync_buffers(subctx);
-                ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { { d_X, x_buf_offset + x_offset, x_sz }, { d_D, d_buf_offset + d_offset, d_sz } }, sizeof(PC), &pc, elements);
+                ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset + x_offset, x_sz }, vk_subbuffer{ d_D, d_buf_offset + d_offset, d_sz } }, sizeof(PC), &pc, elements);
             }
         }
     }


@@ -14148,7 +14148,7 @@ static void ggml_compute_forward_rope_f32(
     float corr_dims[2];
     ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     const float * freq_factors = NULL;
     if (src2 != NULL) {
@@ -14273,7 +14273,7 @@ static void ggml_compute_forward_rope_f16(
     float corr_dims[2];
     ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     const float * freq_factors = NULL;
     if (src2 != NULL) {
@@ -21232,7 +21232,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
                 (int64_t) info->ne[2] *
                 (int64_t) info->ne[3];
 
-            if (ne % ggml_blck_size(info->type) != 0) {
+            if (ggml_blck_size(info->type) == 0 || ne % ggml_blck_size(info->type) != 0) {
                 fprintf(stderr, "%s: tensor '%s' of type %d (%s) number of elements (%" PRId64 ") is not a multiple of block size (%" PRId64 ")\n",
                         __func__, info->name.data, (int) info->type, ggml_type_name(info->type), ne, ggml_blck_size(info->type));
                 fclose(file);
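The extra clause in the last hunk is a classic divide-by-zero guard: `||` short-circuits, so a corrupted type with block size 0 is rejected before it can be used as a modulus. A standalone sketch of the hardened check:

```cpp
#include <cstdint>

// Reject blck_size == 0 first; evaluating ne % 0 is undefined behavior.
bool is_whole_number_of_blocks(int64_t ne, int64_t blck_size) {
    return blck_size != 0 && ne % blck_size == 0;
}
```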

File diff suppressed because it is too large.

gguf-py/tests/test_quants.py (new executable file, 237 lines)

@@ -0,0 +1,237 @@
+#!/usr/bin/env python3
+
+# Test gguf.quants so that it exactly matches the C implementation of the (de)quantization
+
+# NOTE: this is kind of a mess, but at least it worked for initially testing the Python implementations.
+
+from __future__ import annotations
+
+import argparse
+from math import prod
+import os
+import sys
+from pathlib import Path
+import ctypes
+import logging
+
+import numpy as np
+
+# Necessary to load the local gguf package
+if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists():
+    sys.path.insert(0, str(Path(__file__).parent.parent))
+
+import gguf
+from gguf.constants import GGMLQuantizationType
+
+
+logger = logging.getLogger("test-quants")
+
+
+c_float_p = ctypes.POINTER(ctypes.c_float)
+
+
+class ggml_init_params(ctypes.Structure):
+    _fields_ = [
+        ("mem_size", ctypes.c_size_t),
+        ("mem_buffer", ctypes.c_void_p),
+        ("no_alloc", ctypes.c_bool),
+    ]
+
+
+class GGMLQuants:
+    libggml: ctypes.CDLL
+
+    def __init__(self, libggml: Path):
+        self.libggml = ctypes.CDLL(str(libggml))
+        self.libggml.ggml_quantize_chunk.restype = ctypes.c_size_t
+        # enum ggml_type   type,
+        #    const float * src,
+        #           void * dst,
+        #        int64_t   start,
+        #        int64_t   nrows,
+        #        int64_t   n_per_row,
+        #    const float * imatrix) {
+        self.libggml.ggml_quantize_chunk.argtypes = (
+            ctypes.c_int,
+            ctypes.POINTER(ctypes.c_float),
+            ctypes.c_void_p,
+            ctypes.c_int64,
+            ctypes.c_int64,
+            ctypes.c_int64,
+            ctypes.POINTER(ctypes.c_float),
+        )
+
+        self.libggml.ggml_quantize_requires_imatrix.restype = ctypes.c_bool
+        self.libggml.ggml_quantize_requires_imatrix.argtypes = (ctypes.c_int,)
+
+        for t in (
+            "q4_0", "q4_1", "q5_0", "q5_1", "q8_0",
+            "q2_K", "q3_K", "q4_K", "q5_K", "q6_K",
+            "iq2_xxs", "iq2_xs", "iq2_s", "iq3_xxs", "iq3_s", "iq1_s", "iq1_m",
+            "iq4_nl", "iq4_xs",
+        ):
+            dequant_func: ctypes._NamedFuncPointer = getattr(self.libggml, "dequantize_row_" + t)
+            dequant_func.restype = None
+            dequant_func.argtypes = (ctypes.c_void_p, ctypes.POINTER(ctypes.c_float), ctypes.c_int64)
+
+        self.libggml.ggml_fp16_to_fp32_row.restype = None
+        self.libggml.ggml_fp16_to_fp32_row.argtypes = (ctypes.POINTER(ctypes.c_uint16), ctypes.POINTER(ctypes.c_float), ctypes.c_int64)
+        self.libggml.ggml_bf16_to_fp32_row.restype = None
+        self.libggml.ggml_bf16_to_fp32_row.argtypes = (ctypes.POINTER(ctypes.c_uint16), ctypes.POINTER(ctypes.c_float), ctypes.c_int64)
+
+        self.libggml.ggml_init.argtypes = (ggml_init_params,)
+
+        self.libggml.ggml_init(ggml_init_params(1 * 1024 * 1024, 0, False))
+
+    def dequantize(self, tensor: np.ndarray, qtype: GGMLQuantizationType) -> np.ndarray:
+        result = np.zeros(gguf.quant_shape_from_byte_shape(tensor.shape, qtype), dtype=np.float32, order="C")
+        if qtype == GGMLQuantizationType.F32:
+            # no-op
+            result = tensor.view(np.float32)
+        elif qtype == GGMLQuantizationType.F16:
+            self.libggml.ggml_fp16_to_fp32_row(tensor.ctypes.data_as(ctypes.POINTER(ctypes.c_uint16)), result.ctypes.data_as(c_float_p), result.size)
+        elif qtype == GGMLQuantizationType.BF16:
+            self.libggml.ggml_bf16_to_fp32_row(tensor.ctypes.data_as(ctypes.POINTER(ctypes.c_uint16)), result.ctypes.data_as(c_float_p), result.size)
+        else:
+            lw_qname = qtype.name.lower()
+            if lw_qname[-1] == "k":
+                lw_qname = lw_qname[:-1] + "K"
+            dequant_func: ctypes._NamedFuncPointer = getattr(self.libggml, "dequantize_row_" + lw_qname)
+            dequant_func(tensor.ctypes.data_as(ctypes.c_void_p), result.ctypes.data_as(c_float_p), result.size)
+        return result
+
+    def quantize(self, data: np.ndarray, qtype: GGMLQuantizationType) -> np.ndarray:
+        result = np.zeros(gguf.quant_shape_to_byte_shape(data.shape, qtype), dtype=np.uint8, order="C")
+        if self.libggml.ggml_quantize_requires_imatrix(qtype.value):
+            # TODO: is a column-wise sum of squares appropriate?
+            qw = np.sum((data * data).reshape((-1, data.shape[-1])), axis=0).ctypes.data_as(c_float_p)
+        else:
+            qw = ctypes.cast(0, c_float_p)
+        result_size = self.libggml.ggml_quantize_chunk(qtype.value, data.ctypes.data_as(c_float_p), result.ctypes.data_as(ctypes.c_void_p), 0, prod(data.shape[:-1]), data.shape[-1], qw)
+        assert result.size == result_size
+        return result
+
+
+def compare_tensors(t1: np.ndarray, t2: np.ndarray, qtype: GGMLQuantizationType) -> bool:
+    same = np.array_equal(t1, t2)
+    if same:
+        return True
+    else:
+        block_size, type_size = gguf.GGML_QUANT_SIZES[qtype]
+        if t1.dtype == np.float32:
+            t1 = t1.reshape((-1, block_size))
+            t2 = t2.reshape((-1, block_size))
+        else:
+            t1 = t1.reshape((-1, type_size))
+            t2 = t2.reshape((-1, type_size))
+        x = t1.view(np.uint8) ^ t2.view(np.uint8)
+        diff_bits = np.count_nonzero(np.unpackbits(x, axis=-1), axis=-1)
+        num_bad_blocks = np.count_nonzero(diff_bits, axis=0)
+        if num_bad_blocks == 0 and t1.shape == t2.shape:
+            logger.debug("Bits are equal, but arrays don't match, likely contains NANs")
+            return True
+        logger.debug(f"{num_bad_blocks} bad blocks ({100 * num_bad_blocks / x.shape[0]:.6f}%)")
+        bad_block_id = np.argmax(diff_bits, axis=0)
+        logger.debug(f"Worst block id: {bad_block_id}")
+        logger.debug(f"Sample bad block ({diff_bits[bad_block_id]} differing bits):\n{t1[bad_block_id]}\nReference:\n{t2[bad_block_id]}")
+
+        sum_diff_bits = np.sum(diff_bits)
+        logger.debug(f"{sum_diff_bits} bits differ ({100 * sum_diff_bits / (x.size * 8):.6f}%)")
+        return False
+
+
+def do_test(libggml_path: Path, quick: bool = False):
+    ggml_quants = GGMLQuants(libggml_path)
+
+    np.set_printoptions(precision=None, threshold=(4 * 256) + 1, formatter={"int": lambda n: "0x%02X" % n})
+
+    r = np.random.randn(8, 1024, 1024).astype(np.float32, copy=False)
+
+    for qtype in (GGMLQuantizationType.F16, *gguf.quants._type_traits.keys()):
+        has_dequantize = False
+        has_quantize = False
+
+        try:
+            gguf.dequantize(np.zeros((gguf.GGML_QUANT_SIZES[qtype][1]), dtype=np.uint8), qtype)
+            has_dequantize = True
+        except (NotImplementedError, AssertionError) as e:
+            if isinstance(e, AssertionError):
+                logger.error(f"Error with {qtype.name}: {e}")
+                raise e
+        try:
+            gguf.quantize(np.zeros((gguf.GGML_QUANT_SIZES[qtype][0]), dtype=np.float32), qtype)
+            has_quantize = True
+        except (NotImplementedError, AssertionError) as e:
+            if isinstance(e, AssertionError):
+                logger.error(f"Error with {qtype.name}: {e}")
+                raise e
+
+        if not has_dequantize and not has_quantize:
+            continue
+
+        logger.info(f"Testing {qtype.name}")
+
+        rc = r.copy(order="C")
+
+        pyq = None
+        ggq = None
+
+        if has_quantize:
+            logger.debug(f"Quantizing to {qtype.name} with Python")
+            pyq = gguf.quants.quantize(rc, qtype)
+
+            logger.debug(f"Quantizing to {qtype.name} with C")
+            ggq = ggml_quants.quantize(rc, qtype)
+
+            if qtype == GGMLQuantizationType.F16:
+                pyq = pyq.view(np.uint8)
+            quant_equal = compare_tensors(pyq, ggq, qtype)
+
+            if not quant_equal:
+                logger.error(f"Quantization to {qtype.name} does not match ❌")
+            else:
+                logger.info(f"Quantization to {qtype.name} matches exactly ✅")
+
+        if has_dequantize:
+            if ggq is None and not quick:
+                logger.debug(f"Quantizing to {qtype.name} with C")
+                ggq = ggml_quants.quantize(rc, qtype)
+
+            if ggq is not None:
+                logger.debug(f"Dequantizing from {qtype.name} with Python")
+                pydq = gguf.quants.dequantize(ggq, qtype)
+                logger.debug(f"Dequantizing from {qtype.name} with C")
+                ggdq = ggml_quants.dequantize(ggq, qtype)
+
+                dequant_equal = compare_tensors(pydq, ggdq, qtype)
+
+                if not dequant_equal:
+                    logger.error(f"Dequantization from {qtype.name} does not match ❌")
+                else:
+                    logger.info(f"Dequantization from {qtype.name} matches exactly ✅")
+
+            rq_shape = gguf.quants.quant_shape_to_byte_shape((8, 1024, 1024 // 2), qtype)
+            rq = np.random.random(rq_shape).astype(np.float16).view(np.uint8)
+
+            logger.debug(f"Dequantizing random f16 data as {qtype.name} with Python")
+            pydq = gguf.quants.dequantize(rq, qtype)
+            logger.debug(f"Dequantizing random f16 data as {qtype.name} with C")
+            ggdq = ggml_quants.dequantize(rq, qtype)
+
+            dequant_equal = compare_tensors(pydq, ggdq, qtype)
+
+            if not dequant_equal:
+                logger.error(f"Dequantization from random f16 data as {qtype.name} does not match ❌")
+            else:
+                logger.info(f"Dequantization from random f16 data as {qtype.name} matches exactly ✅")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Test Python (de)quantization against the reference C implementation")
+    parser.add_argument("--libggml", type=Path, default=Path(__file__).parent.parent.parent / "build" / "ggml" / "src" / "libggml.so", help="The path to libggml.so")
+    parser.add_argument("--quick", action="store_true", help="Don't quantize with C when it's not strictly necessary")
+
+    args = parser.parse_args()
+
+    logging.basicConfig(level=logging.DEBUG)
+
+    do_test(args.libggml, args.quick)
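Given the argparse setup above, the test is invoked directly against a built `libggml.so`, for example `python3 gguf-py/tests/test_quants.py --libggml build/ggml/src/libggml.so`; the `--quick` flag skips quantizing with C when it is not strictly necessary.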


@@ -95,13 +95,10 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22,
     };
 
-    // note: these values should be synchronized with ggml_rope
-    // TODO: maybe move this enum to ggml.h (ggml_rope_type)
     enum llama_rope_type {
         LLAMA_ROPE_TYPE_NONE = -1,
         LLAMA_ROPE_TYPE_NORM = 0,
-        LLAMA_ROPE_TYPE_NEOX = 2,
-        LLAMA_ROPE_TYPE_GLM  = 4,
+        LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX,
     };
 
     enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file


@@ -8046,7 +8046,7 @@ Current version indicated by LITEVER below.
 function togglepalmmodel()
 {
     let mdlname = document.getElementById("custom_palm_model").value;
-    if(mdlname=="gemini-1.5-pro-latest" || mdlname=="gemini-1.5-flash-latest")
+    if(mdlname=="gemini-1.5-pro-latest" || mdlname=="gemini-1.5-flash-latest" || mdlname=="gemini-1.5-pro-exp-0801")
     {
         document.getElementById("gemini_system_instruction").classList.remove("hidden");
         if(localsettings.saved_palm_jailbreak=="")
@@ -12451,7 +12451,7 @@ Current version indicated by LITEVER below.
     };
 
     let sysinst = document.getElementById("gemini_system_instruction").value;
-    if(sysinst!="" && (mdlname=="gemini-1.5-pro-latest" || mdlname=="gemini-1.5-flash-latest"))
+    if(sysinst!="" && (mdlname=="gemini-1.5-pro-latest" || mdlname=="gemini-1.5-flash-latest" || mdlname=="gemini-1.5-pro-exp-0801"))
     {
         payload["systemInstruction"] = {
             "role": "system",
@@ -18176,6 +18176,7 @@ Current version indicated by LITEVER below.
     <option value="10">10x</option>
     </select>
     <select title="Idle Responses Duration" style="padding:1px; height:auto; width: 27px; appearance: none; font-size: 7pt;" class="form-control" id="idle_duration">
+    <option value="5">5s</option>
     <option value="15">15s</option>
     <option value="30">30s</option>
     <option value="60">60s</option>


@@ -85,14 +85,14 @@ void llama_sample_top_k_impl(struct llama_sampling * smpl, llama_token_data_arra
         constexpr float bucket_low   = -10.0f;
         constexpr float bucket_high  =  10.0f;
         constexpr float bucket_scale = nbuckets/(bucket_high - bucket_low);
-        constexpr float bucker_inter = -bucket_low * bucket_scale;
+        constexpr float bucket_inter = -bucket_low * bucket_scale;
 
         std::vector<int> bucket_idx(candidates->size);
         std::vector<int> histo(nbuckets, 0);
 
         for (int i = 0; i < (int)candidates->size; ++i) {
             const float val = candidates->data[i].logit;
-            int ib = int(bucket_scale * val + bucker_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low);
+            int ib = int(bucket_scale * val + bucket_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low);
             ib = std::max(0, std::min(nbuckets-1, ib));
             bucket_idx[i] = ib;
             ++histo[ib];
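Aside from the `bucker_inter` to `bucket_inter` typo fix, the expression is an affine map from a logit value to a histogram bucket, precomputed so the hot loop is a single multiply-add plus clamp. A standalone sketch with illustrative constants:

```cpp
#include <algorithm>
#include <cstdio>

int main() {
    constexpr int   nbuckets     = 128;
    constexpr float bucket_low   = -10.0f;
    constexpr float bucket_high  =  10.0f;
    // Precompute the affine map, so that in the loop:
    //   ib = nbuckets * (val - bucket_low) / (bucket_high - bucket_low)
    constexpr float bucket_scale = nbuckets / (bucket_high - bucket_low);
    constexpr float bucket_inter = -bucket_low * bucket_scale;

    for (float val : { -12.0f, -10.0f, 0.0f, 9.99f, 15.0f }) {
        int ib = int(bucket_scale * val + bucket_inter);
        ib = std::max(0, std::min(nbuckets - 1, ib)); // clamp out-of-range logits
        std::printf("val=%6.2f -> bucket %d\n", val, ib);
    }
    return 0;
}
```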


@@ -3594,13 +3594,8 @@ namespace GGUFMeta {
 
 using llama_buf_map = std::unordered_map<uint32_t, ggml_backend_buffer_t>;
 
-// TODO: update when needed or think of some clever automatic way to do this
-static size_t llama_model_max_nodes(const llama_model & /*model*/) {
-    //if (model.arch == LLM_ARCH_LLAMA && model.hparams.n_layer > ??) { // llama-3 405B
-    //    return 32768;
-    //}
-
-    return 8192;
+static size_t llama_model_max_nodes(const llama_model & model) {
+    return std::max<size_t>(8192, model.tensors_by_name.size()*5);
 }
 
 struct llama_model_loader {
@@ -14801,11 +14796,14 @@ static int llama_decode_internal(
             embd = nullptr;
         } else if (cparams.embeddings) {
             res  = nullptr; // do not extract logits for embedding case
-            embd = gf->nodes[gf->n_nodes - 1];
-            if (strcmp(embd->name, "result_embd_pooled") != 0) {
-                embd = gf->nodes[gf->n_nodes - 2];
+            embd = nullptr;
+            for (int i = gf->n_nodes - 1; i >= 0; --i) {
+                if (strcmp(gf->nodes[i]->name, "result_embd_pooled") == 0) {
+                    embd = gf->nodes[i];
+                    break;
+                }
             }
-            GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0 && "missing embeddings tensor");
+            GGML_ASSERT(embd != nullptr && "missing embeddings tensor");
         } else {
             embd = nullptr; // do not extract embeddings when not needed
             GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");