Fixed quant tools not compiling; updated docs

This commit is contained in:
Concedo 2024-04-06 23:11:05 +08:00
parent 273d48ad96
commit 0061299cce
8 changed files with 228 additions and 132 deletions

View file

@ -1,4 +1,4 @@
#include "utils.h"
#include "otherarch/utils.h"
#include "common-ggml.h"
#include <cassert>
@ -22,7 +22,7 @@ struct gpt2_hparams {
};
// quantize a model
bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_ftype ftype) {
bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_v3_ftype ftype) {
gpt_vocab vocab;
printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
@ -62,8 +62,8 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam
finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
finp.read((char *) &hparams.ftype, sizeof(hparams.ftype));
const int32_t qntvr_src = hparams.ftype / GGML_QNT_VERSION_FACTOR;
const int32_t ftype_dst = GGML_QNT_VERSION * GGML_QNT_VERSION_FACTOR + ftype;
const int32_t qntvr_src = hparams.ftype / GGML_V3_QNT_VERSION_FACTOR;
const int32_t ftype_dst = GGML_V3_QNT_VERSION * GGML_V3_QNT_VERSION_FACTOR + ftype;
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
@ -73,7 +73,7 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam
printf("%s: ftype (src) = %d\n", __func__, hparams.ftype);
printf("%s: qntvr (src) = %d\n", __func__, qntvr_src);
printf("%s: ftype (dst) = %d\n", __func__, ftype_dst);
printf("%s: qntvr (dst) = %d\n", __func__, GGML_QNT_VERSION);
printf("%s: qntvr (dst) = %d\n", __func__, GGML_V3_QNT_VERSION);
fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
fout.write((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
@ -120,7 +120,7 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam
"model/h.*/mlp/c_proj/w",
};
if (!ggml_common_quantize_0(finp, fout, ftype, to_quant, {})) {
if (!ggml_v3_common_quantize_0(finp, fout, ftype, to_quant, {})) {
fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str());
return false;
}
@ -137,41 +137,41 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam
int main(int argc, char ** argv) {
if (argc != 4) {
fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
ggml_print_ftypes(stderr);
ggml_v3_print_ftypes(stderr);
return 1;
}
// needed to initialize f16 tables
{
struct ggml_init_params params = { 0, NULL, false };
struct ggml_context * ctx = ggml_init(params);
ggml_free(ctx);
struct ggml_v3_init_params params = { 0, NULL, false };
struct ggml_v3_context * ctx = ggml_v3_init(params);
ggml_v3_free(ctx);
}
const std::string fname_inp = argv[1];
const std::string fname_out = argv[2];
const ggml_ftype ftype = ggml_parse_ftype(argv[3]);
const ggml_v3_ftype ftype = ggml_v3_parse_ftype(argv[3]);
const int64_t t_main_start_us = ggml_time_us();
const int64_t t_main_start_us = ggml_v3_time_us();
int64_t t_quantize_us = 0;
// load the model
{
const int64_t t_start_us = ggml_time_us();
const int64_t t_start_us = ggml_v3_time_us();
if (!gpt2_model_quantize(fname_inp, fname_out, ggml_ftype(ftype))) {
if (!gpt2_model_quantize(fname_inp, fname_out, ggml_v3_ftype(ftype))) {
fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
return 1;
}
t_quantize_us = ggml_time_us() - t_start_us;
t_quantize_us = ggml_v3_time_us() - t_start_us;
}
// report timing
{
const int64_t t_main_end_us = ggml_time_us();
const int64_t t_main_end_us = ggml_v3_time_us();
printf("\n");
printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f);