koboldcpp (mirror of https://github.com/LostRuins/koboldcpp.git)

commit 032a171867 (parent e8a389f85b)
integrated q5 formats

13 changed files with 184 additions and 129 deletions
@@ -1,6 +1,4 @@
 #include "ggml.h"
-#include "otherarch/utils.h"
-#include "utils.h"
 #include "common-ggml.h"
 
 #include <cassert>
@@ -20,11 +18,11 @@ struct gpt2_hparams {
     int32_t n_embd  = 768;
     int32_t n_head  = 12;
     int32_t n_layer = 12;
-    int32_t f16     = 1;
+    int32_t ftype   = 1;
 };
 
 // quantize a model
-bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_mtype mtype) {
+bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_ftype ftype) {
     gpt_vocab vocab;
 
     printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
@@ -62,21 +60,21 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam
     finp.read((char *) &hparams.n_embd,  sizeof(hparams.n_embd));
     finp.read((char *) &hparams.n_head,  sizeof(hparams.n_head));
     finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
-    finp.read((char *) &hparams.f16,     sizeof(hparams.f16));
+    finp.read((char *) &hparams.ftype,   sizeof(hparams.ftype));
 
     printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
     printf("%s: n_ctx   = %d\n", __func__, hparams.n_ctx);
     printf("%s: n_embd  = %d\n", __func__, hparams.n_embd);
     printf("%s: n_head  = %d\n", __func__, hparams.n_head);
     printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
-    printf("%s: f16     = %d\n", __func__, hparams.f16);
+    printf("%s: f16     = %d\n", __func__, hparams.ftype);
 
     fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
     fout.write((char *) &hparams.n_ctx,   sizeof(hparams.n_ctx));
     fout.write((char *) &hparams.n_embd,  sizeof(hparams.n_embd));
     fout.write((char *) &hparams.n_head,  sizeof(hparams.n_head));
     fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer));
-    fout.write((char *) &mtype,           sizeof(hparams.f16));
+    fout.write((char *) &ftype,           sizeof(hparams.ftype));
 }
 
 // load vocab
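The rename is serialization-compatible: both the old f16 field and the new ftype field are a single int32_t at the same offset in the model header, so files written before this commit should still load. A minimal round-trip sketch of that header slot, not part of the patch (the scratch file name is hypothetical):

// sketch.cpp - the renamed field still occupies the same 4-byte slot;
// "header.bin" is a hypothetical scratch file, not a koboldcpp artifact
#include <cstdint>
#include <fstream>
#include <iostream>

int main() {
    int32_t ftype_out = 2;                      // e.g. a q4_0 model, per the usage text below
    std::ofstream fout("header.bin", std::ios::binary);
    fout.write((const char *) &ftype_out, sizeof(ftype_out));
    fout.close();

    int32_t ftype_in = 0;                       // a pre-rename reader calling this
    std::ifstream finp("header.bin", std::ios::binary);
    finp.read((char *) &ftype_in, sizeof(ftype_in));   // slot "f16" reads the same 4 bytes
    std::cout << "ftype id: " << ftype_in << "\n";
}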
@@ -116,7 +114,7 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam
         "model/h.*/mlp/c_proj/w",
     };
 
-    if (!ggml_common_quantize_0(finp, fout, mtype, to_quant, {})) {
+    if (!ggml_common_quantize_0(finp, fout, ftype, to_quant, {})) {
        fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str());
        return false;
     }
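ggml_common_quantize_0 (from common-ggml) does the per-tensor work: it walks the tensors in finp, quantizes those whose names match a pattern in to_quant, and copies the rest through unchanged; the empty {} argument is a skip-list. A sketch of the name-matching step as I read it; treating the patterns as regexes is my assumption about common-ggml, not something the patch confirms:

// match_sketch.cpp - how the to_quant patterns plausibly select tensors
#include <cstdio>
#include <regex>
#include <string>
#include <vector>

int main() {
    const std::vector<std::string> to_quant = { "model/h.*/mlp/c_proj/w" };
    const std::string name = "model/h.7/mlp/c_proj/w";   // hypothetical tensor name

    bool quantize = false;
    for (const auto & pat : to_quant) {
        if (std::regex_match(name, std::regex(pat))) {    // pattern read as a regex
            quantize = true;
            break;
        }
    }
    std::printf("%s -> %s\n", name.c_str(), quantize ? "quantize" : "copy through");
}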
@@ -134,10 +132,7 @@ int main(int argc, char ** argv) {
     ggml_time_init();
     if (argc != 4) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
-        fprintf(stderr, "  type = 2 - q4_0\n");
-        fprintf(stderr, "  type = 3 - q4_1\n");
-        fprintf(stderr, "  type = 5 - q4_2\n");
-        fprintf(stderr, "  type = 6 - q4_3\n");
+        ggml_print_ftypes(stderr);
         return 1;
     }
 
@@ -151,7 +146,7 @@ int main(int argc, char ** argv) {
     const std::string fname_inp = argv[1];
     const std::string fname_out = argv[2];
 
-    const int mtype = atoi(argv[3]);
+    const ggml_ftype ftype = ggml_parse_ftype(argv[3]);
 
     const int64_t t_main_start_us = ggml_time_us();
 
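Swapping atoi for ggml_parse_ftype is what opens the door to the new q5 formats: the argument no longer has to be one of the bare integers 2/3/5/6 from the old usage text. A rough sketch of what such a parser has to do; only q4_0 = 2 and q4_1 = 3 are confirmed by the removed usage lines above, the q5 ids and name handling are illustrative assumptions:

// parse_sketch.cpp - stand-in for ggml_parse_ftype; accepts a numeric id
// or a quantization name (exact q5 id values here are assumptions)
#include <cstdio>
#include <cstdlib>
#include <map>
#include <string>

static int parse_ftype_sketch(const std::string & arg) {
    static const std::map<std::string, int> names = {
        { "q4_0", 2 }, { "q4_1", 3 },     // ids confirmed by the old usage text
        { "q5_0", 8 }, { "q5_1", 9 },     // q5 ids: illustrative assumption
    };
    const auto it = names.find(arg);
    if (it != names.end()) {
        return it->second;                // named type, e.g. "q5_1"
    }
    return std::atoi(arg.c_str());        // fall back to a raw numeric id
}

int main(int argc, char ** argv) {
    const char * arg = argc > 1 ? argv[1] : "q5_1";
    std::printf("parsed ftype id: %d\n", parse_ftype_sketch(arg));
}

After the change an invocation might look like `gpt2-quantize ggml-model-f32.bin ggml-model-q5_1.bin q5_1` (file names hypothetical), with the accepted types listed by ggml_print_ftypes on a bad argument.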
@@ -161,7 +156,7 @@ int main(int argc, char ** argv) {
     {
         const int64_t t_start_us = ggml_time_us();
 
-        if (!gpt2_model_quantize(fname_inp, fname_out, ggml_mtype(mtype))) {
+        if (!gpt2_model_quantize(fname_inp, fname_out, ggml_ftype(ftype))) {
            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
            return 1;
        }