diff --git a/expose.cpp b/expose.cpp
index 104e71043..8b874cbb2 100644
--- a/expose.cpp
+++ b/expose.cpp
@@ -103,7 +103,7 @@ extern "C"
                 return true;
             }
         }
-        else if(file_format==FileFormat::GPT2_1||file_format==FileFormat::GPT2_2||file_format==FileFormat::GPT2_3)
+        else if(file_format==FileFormat::GPT2_1||file_format==FileFormat::GPT2_2||file_format==FileFormat::GPT2_3||file_format==FileFormat::GPT2_4)
         {
             printf("\n---\nIdentified as GPT-2 model: (ver %d)\nAttempting to Load...\n---\n", file_format);
             ModelLoadResult lr = gpttype_load_model(inputs, file_format);
@@ -141,7 +141,7 @@ extern "C"
                 return true;
             }
         }
-        else if(file_format==FileFormat::NEOX_1 || file_format==FileFormat::NEOX_2 || file_format==FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5|| file_format==FileFormat::NEOX_6|| file_format==FileFormat::NEOX_7)
+        else if(file_format==FileFormat::NEOX_1 || file_format==FileFormat::NEOX_2 || file_format==FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5 || file_format==FileFormat::NEOX_6 || file_format==FileFormat::NEOX_7)
         {
             printf("\n---\nIdentified as GPT-NEO-X model: (ver %d)\nAttempting to Load...\n---\n", file_format);
             ModelLoadResult lr = gpttype_load_model(inputs, file_format);
diff --git a/otherarch/tools/gpt2_quantize.cpp b/otherarch/tools/gpt2_quantize.cpp
index a49de5899..2c0b908f9 100644
--- a/otherarch/tools/gpt2_quantize.cpp
+++ b/otherarch/tools/gpt2_quantize.cpp
@@ -135,7 +135,6 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam
 // ./gpt-2-quantize models/gpt-2-117M/ggml-model.bin models/gpt-2-117M/ggml-model-quant.bin type
 //
 int main(int argc, char ** argv) {
-    ggml_time_init();
     if (argc != 4) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
         ggml_print_ftypes(stderr);