diff --git a/Makefile b/Makefile
index b564da75d..71a86e6a8 100644
--- a/Makefile
+++ b/Makefile
@@ -51,6 +51,10 @@ ifdef KCPP_DEBUG
 	CFLAGS = -g -O0
 	CXXFLAGS = -g -O0
 endif
+ifdef KCPP_SANITIZE
+	CFLAGS += -fsanitize=undefined -fsanitize-undefined-trap-on-error
+	CXXFLAGS += -fsanitize=undefined -fsanitize-undefined-trap-on-error
+endif
 CFLAGS += -I. -Iggml/include -Iggml/src -Iggml/src/ggml-cpu -Iinclude -Isrc -I./common -I./vendor -I./vendor/stb -I./include -I./include/CL -I./otherarch -I./otherarch/tools -I./otherarch/sdcpp -I./otherarch/sdcpp/thirdparty -I./include/vulkan -O3 -fno-finite-math-only -std=c11 -fPIC -DLOG_DISABLE_LOGS -D_GNU_SOURCE -DGGML_USE_CPU -DGGML_USE_CPU_REPACK
 CXXFLAGS += -I. -Iggml/include -Iggml/src -Iggml/src/ggml-cpu -Iinclude -Isrc -I./common -I./vendor -I./vendor/stb -I./include -I./include/CL -I./otherarch -I./otherarch/tools -I./otherarch/sdcpp -I./otherarch/sdcpp/thirdparty -I./include/vulkan -O3 -fno-finite-math-only -std=c++17 -fPIC -DLOG_DISABLE_LOGS -D_GNU_SOURCE -DGGML_USE_CPU -DGGML_USE_CPU_REPACK
 ifndef KCPP_DEBUG
diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index 409f3bd6a..d0d04d8da 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -1189,7 +1189,12 @@ std::string llama_model_loader::ftype_name() const {
 
 void llama_model_loader::print_info() const {
     LLAMA_LOG_INFO("%s: file format = %s\n", __func__, llama_file_version_name(fver));
-    LLAMA_LOG_INFO("%s: file type = %s\n", __func__, llama_model_ftype_name(ftype).c_str());
+    //LLAMA_LOG_INFO("%s: file type = %s\n", __func__, llama_model_ftype_name(ftype).c_str());
+    if (n_elements == 0) {
+        LLAMA_LOG_INFO("%s: file size = %.2f MiB\n", __func__, n_bytes/1024.0/1024.0);
+        LLAMA_LOG_INFO("%s: ERROR: n_elements is invalid, cannot compute BPW\n", __func__);
+        return;
+    }
     if (n_bytes < GiB) {
         LLAMA_LOG_INFO("%s: file size = %.2f MiB (%.2f BPW) \n", __func__, n_bytes/1024.0/1024.0, n_bytes*8.0/n_elements);
     } else {