diff --git a/common/arg.cpp b/common/arg.cpp
index 1e49964a2..bbb70ccbe 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -1284,7 +1284,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params) {
             params.use_color = true;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
     add_opt(common_arg(
         {"-t", "--threads"}, "N",
         string_format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads),
@@ -1417,7 +1417,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     add_opt(common_arg(
         {"-n", "--predict", "--n-predict"}, "N",
         string_format(
-            ex == LLAMA_EXAMPLE_MAIN || ex == LLAMA_EXAMPLE_INFILL
+            ex == LLAMA_EXAMPLE_MAIN
                 ? "number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)"
                 : "number of tokens to predict (default: %d, -1 = infinity)",
             params.n_predict),
@@ -1656,7 +1656,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.input_prefix = value;
             params.enable_chat_template = false;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN}));
     add_opt(common_arg(
         {"--in-suffix"}, "STRING",
         "string to suffix after user inputs with (default: empty)",
@@ -1664,7 +1664,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.input_suffix = value;
             params.enable_chat_template = false;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN}));
     add_opt(common_arg(
         {"--no-warmup"},
         "skip warming up the model with an empty run",
@@ -1681,7 +1681,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params) {
             params.spm_infill = true;
         }
-    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--samplers"}, "SAMPLERS",
         string_format("samplers that will be used for generation in the order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()),
@@ -2893,7 +2893,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params) {
             params.simple_io = true;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN}));
     add_opt(common_arg(
         {"--positive-file"}, "FNAME",
         string_format("positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str()),
diff --git a/common/common.h b/common/common.h
index 236dee33e..ff6225389 100644
--- a/common/common.h
+++ b/common/common.h
@@ -62,7 +62,6 @@ enum llama_example {
     LLAMA_EXAMPLE_COMMON,
     LLAMA_EXAMPLE_SPECULATIVE,
     LLAMA_EXAMPLE_MAIN,
-    LLAMA_EXAMPLE_INFILL,
     LLAMA_EXAMPLE_EMBEDDING,
     LLAMA_EXAMPLE_PERPLEXITY,
     LLAMA_EXAMPLE_RETRIEVAL,
diff --git a/common/sampling.cpp b/common/sampling.cpp
index bbaec5b80..28705e24c 100644
--- a/common/sampling.cpp
+++ b/common/sampling.cpp
@@ -1,6 +1,7 @@
 #include "sampling.h"

 #include "common.h"
+#include "log.h"

 #include <cmath>
 #include <unordered_map>
@@ -534,14 +535,16 @@ std::vector<common_sampler_type> common_sampler_types_from_names(const std::vect
         auto sampler = sampler_canonical_name_map.find(name);
         if (sampler != sampler_canonical_name_map.end()) {
             samplers.push_back(sampler->second);
-        } else {
-            if (allow_alt_names) {
-                sampler = sampler_alt_name_map.find(name);
-                if (sampler != sampler_alt_name_map.end()) {
-                    samplers.push_back(sampler->second);
-                }
+            continue;
+        }
+        if (allow_alt_names) {
+            sampler = sampler_alt_name_map.find(name);
+            if (sampler != sampler_alt_name_map.end()) {
+                samplers.push_back(sampler->second);
+                continue;
             }
         }
+        LOG_WRN("%s: unable to match sampler by name '%s'\n", __func__, name.c_str());
     }

     return samplers;
@@ -568,6 +571,8 @@ std::vector<common_sampler_type> common_sampler_types_from_chars(const std::stri
         const auto sampler = sampler_name_map.find(c);
         if (sampler != sampler_name_map.end()) {
             samplers.push_back(sampler->second);
+        } else {
+            LOG_WRN("%s: unable to match sampler by char '%c'\n", __func__, c);
         }
     }

diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
index cee128b13..983f5cfed 100644
--- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
+++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
@@ -87,8 +87,6 @@ static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wro
 #if defined(__GNUC__)
 #pragma GCC diagnostic ignored "-Woverlength-strings"
-#elif defined(_MSC_VER)
-#pragma warning(disable: 4244 4267) // possible loss of data
 #endif

 #define UNUSED GGML_UNUSED
diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ggml/src/ggml-cpu/ggml-cpu-quants.c
index bb52be799..87e9e380c 100644
--- a/ggml/src/ggml-cpu/ggml-cpu-quants.c
+++ b/ggml/src/ggml-cpu/ggml-cpu-quants.c
@@ -20,12 +20,6 @@
 #define GROUP_MAX_EPS_IQ1_M 1e-7f
 #define GROUP_MAX_EPS_IQ1_S 1e-12f

-#if defined(_MSC_VER)
-// disable "possible loss of data" to avoid warnings for hundreds of casts
-// we should just be careful :)
-#pragma warning(disable: 4244 4267)
-#endif
-
 #define UNUSED GGML_UNUSED

 #ifndef MM256_SET_M128I
diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index 165bff28f..a59c5862d 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -51,19 +51,6 @@
 #include "llamafile/sgemm.h"
 #endif

-#if defined(_MSC_VER)
-// disable "possible loss of data" to avoid hundreds of casts
-// we should just be careful :)
-#pragma warning(disable: 4244 4267)
-
-// disable POSIX deprecation warnings
-// these functions are never going away, anyway
-#pragma warning(disable: 4996)
-
-// unreachable code because of multiple instances of code after GGML_ABORT
-#pragma warning(disable: 4702)
-#endif
-
 // Note: once we move threading into a separate C++ file
 // will use std::hardware_destructive_interference_size instead of hardcoding it here
 // and we'll use C++ attribute syntax.
diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp
index 7413192b7..955fec59a 100644
--- a/ggml/src/ggml-cpu/ops.cpp
+++ b/ggml/src/ggml-cpu/ops.cpp
@@ -8,19 +8,6 @@
 #include <float.h>

-#if defined(_MSC_VER)
-// disable "possible loss of data" to avoid hundreds of casts
-// we should just be careful :)
-#pragma warning(disable: 4244 4267)
-
-// disable POSIX deprecation warnings
-// these functions are never going away, anyway
-#pragma warning(disable: 4996)
-
-// unreachable code because of multiple instances of code after GGML_ABORT
-#pragma warning(disable: 4702)
-#endif
-
 // ggml_compute_forward_dup

 static void ggml_compute_forward_dup_same_cont(
diff --git a/ggml/src/ggml-cpu/vec.cpp b/ggml/src/ggml-cpu/vec.cpp
index dfe2218e3..02d406182 100644
--- a/ggml/src/ggml-cpu/vec.cpp
+++ b/ggml/src/ggml-cpu/vec.cpp
@@ -2,12 +2,6 @@
 #include <cassert>

-#if defined(_MSC_VER)
-// disable "possible loss of data" to avoid hundreds of casts
-// we should just be careful :)
-#pragma warning(disable: 4244 4267)
-#endif
-
 // precomputed gelu table for f16 (128 KB)
 ggml_fp16_t ggml_table_gelu_f16[1 << 16];
diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh
index 5f504bda7..11f6bb4d2 100644
--- a/ggml/src/ggml-cuda/common.cuh
+++ b/ggml/src/ggml-cuda/common.cuh
@@ -130,10 +130,6 @@ static int ggml_cuda_highest_compiled_arch(const int arch) {
 #define MATRIX_ROW_PADDING 512 // last row of quant. matrices is a multiple of this to avoid out-of-bounds memory accesses

-#if defined(_MSC_VER)
-#pragma warning(disable: 4244 4267) // possible loss of data
-#endif
-
 #define GGML_CUDA_MAX_STREAMS 8

 [[noreturn]]
diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh
index 7cc48cb05..bae1a58c3 100644
--- a/ggml/src/ggml-cuda/mmq.cuh
+++ b/ggml/src/ggml-cuda/mmq.cuh
@@ -2523,7 +2523,7 @@ template <ggml_type type, int mmq_x, int nwarps, bool need_check, bool fixup>
 static __device__ __forceinline__ void mul_mat_q_process_tile(
     const char * __restrict__ x, const int offset_x, const int * __restrict__ y,
     const int * __restrict__ ids_dst, float * __restrict__ dst, float * __restrict__ tmp_fixup,
-    const int nrows_x, const int stride_row_x, const int ncols_y, const int stride_col_dst,
+    const int stride_row_x, const int ncols_y, const int stride_col_dst,
     const int tile_x_max_i, const int tile_y_max_j, const int kb0_start, const int kb0_stop) {

     constexpr int qk = ggml_cuda_type_traits<type>::qk;
@@ -2690,7 +2690,7 @@ static __global__ void mul_mat_q(
         constexpr bool fixup = false;
         mul_mat_q_process_tile<type, mmq_x, nwarps, need_check, fixup>
-            (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, nrows_x, stride_row_x, ncols_y, stride_col_dst,
+            (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, stride_row_x, ncols_y, stride_col_dst,
             tile_x_max_i, tile_y_max_j, 0, ncols_x/qk);
         return;
     }
@@ -2768,7 +2768,7 @@ static __global__ void mul_mat_q(
         constexpr bool fixup = false; // All but (potentially) the last iterations write their data to dst rather than the fixup buffer.
         mul_mat_q_process_tile<type, mmq_x, nwarps, need_check, fixup>
-            (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, nrows_x, stride_row_x, ncols_y, stride_col_dst,
+            (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, stride_row_x, ncols_y, stride_col_dst,
             tile_x_max_i, tile_y_max_j, kb0_start, kb0_stop);

         kbc += blocks_per_ne00;
@@ -2835,7 +2835,7 @@ static __global__ void mul_mat_q(
     constexpr bool fixup = true; // Last index writes its data to fixup buffer to avoid data races with other blocks.
     mul_mat_q_process_tile<type, mmq_x, nwarps, need_check, fixup>
-        (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, nrows_x, stride_row_x, ncols_y, stride_col_dst,
+        (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, stride_row_x, ncols_y, stride_col_dst,
         tile_x_max_i, tile_y_max_j, kb0_start, kb0_stop);
 }
diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c
index ac918a60d..84ec6dfe3 100644
--- a/ggml/src/ggml-quants.c
+++ b/ggml/src/ggml-quants.c
@@ -19,12 +19,6 @@
 #define GROUP_MAX_EPS_IQ1_M 1e-7f
 #define GROUP_MAX_EPS_IQ1_S 1e-12f

-#if defined(_MSC_VER)
-// disable "possible loss of data" to avoid warnings for hundreds of casts
-// we should just be careful :)
-#pragma warning(disable: 4244 4267)
-#endif
-
 #define UNUSED GGML_UNUSED

 // reference implementation for deterministic creation of model files
diff --git a/klite.embd b/klite.embd
index e74cdbb64..450f28cbf 100644
--- a/klite.embd
+++ b/klite.embd
@@ -12,7 +12,7 @@ Current version indicated by LITEVER below.
 -->