From 2e42be42bd6bf1dcc643d6ac4e77419bfe5dd24f Mon Sep 17 00:00:00 2001 From: Aman Gupta Date: Sat, 14 Jun 2025 16:34:20 +0800 Subject: [PATCH 01/26] compare-llama-bench: add option to plot (#14169) * compare llama-bench: add option to plot * Address review comments: convert case + add type hints * Add matplotlib to requirements * fix tests * Improve comment and fix assert condition for test * Add back default test_name, add --plot_log_scale * use log_scale regardless of x_values --- .../requirements-compare-llama-bench.txt | 1 + scripts/compare-llama-bench.py | 169 +++++++++++++++++- 2 files changed, 169 insertions(+), 1 deletion(-) diff --git a/requirements/requirements-compare-llama-bench.txt b/requirements/requirements-compare-llama-bench.txt index e0aaa3204..d87e897e1 100644 --- a/requirements/requirements-compare-llama-bench.txt +++ b/requirements/requirements-compare-llama-bench.txt @@ -1,2 +1,3 @@ tabulate~=0.9.0 GitPython~=3.1.43 +matplotlib~=3.10.0 diff --git a/scripts/compare-llama-bench.py b/scripts/compare-llama-bench.py index a1013c3b7..30e3cf864 100755 --- a/scripts/compare-llama-bench.py +++ b/scripts/compare-llama-bench.py @@ -19,6 +19,7 @@ except ImportError as e: print("the following Python libraries are required: GitPython, tabulate.") # noqa: NP100 raise e + logger = logging.getLogger("compare-llama-bench") # All llama-bench SQL fields @@ -122,11 +123,15 @@ help_s = ( parser.add_argument("--check", action="store_true", help="check if all required Python libraries are installed") parser.add_argument("-s", "--show", help=help_s) parser.add_argument("--verbose", action="store_true", help="increase output verbosity") +parser.add_argument("--plot", help="generate a performance comparison plot and save to specified file (e.g., plot.png)") +parser.add_argument("--plot_x", help="parameter to use as x axis for plotting (default: n_depth)", default="n_depth") +parser.add_argument("--plot_log_scale", action="store_true", help="use log scale for x axis in plots (off by default)") known_args, unknown_args = parser.parse_known_args() logging.basicConfig(level=logging.DEBUG if known_args.verbose else logging.INFO) + if known_args.check: # Check if all required Python libraries are installed. Would have failed earlier if not. 
sys.exit(0) @@ -499,7 +504,6 @@ else: name_compare = bench_data.get_commit_name(hexsha8_compare) - # If the user provided columns to group the results by, use them: if known_args.show is not None: show = known_args.show.split(",") @@ -544,6 +548,14 @@ else: show.remove(prop) except ValueError: pass + + # Add plot_x parameter to parameters to show if it's not already present: + if known_args.plot: + for k, v in PRETTY_NAMES.items(): + if v == known_args.plot_x and k not in show: + show.append(k) + break + rows_show = bench_data.get_rows(show, hexsha8_baseline, hexsha8_compare) if not rows_show: @@ -600,6 +612,161 @@ if "gpu_info" in show: headers = [PRETTY_NAMES[p] for p in show] headers += ["Test", f"t/s {name_baseline}", f"t/s {name_compare}", "Speedup"] +if known_args.plot: + def create_performance_plot(table_data: list[list[str]], headers: list[str], baseline_name: str, compare_name: str, output_file: str, plot_x_param: str, log_scale: bool = False): + try: + import matplotlib.pyplot as plt + import matplotlib + matplotlib.use('Agg') + except ImportError as e: + logger.error("matplotlib is required for --plot.") + raise e + + data_headers = headers[:-4] # Exclude the last 4 columns (Test, baseline t/s, compare t/s, Speedup) + plot_x_index = None + plot_x_label = plot_x_param + + if plot_x_param not in ["n_prompt", "n_gen", "n_depth"]: + pretty_name = PRETTY_NAMES.get(plot_x_param, plot_x_param) + if pretty_name in data_headers: + plot_x_index = data_headers.index(pretty_name) + plot_x_label = pretty_name + elif plot_x_param in data_headers: + plot_x_index = data_headers.index(plot_x_param) + plot_x_label = plot_x_param + else: + logger.error(f"Parameter '{plot_x_param}' not found in current table columns. Available columns: {', '.join(data_headers)}") + return + + grouped_data = {} + + for i, row in enumerate(table_data): + group_key_parts = [] + test_name = row[-4] + + base_test = "" + x_value = None + + if plot_x_param in ["n_prompt", "n_gen", "n_depth"]: + for j, val in enumerate(row[:-4]): + header_name = data_headers[j] + if val is not None and str(val).strip(): + group_key_parts.append(f"{header_name}={val}") + + if plot_x_param == "n_prompt" and "pp" in test_name: + base_test = test_name.split("@")[0] + x_value = base_test + elif plot_x_param == "n_gen" and "tg" in test_name: + x_value = test_name.split("@")[0] + elif plot_x_param == "n_depth" and "@d" in test_name: + base_test = test_name.split("@d")[0] + x_value = int(test_name.split("@d")[1]) + else: + base_test = test_name + + if base_test.strip(): + group_key_parts.append(f"Test={base_test}") + else: + for j, val in enumerate(row[:-4]): + if j != plot_x_index: + header_name = data_headers[j] + if val is not None and str(val).strip(): + group_key_parts.append(f"{header_name}={val}") + else: + x_value = val + + group_key_parts.append(f"Test={test_name}") + + group_key = tuple(group_key_parts) + + if group_key not in grouped_data: + grouped_data[group_key] = [] + + grouped_data[group_key].append({ + 'x_value': x_value, + 'baseline': float(row[-3]), + 'compare': float(row[-2]), + 'speedup': float(row[-1]) + }) + + if not grouped_data: + logger.error("No data available for plotting") + return + + def make_axes(num_groups, max_cols=2, base_size=(8, 4)): + from math import ceil + cols = 1 if num_groups == 1 else min(max_cols, num_groups) + rows = ceil(num_groups / cols) + + # Scale figure size by grid dimensions + w, h = base_size + fig, ax_arr = plt.subplots(rows, cols, + figsize=(w * cols, h * rows), + squeeze=False) + + axes = 
ax_arr.flatten()[:num_groups] + return fig, axes + + num_groups = len(grouped_data) + fig, axes = make_axes(num_groups) + + plot_idx = 0 + + for group_key, points in grouped_data.items(): + if plot_idx >= len(axes): + break + ax = axes[plot_idx] + + try: + points_sorted = sorted(points, key=lambda p: float(p['x_value']) if p['x_value'] is not None else 0) + x_values = [float(p['x_value']) if p['x_value'] is not None else 0 for p in points_sorted] + except ValueError: + points_sorted = sorted(points, key=lambda p: group_key) + x_values = [p['x_value'] for p in points_sorted] + + baseline_vals = [p['baseline'] for p in points_sorted] + compare_vals = [p['compare'] for p in points_sorted] + + ax.plot(x_values, baseline_vals, 'o-', color='skyblue', + label=f'{baseline_name}', linewidth=2, markersize=6) + ax.plot(x_values, compare_vals, 's--', color='lightcoral', alpha=0.8, + label=f'{compare_name}', linewidth=2, markersize=6) + + if log_scale: + ax.set_xscale('log', base=2) + unique_x = sorted(set(x_values)) + ax.set_xticks(unique_x) + ax.set_xticklabels([str(int(x)) for x in unique_x]) + + title_parts = [] + for part in group_key: + if '=' in part: + key, value = part.split('=', 1) + title_parts.append(f"{key}: {value}") + + title = ', '.join(title_parts) if title_parts else "Performance comparison" + + ax.set_xlabel(plot_x_label, fontsize=12, fontweight='bold') + ax.set_ylabel('Tokens per second (t/s)', fontsize=12, fontweight='bold') + ax.set_title(title, fontsize=12, fontweight='bold') + ax.legend(loc='best', fontsize=10) + ax.grid(True, alpha=0.3) + + plot_idx += 1 + + for i in range(plot_idx, len(axes)): + axes[i].set_visible(False) + + fig.suptitle(f'Performance comparison: {compare_name} vs. {baseline_name}', + fontsize=14, fontweight='bold') + fig.subplots_adjust(top=1) + + plt.tight_layout() + plt.savefig(output_file, dpi=300, bbox_inches='tight') + plt.close() + + create_performance_plot(table, headers, name_baseline, name_compare, known_args.plot, known_args.plot_x, known_args.plot_log_scale) + print(tabulate( # noqa: NP100 table, headers=headers, From 3cb203c89f60483e349f841684173446ed23c28f Mon Sep 17 00:00:00 2001 From: Piotr Date: Sat, 14 Jun 2025 18:25:15 +0200 Subject: [PATCH 02/26] llama-chat : Do not throw when tool parsing fails (#14012) Currently when a model generates output which looks like a tool call, but is invalid an exception is thrown and not handled, causing the cli or llama-server to bail. Instead, handle the chat parser exception and simply return the generated text in such cases. 
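For illustration, a minimal, self-contained sketch of the fallback pattern this change adopts. The types and parser below are stand-ins, not the actual common_chat_* API; the point is only the control flow: a malformed tool call in a complete generation is no longer fatal, and the raw text is returned as ordinary content instead.

    #include <stdexcept>
    #include <string>
    #include <iostream>

    struct chat_msg { std::string content; };          // stand-in for common_chat_msg
    struct partial_parse_error : std::runtime_error {  // stand-in for common_chat_msg_partial_exception
        using std::runtime_error::runtime_error;
    };

    // stand-in for the strict tool-call parser: rejects an unterminated tool call
    static chat_msg parse_tool_calls(const std::string & input) {
        if (input.rfind("<tool_call>", 0) == 0 && input.find("</tool_call>") == std::string::npos) {
            throw partial_parse_error("malformed tool call");
        }
        return { input };
    }

    static chat_msg parse(const std::string & input, bool is_partial) {
        try {
            return parse_tool_calls(input);
        } catch (const partial_parse_error &) {
            if (!is_partial) {
                // generation is complete but the tool call could not be parsed:
                // instead of throwing, hand the raw text back as plain content
                return { input };
            }
            return {};  // still streaming: wait for more tokens
        }
    }

    int main() {
        std::cout << parse("<tool_call>{\"name\": \"get_time\"", /*is_partial=*/false).content << "\n";
    }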
Signed-off-by: Piotr Stankiewicz --- common/chat-parser.cpp | 5 +++++ common/chat-parser.h | 2 ++ common/chat.cpp | 4 +++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/common/chat-parser.cpp b/common/chat-parser.cpp index 65b664cb3..18a30e49a 100644 --- a/common/chat-parser.cpp +++ b/common/chat-parser.cpp @@ -49,6 +49,7 @@ bool common_chat_msg_parser::add_tool_call(const std::string & name, const std:: // LOG_DBG("Tool call arguments:\n\traw: %s\n\tresult: %s\n", arguments.c_str(), tool_call.arguments.c_str()); result_.tool_calls.emplace_back(tool_call); + return true; } bool common_chat_msg_parser::add_tool_call(const json & tool_call) { @@ -378,3 +379,7 @@ std::optional common_chat_msg_parse /* .is_partial = */ found_healing_marker, }; } + +void common_chat_msg_parser::clear_tools() { + result_.tool_calls.clear(); +} diff --git a/common/chat-parser.h b/common/chat-parser.h index 7ee355056..0e64c341a 100644 --- a/common/chat-parser.h +++ b/common/chat-parser.h @@ -115,4 +115,6 @@ class common_chat_msg_parser { const std::vector> & args_paths = {}, const std::vector> & content_paths = {} ); + + void clear_tools(); }; diff --git a/common/chat.cpp b/common/chat.cpp index 1d6974a8c..0dad14fba 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -1921,7 +1921,9 @@ common_chat_msg common_chat_parse(const std::string & input, bool is_partial, co } catch (const common_chat_msg_partial_exception & ex) { LOG_DBG("Partial parse: %s\n", ex.what()); if (!is_partial) { - throw std::runtime_error(ex.what()); + builder.clear_tools(); + builder.move_to(0); + common_chat_parse_content_only(builder); } } auto msg = builder.result(); From 00ba7726100d7e1941d9f5a06f56a7559945b33c Mon Sep 17 00:00:00 2001 From: Pepijn de Vos Date: Sun, 15 Jun 2025 08:06:37 +0200 Subject: [PATCH 03/26] docs : remove WIP since PR has been merged (#13912) --- docs/function-calling.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/function-calling.md b/docs/function-calling.md index fd3db9bd1..37eacaf31 100644 --- a/docs/function-calling.md +++ b/docs/function-calling.md @@ -11,7 +11,7 @@ Function calling is supported for all models (see https://github.com/ggml-org/ll - Llama 3.1 / 3.3 (including builtin tools support - tool names for `wolfram_alpha`, `web_search` / `brave_search`, `code_interpreter`), Llama 3.2 - Functionary v3.1 / v3.2 - Hermes 2/3, Qwen 2.5 - - Qwen 2.5 Coder (WIP: https://github.com/ggml-org/llama.cpp/pull/12034) + - Qwen 2.5 Coder - Mistral Nemo - Firefunction v2 - Command R7B From b9912ac570de8945ae9383c9ca8291027bf287dd Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 15 Jun 2025 09:18:37 +0300 Subject: [PATCH 04/26] batch : auto-gen positions + verify multi-sequence input (#14177) * batch : verify multi-sequence input batches ggml-ci * cont : auto-gen positions + verify multi-seq input ggml-ci * cont : first print debug info, then perform validation ggml-ci * cont : fix position auto-gen + add comments ggml-ci --- include/llama.h | 4 +- src/llama-batch.cpp | 153 +++++++++++++++++++++++++++++++++++++----- src/llama-batch.h | 17 ++++- src/llama-context.cpp | 6 +- src/llama-cparams.h | 1 + 5 files changed, 155 insertions(+), 26 deletions(-) diff --git a/include/llama.h b/include/llama.h index 015a57898..d5e4cef68 100644 --- a/include/llama.h +++ b/include/llama.h @@ -243,14 +243,14 @@ extern "C" { typedef bool (*llama_progress_callback)(float progress, void * user_data); - // Input data for llama_decode + // Input data for llama_encode/llama_decode // A 
llama_batch object can contain input about one or many sequences // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens // // - token : the token ids of the input (used when embd is NULL) // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL) // - pos : the positions of the respective token in the sequence - // (if set to NULL, the token position will be tracked automatically by llama_decode) + // (if set to NULL, the token position will be tracked automatically by llama_encode/llama_decode) // - seq_id : the sequence to which the respective token belongs // (if set to NULL, the sequence ID will be assumed to be 0) // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index bdbf76626..2265db9b2 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -3,6 +3,7 @@ #include "llama-impl.h" #include "llama-cparams.h" #include "llama-vocab.h" +#include "llama-memory.h" #include #include @@ -287,21 +288,27 @@ llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple llama_batch_allocr::llama_batch_allocr() { const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG"); debug = LLAMA_BATCH_DEBUG ? atoi(LLAMA_BATCH_DEBUG) : 0; + + seq_pos.resize(LLAMA_MAX_PARALLEL_SEQUENCES); + seq_cpl.resize(LLAMA_MAX_PARALLEL_SEQUENCES); + for (auto & cur : seq_cpl) { + cur.resize(LLAMA_MAX_PARALLEL_SEQUENCES); + } } -bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & vocab, llama_pos p0) { +bool llama_batch_allocr::init( + const llama_batch & batch_inp, + const llama_vocab & vocab, + const llama_memory_i * memory) { clear(); batch = batch_inp; GGML_ASSERT(batch.n_tokens > 0); - if (!batch.pos) { - if (batch.seq_id) { - LLAMA_LOG_ERROR("%s: pos == NULL, but seq_id != NULL\n", __func__); - return false; - } - } + // + // validate input batch + // if (batch.token) { for (int32_t i = 0; i < batch.n_tokens; ++i) { @@ -323,14 +330,9 @@ bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & } } - if (!batch.pos) { - assert(p0 >= 0); - pos.resize(batch.n_tokens); - for (int32_t i = 0; i < batch.n_tokens; i++) { - pos[i] = p0 + i; - } - batch.pos = pos.data(); - } + // + // auto-generate missing fields + // if (!batch.n_seq_id) { n_seq_id.resize(batch.n_tokens); @@ -349,6 +351,32 @@ bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & batch.seq_id = seq_id.data(); } + if (!batch.pos) { + pos.resize(batch.n_tokens); + + // initialize the starting position for each sequence based on the positions in the memory + llama_pos p0[LLAMA_MAX_PARALLEL_SEQUENCES]; + for (int32_t s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + if (!memory) { + p0[s] = 0; + } else { + p0[s] = memory->seq_pos_max(s) + 1; + } + } + + for (int32_t i = 0; i < batch.n_tokens; i++) { + const llama_seq_id seq_id = batch.seq_id[i][0]; + + pos[i] = p0[seq_id]; + + for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { + p0[batch.seq_id[i][s]] = pos[i] + 1; + } + } + + batch.pos = pos.data(); + } + if (!batch.logits) { // by default return the output only for the last token output.resize(batch.n_tokens); @@ -356,13 +384,36 @@ bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & batch.logits = output.data(); } + // + // compute stats + // + for (int32_t i = 0; i < batch.n_tokens; ++i) { n_outputs += batch.logits[i] != 0; } + // determine coupled sequences + // these 
are pairs of sequences that have at least one token in the input batch that is assigned to both of them + for (int32_t i = 0; i < batch.n_tokens; ++i) { + for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { + seq_pos[batch.seq_id[i][s]].insert(batch.pos[i]); + + if (s > 0) { + const llama_seq_id s0 = batch.seq_id[i][0]; + const llama_seq_id s1 = batch.seq_id[i][s]; + + // mark that sequence s1 is coupled to s0 + seq_cpl[s1][s0] = true; + + // note: the other way around is not necessary for now + //seq_cpl[s0][s1] = true; + } + } + } + if (debug > 0) { - LLAMA_LOG_DEBUG("%s: input batch info (p0 = %d):\n", __func__, p0); - LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, batch.n_tokens); + LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__); + LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, batch.n_tokens); LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) batch.token); LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) batch.embd); LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) batch.pos); @@ -404,6 +455,58 @@ bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & batch.pos[i], batch.n_seq_id[i], ss.str().c_str(), batch.logits[i]); } LLAMA_LOG_DEBUG("%s: ]\n", __func__); + + LLAMA_LOG_DEBUG("%s: seq = [\n", __func__); + for (int s0 = 0; s0 < (int) seq_pos.size(); ++s0) { + if (seq_pos[s0].empty()) { + continue; + } + + std::stringstream ss; + for (int s1 = 0; s1 < (int) seq_cpl[s0].size(); ++s1) { + if (seq_cpl[s0][s1]) { + ss << s1 << " "; + } + } + + LLAMA_LOG_DEBUG("%s: %4d: pos = [%4d, %4d], cpl = %s\n", + __func__, s0, seq_pos_min(s0), seq_pos_max(s0), ss.str().empty() ? "-" : ss.str().c_str()); + } + LLAMA_LOG_DEBUG("%s: ]\n", __func__); + } + } + + // + // consistency checks + // + + for (int32_t s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + if (seq_pos[s].empty()) { + continue; + } + + if (memory && seq_pos_min(s) != memory->seq_pos_max(s) + 1) { + LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s); + return false; + } + + if (seq_pos_max(s) - seq_pos_min(s) + 1 > (int) seq_pos[s].size()) { + LLAMA_LOG_ERROR("%s: sequence %d positions are not continuous\n", __func__, s); + return false; + } + } + + if (memory) { + for (int32_t s0 = 0; s0 < LLAMA_MAX_PARALLEL_SEQUENCES; ++s0) { + for (int32_t s1 = 0; s1 < LLAMA_MAX_PARALLEL_SEQUENCES; ++s1) { + if (seq_cpl[s0][s1]) { + if (memory->seq_pos_min(s0) != memory->seq_pos_min(s1) || + memory->seq_pos_max(s0) != memory->seq_pos_max(s1)) { + LLAMA_LOG_ERROR("%s: sequence %d is coupled to %d in the input batch, but have divereged\n", __func__, s0, s1); + return false; + } + } + } } } @@ -418,6 +521,14 @@ uint32_t llama_batch_allocr::get_n_outputs() const { return n_outputs; } +llama_pos llama_batch_allocr::seq_pos_min(llama_seq_id seq_id) const { + return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].begin(); +} + +llama_pos llama_batch_allocr::seq_pos_max(llama_seq_id seq_id) const { + return seq_pos[seq_id].empty() ? 
-1 : *seq_pos[seq_id].rbegin(); +} + void llama_batch_allocr::clear() { n_outputs = 0; @@ -426,6 +537,14 @@ void llama_batch_allocr::clear() { n_seq_id.clear(); seq_id.clear(); output.clear(); + + for (auto & cur : seq_pos) { + cur.clear(); + } + + for (auto & cur : seq_cpl) { + std::fill(cur.begin(), cur.end(), false); + } } // diff --git a/src/llama-batch.h b/src/llama-batch.h index 1e0be8ac2..04501ce5d 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -4,6 +4,7 @@ #include #include +#include // very similar to llama_batch, // but has more metadata about sequences @@ -77,18 +78,25 @@ struct llama_sbatch { llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split = false); }; -// temporary allocate memory for the input batch if needed +// a helper for sanitizing and fulfilling a batch class llama_batch_allocr { public: llama_batch_allocr(); - // optionally fulfill the batch returned by llama_batch_get_one - bool init(const llama_batch & batch_inp, const llama_vocab & vocab, llama_pos p0); + // sanitize and auto-gen missing data in the input batch + // memory is optional. if provided will be used to check for sequence continuity and to determine the positions + bool init( + const llama_batch & batch_inp, + const llama_vocab & vocab, + const llama_memory_i * memory); const llama_batch & get_batch() const; uint32_t get_n_outputs() const; + llama_pos seq_pos_min(llama_seq_id seq_id) const; + llama_pos seq_pos_max(llama_seq_id seq_id) const; + private: void clear(); @@ -103,5 +111,8 @@ private: std::vector seq_id; std::vector output; + std::vector> seq_pos; // seq_pos[s]: the set of positions in sequence s + std::vector> seq_cpl; // seq_cpl[s0][s1]: if sequence s0 is coupled to sequence s1 + int debug; }; diff --git a/src/llama-context.cpp b/src/llama-context.cpp index ec1e1189b..47c60e960 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -727,9 +727,8 @@ int llama_context::encode(const llama_batch & batch_inp) { return -1; } - // temporary allocate memory for the input batch if needed // note: during encode, we always pass the full sequence starting from pos = 0 - if (!batch_allocr->init(batch_inp, model.vocab, batch_inp.pos ? -1 : 0)) { + if (!batch_allocr->init(batch_inp, model.vocab, nullptr)) { LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); return -1; } @@ -895,8 +894,7 @@ int llama_context::decode(const llama_batch & batch_inp) { return -1; } - // temporary allocate memory for the input batch if needed - if (!batch_allocr->init(batch_inp, model.vocab, batch_inp.pos ? 
-1 : memory->seq_pos_max(0) + 1)) { + if (!batch_allocr->init(batch_inp, model.vocab, memory.get())) { LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); return -1; } diff --git a/src/llama-cparams.h b/src/llama-cparams.h index 2871031ef..51ebe5d17 100644 --- a/src/llama-cparams.h +++ b/src/llama-cparams.h @@ -4,6 +4,7 @@ #include +// TODO: rename to something shorter #define LLAMA_MAX_PARALLEL_SEQUENCES 64 struct llama_cparams { From c311ac664d68d10781a3e7b9f02d9d9520837d80 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 15 Jun 2025 10:08:58 +0300 Subject: [PATCH 05/26] cparams : rename LLAMA_MAX_PARALLEL_SEQUENCES to LLAMA_MAX_SEQ (#14188) ggml-ci --- src/llama-batch.cpp | 20 ++++++++++---------- src/llama-context.cpp | 10 +++++----- src/llama-cparams.cpp | 2 +- src/llama-cparams.h | 3 +-- src/llama-kv-cache-unified.cpp | 8 ++++---- src/llama-kv-cells.h | 16 ++++++++-------- 6 files changed, 29 insertions(+), 30 deletions(-) diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index 2265db9b2..a9f4a3d4c 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -289,10 +289,10 @@ llama_batch_allocr::llama_batch_allocr() { const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG"); debug = LLAMA_BATCH_DEBUG ? atoi(LLAMA_BATCH_DEBUG) : 0; - seq_pos.resize(LLAMA_MAX_PARALLEL_SEQUENCES); - seq_cpl.resize(LLAMA_MAX_PARALLEL_SEQUENCES); + seq_pos.resize(LLAMA_MAX_SEQ); + seq_cpl.resize(LLAMA_MAX_SEQ); for (auto & cur : seq_cpl) { - cur.resize(LLAMA_MAX_PARALLEL_SEQUENCES); + cur.resize(LLAMA_MAX_SEQ); } } @@ -322,8 +322,8 @@ bool llama_batch_allocr::init( if (batch.seq_id) { for (int32_t i = 0; i < batch.n_tokens; ++i) { for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { - if (batch.seq_id && (batch.seq_id[i][s] < 0 || batch.seq_id[i][s] >= LLAMA_MAX_PARALLEL_SEQUENCES)) { - LLAMA_LOG_ERROR("%s: invalid seq_id[%d][%d] = %d > %d\n", __func__, i, s, batch.seq_id[i][s], LLAMA_MAX_PARALLEL_SEQUENCES); + if (batch.seq_id && (batch.seq_id[i][s] < 0 || batch.seq_id[i][s] >= LLAMA_MAX_SEQ)) { + LLAMA_LOG_ERROR("%s: invalid seq_id[%d][%d] = %d > %d\n", __func__, i, s, batch.seq_id[i][s], LLAMA_MAX_SEQ); return false; } } @@ -355,8 +355,8 @@ bool llama_batch_allocr::init( pos.resize(batch.n_tokens); // initialize the starting position for each sequence based on the positions in the memory - llama_pos p0[LLAMA_MAX_PARALLEL_SEQUENCES]; - for (int32_t s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + llama_pos p0[LLAMA_MAX_SEQ]; + for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { if (!memory) { p0[s] = 0; } else { @@ -480,7 +480,7 @@ bool llama_batch_allocr::init( // consistency checks // - for (int32_t s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq_pos[s].empty()) { continue; } @@ -497,8 +497,8 @@ bool llama_batch_allocr::init( } if (memory) { - for (int32_t s0 = 0; s0 < LLAMA_MAX_PARALLEL_SEQUENCES; ++s0) { - for (int32_t s1 = 0; s1 < LLAMA_MAX_PARALLEL_SEQUENCES; ++s1) { + for (int32_t s0 = 0; s0 < LLAMA_MAX_SEQ; ++s0) { + for (int32_t s1 = 0; s1 < LLAMA_MAX_SEQ; ++s1) { if (seq_cpl[s0][s1]) { if (memory->seq_pos_min(s0) != memory->seq_pos_min(s1) || memory->seq_pos_max(s0) != memory->seq_pos_max(s1)) { diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 47c60e960..3a113d1bc 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -29,8 +29,8 @@ llama_context::llama_context( const auto & hparams = model.hparams; cparams.n_seq_max = std::max(1u, params.n_seq_max); - if (cparams.n_seq_max > 
LLAMA_MAX_PARALLEL_SEQUENCES) { - throw std::runtime_error("n_seq_max must be <= " + std::to_string(LLAMA_MAX_PARALLEL_SEQUENCES)); + if (cparams.n_seq_max > LLAMA_MAX_SEQ) { + throw std::runtime_error("n_seq_max must be <= " + std::to_string(LLAMA_MAX_SEQ)); } cparams.n_threads = params.n_threads; @@ -1023,8 +1023,8 @@ int llama_context::decode(const llama_batch & batch_inp) { if (!res) { // the last ubatch failed or was aborted -> remove all positions of that ubatch from the KV cache - llama_pos pos_min[LLAMA_MAX_PARALLEL_SEQUENCES]; - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + llama_pos pos_min[LLAMA_MAX_SEQ]; + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { pos_min[s] = std::numeric_limits::max(); } @@ -1035,7 +1035,7 @@ int llama_context::decode(const llama_batch & batch_inp) { pos_min[seq_id] = std::min(pos_min[seq_id], ubatch.pos[i]); } - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (pos_min[s] == std::numeric_limits::max()) { continue; } diff --git a/src/llama-cparams.cpp b/src/llama-cparams.cpp index f7b36590f..a3e7a37ee 100644 --- a/src/llama-cparams.cpp +++ b/src/llama-cparams.cpp @@ -1,5 +1,5 @@ #include "llama-cparams.h" size_t llama_max_parallel_sequences(void) { - return LLAMA_MAX_PARALLEL_SEQUENCES; + return LLAMA_MAX_SEQ; } diff --git a/src/llama-cparams.h b/src/llama-cparams.h index 51ebe5d17..118615d5b 100644 --- a/src/llama-cparams.h +++ b/src/llama-cparams.h @@ -4,8 +4,7 @@ #include -// TODO: rename to something shorter -#define LLAMA_MAX_PARALLEL_SEQUENCES 64 +#define LLAMA_MAX_SEQ 64 struct llama_cparams { uint32_t n_ctx; // context size used during inference diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index d4e92eab3..031070570 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -572,7 +572,7 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { LLAMA_LOG_DEBUG("\n%s\n", ss.c_str()); } - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (cells.seq_pos_min(s) < 0) { continue; } @@ -652,8 +652,8 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch // keep track of the max sequence position that we would overwrite with this ubatch // for non-SWA cache, this would be always empty - llama_seq_id seq_pos_max_rm[LLAMA_MAX_PARALLEL_SEQUENCES]; - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ]; + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { seq_pos_max_rm[s] = -1; } @@ -684,7 +684,7 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch // note: we want to preserve the invariant that all positions between [pos_min, pos_max] for each sequence // will be present in the cache. 
so we have to purge any position which is less than those we would overwrite // ref: https://github.com/ggml-org/llama.cpp/pull/13746#issuecomment-2916057092 - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq_pos_max_rm[s] == -1) { continue; } diff --git a/src/llama-kv-cells.h b/src/llama-kv-cells.h index acf30aebe..1d4e70f4d 100644 --- a/src/llama-kv-cells.h +++ b/src/llama-kv-cells.h @@ -23,7 +23,7 @@ public: used.clear(); - for (uint32_t s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { seq_pos[s].clear(); } } @@ -240,7 +240,7 @@ public: llama_seq_id seq_get(uint32_t i) const { assert(seq[i].count() == 1); - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq[i].test(s)) { return s; } @@ -253,7 +253,7 @@ public: // return -1 if the sequence is not present llama_pos seq_pos_min(llama_seq_id seq_id) const { assert(seq_id >= 0); - assert(seq_id < LLAMA_MAX_PARALLEL_SEQUENCES); + assert(seq_id < LLAMA_MAX_SEQ); if (seq_pos[seq_id].empty()) { return -1; @@ -266,7 +266,7 @@ public: // return -1 if the sequence is not present llama_pos seq_pos_max(llama_seq_id seq_id) const { assert(seq_id >= 0); - assert(seq_id < LLAMA_MAX_PARALLEL_SEQUENCES); + assert(seq_id < LLAMA_MAX_SEQ); if (seq_pos[seq_id].empty()) { return -1; @@ -384,20 +384,20 @@ private: // std::vector shift; - using bits_t = std::bitset; + using bits_t = std::bitset; // the bitset seq[i] tells us which sequences are currently occupying the i-th cell std::vector seq; // the set seq_pos[s] tells us which positions are currently present for sequence s // this way seq_pos[s].begin() and seq_pos[s].rbegin() give us the min/max positions currently in the cache - std::set seq_pos[LLAMA_MAX_PARALLEL_SEQUENCES]; + std::set seq_pos[LLAMA_MAX_SEQ]; // helper functions for updating `seq_pos`, once cell at a time: // remove cell i void seq_pos_rm(uint32_t i) { - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq[i].test(s)) { seq_pos[s].erase(pos[i]); } @@ -406,7 +406,7 @@ private: // add cell i void seq_pos_add(uint32_t i) { - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq[i].test(s)) { seq_pos[s].insert(pos[i]); } From 9ae4143bc6ecb4c2f0f0301578f619f6c201b857 Mon Sep 17 00:00:00 2001 From: Mikko Juola Date: Sun, 15 Jun 2025 00:52:06 -0700 Subject: [PATCH 06/26] model : add dots.llm1 architecture support (#14044) (#14118) Adds: * Dots1Model to convert_hf_to_gguf.py * Computation graph code to llama-model.cpp * Chat template to llama-chat.cpp to detect this model's template. --- The model is called "dots.llm1" (I decided to shorten it to dots1 or DOTS1 in the code generally) architecture. 
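As a rough illustration of the chat format wired up in llama-chat.cpp below (derived from the special tokens this patch handles; exact spacing may differ), a system + user exchange renders approximately as:

    <|system|>You are a helpful assistant.<|endofsystem|><|userprompt|>Hello!<|endofuserprompt|><|response|>

where the trailing <|response|> is appended when the caller asks the template to prime an assistant reply.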
The only models that exist as of writing of this commit that follow this architecture are "dots.llm1.inst" and "dots.llm1.base" from here: * https://huggingface.co/rednote-hilab/dots.llm1.inst * https://huggingface.co/rednote-hilab/dots.llm1.base The model architecture is a combination of Qwen and Deepseek parts, as seen here: https://github.com/huggingface/transformers/blob/ffe12627b4e84489d2ab91dd0ec00614855edc79/src/transformers/models/dots1/modular_dots1.py --- convert_hf_to_gguf.py | 28 +++++ gguf-py/gguf/constants.py | 26 ++++ gguf-py/gguf/tensor_mapping.py | 2 +- src/llama-arch.cpp | 29 +++++ src/llama-arch.h | 1 + src/llama-chat.cpp | 17 +++ src/llama-chat.h | 1 + src/llama-model.cpp | 222 +++++++++++++++++++++++++++++++++ src/llama-model.h | 1 + 9 files changed, 326 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 173a103ba..cff72c85f 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -5262,6 +5262,34 @@ class DeepseekV2Model(TextModel): raise ValueError(f"Unprocessed experts: {experts}") +@ModelBase.register("Dots1ForCausalLM") +class Dots1Model(Qwen2MoeModel): + model_arch = gguf.MODEL_ARCH.DOTS1 + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.hparams["num_experts"] = self.hparams["n_routed_experts"] + + def set_gguf_parameters(self): + super().set_gguf_parameters() + self.gguf_writer.add_leading_dense_block_count(self.hparams["first_k_dense_replace"]) + self.gguf_writer.add_expert_shared_count(self.hparams["n_shared_experts"]) + self.gguf_writer.add_expert_weights_scale(self.hparams["routed_scaling_factor"]) + self.gguf_writer.add_expert_weights_norm(self.hparams["norm_topk_prob"]) + + if self.hparams["scoring_func"] == "noaux_tc": + self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID) + else: + raise ValueError(f"Unsupported scoring_func value: {self.hparams['scoring_func']}") + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None): + if name.endswith("e_score_correction_bias"): + name = name.replace("e_score_correction_bias", "e_score_correction.bias") + if "shared_experts" in name: + return [(self.map_tensor_name(name), data_torch)] + return super().modify_tensors(data_torch, name, bid) + + @ModelBase.register("PLMForCausalLM") class PLMModel(TextModel): model_arch = gguf.MODEL_ARCH.PLM diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 3ee2b2064..8de2f7a53 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -343,6 +343,7 @@ class MODEL_ARCH(IntEnum): WAVTOKENIZER_DEC = auto() PLM = auto() BAILINGMOE = auto() + DOTS1 = auto() class VISION_PROJECTOR_TYPE(IntEnum): @@ -623,6 +624,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { MODEL_ARCH.WAVTOKENIZER_DEC: "wavtokenizer-dec", MODEL_ARCH.PLM: "plm", MODEL_ARCH.BAILINGMOE: "bailingmoe", + MODEL_ARCH.DOTS1: "dots1" } VISION_PROJECTOR_TYPE_NAMES: dict[VISION_PROJECTOR_TYPE, str] = { @@ -2044,6 +2046,30 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.FFN_DOWN_SHEXP, MODEL_TENSOR.FFN_UP_SHEXP, ], + MODEL_ARCH.DOTS1: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_EXP_PROBS_B, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_GATE_INP, + 
MODEL_TENSOR.FFN_GATE_SHEXP, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_DOWN_SHEXP, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_UP_EXP, + MODEL_TENSOR.FFN_UP_SHEXP, + ], # TODO } diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 439fc1afe..5e3f01754 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -305,7 +305,7 @@ class TensorNameMap: ), MODEL_TENSOR.FFN_EXP_PROBS_B: ( - "model.layers.{bid}.mlp.gate.e_score_correction", # deepseek-v3 + "model.layers.{bid}.mlp.gate.e_score_correction", # deepseek-v3 dots1 ), # Feed-forward up diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 43fa60a80..f8f76eedd 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -72,6 +72,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" }, { LLM_ARCH_PLM, "plm" }, { LLM_ARCH_BAILINGMOE, "bailingmoe" }, + { LLM_ARCH_DOTS1, "dots1" }, { LLM_ARCH_UNKNOWN, "(unknown)" }, }; @@ -1555,6 +1556,34 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, }, }, + { + LLM_ARCH_DOTS1, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" }, + } + }, { LLM_ARCH_UNKNOWN, { diff --git a/src/llama-arch.h b/src/llama-arch.h index f3825528a..18f6d6b94 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -76,6 +76,7 @@ enum llm_arch { LLM_ARCH_WAVTOKENIZER_DEC, LLM_ARCH_PLM, LLM_ARCH_BAILINGMOE, + LLM_ARCH_DOTS1, LLM_ARCH_UNKNOWN, }; diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index d12743e6b..bc4fa05a7 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -183,6 +183,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { return LLM_CHAT_TEMPLATE_BAILING; } else if (tmpl_contains("<|header_start|>") && tmpl_contains("<|header_end|>")) { return LLM_CHAT_TEMPLATE_LLAMA4; + } else if (tmpl_contains("<|endofuserprompt|>")) { + return LLM_CHAT_TEMPLATE_DOTS1; } return LLM_CHAT_TEMPLATE_UNKNOWN; } @@ -643,6 +645,21 @@ int32_t llm_chat_apply_template( if (add_ass) { ss << "Assistant:"; } + } else if (tmpl == LLM_CHAT_TEMPLATE_DOTS1) { + // dots.llm1.inst (DOTS1) + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << "<|system|>" << message->content << "<|endofsystem|>"; + } else if (role == "user") { + ss << "<|userprompt|>" << message->content << "<|endofuserprompt|>"; + } else { + ss << "<|response|>" << 
message->content << "<|endofresponse|>"; + } + } + if (add_ass) { + ss << "<|response|>"; + } } else { // template not supported return -1; diff --git a/src/llama-chat.h b/src/llama-chat.h index db24ade21..38800010a 100644 --- a/src/llama-chat.h +++ b/src/llama-chat.h @@ -43,6 +43,7 @@ enum llm_chat_template { LLM_CHAT_TEMPLATE_BAILING, LLM_CHAT_TEMPLATE_LLAMA4, LLM_CHAT_TEMPLATE_SMOLVLM, + LLM_CHAT_TEMPLATE_DOTS1, LLM_CHAT_TEMPLATE_UNKNOWN, }; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index c64bf9de9..fdd5fefd6 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -80,6 +80,7 @@ const char * llm_type_name(llm_type type) { case LLM_TYPE_40B: return "40B"; case LLM_TYPE_65B: return "65B"; case LLM_TYPE_70B: return "70B"; + case LLM_TYPE_142B: return "142B"; case LLM_TYPE_236B: return "236B"; case LLM_TYPE_290B: return "290B"; case LLM_TYPE_314B: return "314B"; @@ -1444,6 +1445,20 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_DOTS1: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false); + ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false); + switch (hparams.n_layer) { + case 62: type = LLM_TYPE_142B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; default: throw std::runtime_error("unsupported model architecture"); } @@ -4123,6 +4138,58 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0); } } break; + case LLM_ARCH_DOTS1: + { + const int64_t n_ff_exp = hparams.n_ff_exp; + const int64_t n_expert_shared = hparams.n_expert_shared; + + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0); + layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + if (i < (int) hparams.n_layer_dense_lead) { + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } else { + layer.ffn_gate_inp = 
create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0); + layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED); + + if (n_expert == 0) { + throw std::runtime_error("n_expert must be > 0"); + } + if (n_expert_used == 0) { + throw std::runtime_error("n_expert_used must be > 0"); + } + + // MoE branch + layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0); + layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0); + layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0); + + // Shared expert branch + layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0); + layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd}, 0); + layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0); + } + } + } break; default: throw std::runtime_error("unknown architecture"); } @@ -13194,6 +13261,156 @@ struct llm_build_bailingmoe : public llm_graph_context { } }; +struct llm_build_dots1 : public llm_graph_context { + llm_build_dots1(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self_attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + cb(Kcur, "Kcur_normed", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + 
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // MoE branch + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + if ((uint32_t) il < hparams.n_layer_dense_lead) { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } else { + ggml_tensor * moe_out = + build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + model.layers[il].ffn_exp_probs_b, + n_expert, n_expert_used, + LLM_FFN_SILU, hparams.expert_weights_norm, + true, hparams.expert_weights_scale, + (llama_expert_gating_func_type) hparams.expert_gating_func, + il); + cb(moe_out, "ffn_moe_out", il); + + { + ggml_tensor * ffn_shexp = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(ffn_shexp, "ffn_shexp", il); + + cur = ggml_add(ctx0, moe_out, ffn_shexp); + cb(cur, "ffn_out", il); + } + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + llama_memory_i * llama_model::create_memory(const llama_memory_params & params, llama_cparams & cparams) const { llama_memory_i * res; @@ -13532,6 +13749,10 @@ llm_graph_result_ptr llama_model::build_graph( { llm = std::make_unique(*this, params, gf); } break; + case LLM_ARCH_DOTS1: + { + llm = std::make_unique(*this, params, gf); + } break; default: GGML_ABORT("fatal error"); } @@ -13714,6 +13935,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_NEMOTRON: case LLM_ARCH_EXAONE: case LLM_ARCH_MINICPM3: + case LLM_ARCH_DOTS1: return LLAMA_ROPE_TYPE_NEOX; case LLM_ARCH_QWEN2VL: diff --git a/src/llama-model.h b/src/llama-model.h index 18b714620..06e6c6879 100644 --- a/src/llama-model.h +++ b/src/llama-model.h @@ -73,6 +73,7 @@ enum llm_type { LLM_TYPE_40B, LLM_TYPE_65B, LLM_TYPE_70B, + LLM_TYPE_142B, LLM_TYPE_236B, LLM_TYPE_290B, LLM_TYPE_314B, From 5fce5f948df8f189a5401a8ecaa9753106e75abb Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 15 Jun 2025 10:52:11 +0300 Subject: [PATCH 07/26] kv-cache : fix use-after-move of defrag info (#14189) ggml-ci --- src/llama-kv-cache-unified.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index 031070570..b17936abd 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -1739,7 +1739,7 @@ llama_kv_cache_unified_state::llama_kv_cache_unified_state( llama_context * lctx, bool do_shift, defrag_info dinfo) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), lctx(lctx), do_shift(do_shift), dinfo(std::move(dinfo)) { - if (!do_shift && dinfo.empty()) { + if (!do_shift && this->dinfo.empty()) { status = LLAMA_MEMORY_STATUS_NO_UPDATE; } } From 2c2caa444341d99c87ff153f142c2d4762a776a2 
Mon Sep 17 00:00:00 2001 From: uvos Date: Sun, 15 Jun 2025 15:45:27 +0200 Subject: [PATCH 08/26] HIP: Replace usage of depricated preprocessor macro __AMDGCN_WAVEFRONT_SIZE__ (#14183) --- ggml/src/ggml-cuda/common.cuh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index a82ec26ee..563a7828b 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -262,11 +262,11 @@ static bool cp_async_available(const int cc) { } static constexpr __device__ int ggml_cuda_get_physical_warp_size() { -#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) - return __AMDGCN_WAVEFRONT_SIZE; +#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && (defined(__GFX9__) || defined(__GFX8__)) + return 64; #else return 32; -#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) +#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && (defined(__GFX9__) || defined(__GFX8__)) } [[noreturn]] From e54b394082de242be4ee2e692b11fcc8d4eba371 Mon Sep 17 00:00:00 2001 From: uvos Date: Sun, 15 Jun 2025 17:30:13 +0200 Subject: [PATCH 09/26] CUDA/HIP: fix ssm_scan on devices where warp size is not 32 (#14196) --- ggml/src/ggml-cuda/ssm-scan.cu | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ggml/src/ggml-cuda/ssm-scan.cu b/ggml/src/ggml-cuda/ssm-scan.cu index 37ee208c0..2d34b8360 100644 --- a/ggml/src/ggml-cuda/ssm-scan.cu +++ b/ggml/src/ggml-cuda/ssm-scan.cu @@ -10,6 +10,8 @@ __global__ void __launch_bounds__(splitD, 2) float * __restrict__ dst, const int64_t L) { GGML_UNUSED(src1_nb0); GGML_UNUSED(src2_nb0); + + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); const int bidx = blockIdx.x; // split along B const int bidy = blockIdx.y; // split along D const int tid = threadIdx.x; @@ -44,16 +46,16 @@ __global__ void __launch_bounds__(splitD, 2) if (N == 16) { #pragma unroll for (size_t i = 0; i < splitD / 4; i += 2) { - float value = A_block[(wid * warpSize + i) * stride_A + wtid]; + float value = A_block[(wid * warp_size + i) * stride_A + wtid]; // todo: bank conflict // I am always confused with how to use the swizzling method to solve // bank conflit. Hoping somebody can tell me. - smem_A[(wid * warpSize + i) * stride_sA + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; + smem_A[(wid * warp_size + i) * stride_sA + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; } #pragma unroll for (size_t i = 0; i < splitD / 4; i += 2) { - float value = s0_block[(wid * warpSize + i) * stride_s0 + wtid]; - smem_s0[(wid * warpSize + i) * stride_ss0 + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; + float value = s0_block[(wid * warp_size + i) * stride_s0 + wtid]; + smem_s0[(wid * warp_size + i) * stride_ss0 + wtid + ((wtid / 16) > 0 ? 
1 : 0)] = value; } } From 30e5b01de2a0bcddc7c063c8ef0802703a958417 Mon Sep 17 00:00:00 2001 From: Ed Addario <29247825+EAddario@users.noreply.github.com> Date: Sun, 15 Jun 2025 17:53:45 +0100 Subject: [PATCH 10/26] quantize : change int to unsigned int for KV overrides (#14197) --- src/llama-quant.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 159b1307a..8cf45732f 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -585,7 +585,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) { gguf_set_val_f32(ctx_out.get(), o.key, o.val_f64); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) { - gguf_set_val_i32(ctx_out.get(), o.key, o.val_i64); + // Setting type to UINT32. See https://github.com/ggml-org/llama.cpp/pull/14182 for context + gguf_set_val_u32(ctx_out.get(), o.key, (uint32_t)abs(o.val_i64)); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) { gguf_set_val_bool(ctx_out.get(), o.key, o.val_bool); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) { From cd355eda7df1898d25d433b4bdaa4b4b479e0bad Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Sun, 15 Jun 2025 23:36:22 +0200 Subject: [PATCH 11/26] server : When listening on a unix domain socket don't print http:// and port (#14180) Instead show something like this: main: server is listening on file.sock - starting the main loop Signed-off-by: Eric Curtin --- tools/server/server.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/server/server.cpp b/tools/server/server.cpp index b439d8b19..626c58bd3 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -4878,7 +4878,9 @@ int main(int argc, char ** argv) { }; bool was_bound = false; + bool is_sock = false; if (string_ends_with(std::string(params.hostname), ".sock")) { + is_sock = true; LOG_INF("%s: setting address family to AF_UNIX\n", __func__); svr->set_address_family(AF_UNIX); // bind_to_port requires a second arg, any value other than 0 should @@ -4956,7 +4958,9 @@ int main(int argc, char ** argv) { SetConsoleCtrlHandler(reinterpret_cast(console_ctrl_handler), true); #endif - LOG_INF("%s: server is listening on http://%s:%d - starting the main loop\n", __func__, params.hostname.c_str(), params.port); + LOG_INF("%s: server is listening on %s - starting the main loop\n", __func__, + is_sock ? 
string_format("unix://%s", params.hostname.c_str()).c_str() : + string_format("http://%s:%d", params.hostname.c_str(), params.port).c_str()); // this call blocks the main thread until queue_tasks.terminate() is called ctx_server.queue_tasks.start_loop(); From d7da8dc83a03b30e1ec10317080082ea76840c38 Mon Sep 17 00:00:00 2001 From: Bartowski <3266127+bartowski1182@users.noreply.github.com> Date: Mon, 16 Jun 2025 00:04:06 +0100 Subject: [PATCH 12/26] model : Add support for Arcee AI's upcoming AFM model (#14185) * Add Arcee AFM support * Add draft update code * Fix linter and update URL, may still not be final * Update src/llama-model.cpp Co-authored-by: Xuan-Son Nguyen * Remote accidental blank line --------- Co-authored-by: Xuan-Son Nguyen --- convert_hf_to_gguf.py | 14 +++ convert_hf_to_gguf_update.py | 1 + gguf-py/gguf/constants.py | 19 +++- src/llama-arch.cpp | 19 ++++ src/llama-arch.h | 1 + src/llama-model.cpp | 181 +++++++++++++++++++++++++++++++++++ src/llama-vocab.cpp | 1 + 7 files changed, 235 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index cff72c85f..2232a7d82 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -2020,6 +2020,20 @@ class LlamaModel(TextModel): raise ValueError(f"Unprocessed experts: {experts}") +@ModelBase.register("ArceeForCausalLM") +class ArceeModel(LlamaModel): + model_arch = gguf.MODEL_ARCH.ARCEE + + def set_gguf_parameters(self): + super().set_gguf_parameters() + self._try_set_pooling_type() + rope_scaling = self.hparams.get("rope_scaling") or {} + if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) + self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) + self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) + + @ModelBase.register( "LlavaForConditionalGeneration", # pixtral "Mistral3ForConditionalGeneration", # mistral small 3.1 diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index 2f733f097..fae4f7260 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -128,6 +128,7 @@ models = [ {"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", }, {"name": "pixtral", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistral-community/pixtral-12b", }, {"name": "seed-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base", }, + {"name": "arcee", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/arcee-ai/AFM-4.5B", }, # TODO confirm final URL ] # some models are known to be broken upstream, so we will skip them as exceptions diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 8de2f7a53..9b2143c7c 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -344,6 +344,7 @@ class MODEL_ARCH(IntEnum): PLM = auto() BAILINGMOE = auto() DOTS1 = auto() + ARCEE = auto() class VISION_PROJECTOR_TYPE(IntEnum): @@ -624,7 +625,8 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { MODEL_ARCH.WAVTOKENIZER_DEC: "wavtokenizer-dec", MODEL_ARCH.PLM: "plm", MODEL_ARCH.BAILINGMOE: "bailingmoe", - MODEL_ARCH.DOTS1: "dots1" + MODEL_ARCH.DOTS1: "dots1", + MODEL_ARCH.ARCEE: "arcee", } VISION_PROJECTOR_TYPE_NAMES: dict[VISION_PROJECTOR_TYPE, str] = { @@ -2070,6 +2072,21 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.FFN_UP_EXP, 
MODEL_TENSOR.FFN_UP_SHEXP, ], + MODEL_ARCH.ARCEE: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], # TODO } diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index f8f76eedd..a3e7c861c 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -73,6 +73,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_PLM, "plm" }, { LLM_ARCH_BAILINGMOE, "bailingmoe" }, { LLM_ARCH_DOTS1, "dots1" }, + { LLM_ARCH_ARCEE, "arcee" }, { LLM_ARCH_UNKNOWN, "(unknown)" }, }; @@ -244,6 +245,24 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, }, }, + { + LLM_ARCH_ARCEE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_LLAMA4, { diff --git a/src/llama-arch.h b/src/llama-arch.h index 18f6d6b94..168fdcb40 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -77,6 +77,7 @@ enum llm_arch { LLM_ARCH_PLM, LLM_ARCH_BAILINGMOE, LLM_ARCH_DOTS1, + LLM_ARCH_ARCEE, LLM_ARCH_UNKNOWN, }; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index fdd5fefd6..dcc8b0be7 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -599,6 +599,16 @@ void llama_model::load_hparams(llama_model_loader & ml) { hparams.use_kq_norm = false; } } break; + case LLM_ARCH_ARCEE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + // Arcee uses the same structure as Llama + switch (hparams.n_layer) { + case 36: type = LLM_TYPE_4B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_DECI: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -4190,6 +4200,37 @@ bool llama_model::load_tensors(llama_model_loader & ml) { } } } break; + case LLM_ARCH_ARCEE: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + + layer.ffn_norm = 
create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; default: throw std::runtime_error("unknown architecture"); } @@ -13411,6 +13452,141 @@ struct llm_build_dots1 : public llm_graph_context { } }; +struct llm_build_arcee : public llm_graph_context { + llm_build_arcee(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + // ARCEE uses relu^2 instead of silu + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + 
model.layers[il].ffn_up, NULL, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_RELU_SQR, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + llama_memory_i * llama_model::create_memory(const llama_memory_params & params, llama_cparams & cparams) const { llama_memory_i * res; @@ -13753,6 +13929,10 @@ llm_graph_result_ptr llama_model::build_graph( { llm = std::make_unique(*this, params, gf); } break; + case LLM_ARCH_ARCEE: + { + llm = std::make_unique(*this, params, gf); + } break; default: GGML_ABORT("fatal error"); } @@ -13902,6 +14082,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_GRANITE_MOE: case LLM_ARCH_CHAMELEON: case LLM_ARCH_BAILINGMOE: + case LLM_ARCH_ARCEE: return LLAMA_ROPE_TYPE_NORM; // the pairs of head values are offset by n_rot/2 diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index 905d7c428..dd2251ef3 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -1987,6 +1987,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { || t.first == "<|eom_id|>" || t.first == "" || t.first == "_" + || t.first == "<|end_of_text|>" ) { special_eog_ids.insert(t.second); if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) { From 3555b3004ba7687be3d734acade52a3345758aa4 Mon Sep 17 00:00:00 2001 From: xctan Date: Mon, 16 Jun 2025 13:54:15 +0800 Subject: [PATCH 13/26] ggml-cpu : rework weak alias on apple targets (#14146) * ggml-cpu : rework weak alias on apple targets * fix powerpc detection * fix ppc detection * fix powerpc detection on darwin --- ggml/cmake/common.cmake | 3 +- ggml/src/ggml-cpu/apple-fallback.h | 88 ++++++++++++++++++++++++++++++ ggml/src/ggml-cpu/ggml-cpu-impl.h | 2 +- ggml/src/ggml-cpu/quants.c | 4 ++ ggml/src/ggml-cpu/quants.h | 27 --------- ggml/src/ggml-cpu/repack.cpp | 4 ++ ggml/src/ggml-cpu/repack.h | 18 +----- 7 files changed, 99 insertions(+), 47 deletions(-) create mode 100644 ggml/src/ggml-cpu/apple-fallback.h diff --git a/ggml/cmake/common.cmake b/ggml/cmake/common.cmake index bb1ec9b37..cb6638833 100644 --- a/ggml/cmake/common.cmake +++ b/ggml/cmake/common.cmake @@ -36,8 +36,7 @@ function(ggml_get_system_arch) (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) set(GGML_SYSTEM_ARCH "x86" PARENT_SCOPE) - elseif ("${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "ppc64le " OR - "${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "powerpc ") + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc|power") set(GGML_SYSTEM_ARCH "PowerPC" PARENT_SCOPE) elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") set(GGML_SYSTEM_ARCH "loongarch64" PARENT_SCOPE) diff --git a/ggml/src/ggml-cpu/apple-fallback.h b/ggml/src/ggml-cpu/apple-fallback.h new file mode 100644 index 000000000..f477505d7 --- /dev/null +++ b/ggml/src/ggml-cpu/apple-fallback.h @@ -0,0 +1,88 @@ +#pragma once + +// Solve alias issue for Apple targets (currently PowerPC, x86, and ARM64). 
+// Mach-O has a weak alias equivalent but no practical compiler support can +// be found, so we need to do it manually. +// ref: https://stackoverflow.com/questions/42757744 +// +// This file is a complement to native implementations in the `arch` folder. +// A kernel in quants.c or repack.cpp is either: +// - implemented in the `arch` folder, or +// - defined in this file to remove the `_generic` suffix + +#if defined(GGML_CPU_GENERIC) +// quants.c +#define quantize_row_q8_0_generic quantize_row_q8_0 +#define quantize_row_q8_1_generic quantize_row_q8_1 +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_q4_0_q8_0_generic ggml_vec_dot_q4_0_q8_0 +#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 +#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 +#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 +#define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K +#define ggml_vec_dot_q3_K_q8_K_generic ggml_vec_dot_q3_K_q8_K +#define ggml_vec_dot_q4_K_q8_K_generic ggml_vec_dot_q4_K_q8_K +#define ggml_vec_dot_q5_K_q8_K_generic ggml_vec_dot_q5_K_q8_K +#define ggml_vec_dot_q6_K_q8_K_generic ggml_vec_dot_q6_K_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 +#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__aarch64__) || defined(__arm__) +// repack.cpp +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#elif defined(__x86_64__) || defined(__i386__) +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define 
ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__POWERPC__) +// ref: https://github.com/ggml-org/llama.cpp/pull/14146#issuecomment-2972561679 +// quants.c +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#endif diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index 69415daa8..9662e4d7b 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -509,7 +509,7 @@ void ggml_barrier(struct ggml_threadpool * tp); #define GGML_DO_PRAGMA_(x) _Pragma (#x) #define GGML_DO_PRAGMA(x) GGML_DO_PRAGMA_(x) -#if defined(GGML_CPU_GENERIC) || defined(__HIPCC__) +#if defined(GGML_CPU_GENERIC) || defined(__HIPCC__) || defined(__APPLE__) // Note for Apple targets: // - clang: aliases are not supported on darwin // - all native kernels need to be implemented in both x86 and arm files diff --git a/ggml/src/ggml-cpu/quants.c b/ggml/src/ggml-cpu/quants.c index 1ca9c50e7..516c5b2ce 100644 --- a/ggml/src/ggml-cpu/quants.c +++ b/ggml/src/ggml-cpu/quants.c @@ -5,6 +5,10 @@ #include "ggml-quants.h" #include "quants.h" +#if defined(__APPLE__) +#include "apple-fallback.h" +#endif + #include #include #include diff --git a/ggml/src/ggml-cpu/quants.h b/ggml/src/ggml-cpu/quants.h index d729e07d6..dc4342c87 100644 --- a/ggml/src/ggml-cpu/quants.h +++ b/ggml/src/ggml-cpu/quants.h @@ -84,33 +84,6 @@ void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -#if defined(GGML_CPU_GENERIC) -#define quantize_row_q8_0_generic quantize_row_q8_0 -#define quantize_row_q8_1_generic quantize_row_q8_1 -#define quantize_row_q8_K_generic quantize_row_q8_K -#define ggml_vec_dot_q4_0_q8_0_generic ggml_vec_dot_q4_0_q8_0 -#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 -#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 -#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 -#define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 -#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K -#define 
ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K -#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K -#define ggml_vec_dot_q3_K_q8_K_generic ggml_vec_dot_q3_K_q8_K -#define ggml_vec_dot_q4_K_q8_K_generic ggml_vec_dot_q4_K_q8_K -#define ggml_vec_dot_q5_K_q8_K_generic ggml_vec_dot_q5_K_q8_K -#define ggml_vec_dot_q6_K_q8_K_generic ggml_vec_dot_q6_K_q8_K -#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K -#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K -#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K -#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K -#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K -#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K -#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K -#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 -#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K -#endif - #ifdef __cplusplus } #endif diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 628142d5f..604ccee90 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -8,6 +8,10 @@ #include "ggml-cpu-impl.h" #include "traits.h" +#if defined(__APPLE__) +#include "apple-fallback.h" +#endif + #include #include #include diff --git a/ggml/src/ggml-cpu/repack.h b/ggml/src/ggml-cpu/repack.h index 8ee6e92ea..b13d2d0c7 100644 --- a/ggml/src/ggml-cpu/repack.h +++ b/ggml/src/ggml-cpu/repack.h @@ -67,7 +67,7 @@ extern "C" { // Workaround for clang: // clang++ complains: ``error: call to 'ggml_gemm_q4_0_4x4_q8_0' is ambiguous'' // repro: https://godbolt.org/z/oKdeWKonM (ICE), https://godbolt.org/z/1szq6P36v (ambiguous call) -#if defined(GGML_CPU_CLANG_WORKAROUND) || !(defined(__GNUC__) && defined(__clang__)) || defined(__HIPCC__) +#if defined(GGML_CPU_CLANG_WORKAROUND) || defined(__APPLE__) || !(defined(__GNUC__) && defined(__clang__)) || defined(__HIPCC__) void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); @@ -98,22 +98,6 @@ void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); -#if defined(GGML_CPU_GENERIC) -#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 -#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 -#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 -#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 -#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 -#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 -#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K -#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 -#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 -#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 -#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 -#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K -#define 
ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 -#endif - #if defined(__cplusplus) } // extern "C" #endif From c89c2d1ab94b11845240b7d3313c87691ea18d88 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 16 Jun 2025 00:21:08 -0600 Subject: [PATCH 14/26] vulkan: mutex around vkQueueSubmit (#14127) This fixes the remaining crash in test-thread-safety on my system. --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 32d640744..8d62303aa 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -168,6 +168,11 @@ struct vk_command_pool { vk_queue *q; }; +// Prevent simultaneous submissions to the same queue. +// This could be per vk_queue if we stopped having two vk_queue structures +// sharing the same vk::Queue. +static std::mutex queue_mutex; + struct vk_queue { uint32_t queue_family_index; vk::Queue queue; @@ -1266,6 +1271,7 @@ static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_command static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) { if (ctx->seqs.empty()) { if (fence) { + std::lock_guard guard(queue_mutex); ctx->p->q->queue.submit({}, fence); } return; @@ -1335,6 +1341,7 @@ static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) { } } + std::lock_guard guard(queue_mutex); ctx->p->q->queue.submit(submit_infos, fence); ctx->seqs.clear(); From 4ad243677bca6c97f14dbc187b2116b51fcb7ffd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90inh=20Tr=E1=BB=8Dng=20Huy?= <77562200+huydt84@users.noreply.github.com> Date: Mon, 16 Jun 2025 16:20:59 +0900 Subject: [PATCH 15/26] gguf-py : allow key override when adding value to GGUFWriter (#14194) Co-authored-by: dinhhuy --- gguf-py/gguf/gguf_writer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index adc673e38..54ca0c33f 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -271,7 +271,7 @@ class GGUFWriter: def add_key_value(self, key: str, val: Any, vtype: GGUFValueType, sub_type: GGUFValueType | None = None) -> None: if any(key in kv_data for kv_data in self.kv_data): - raise ValueError(f'Duplicated key name {key!r}') + logger.warning(f'Duplicated key name {key!r}, overwriting it with new value {val!r} of type {vtype.name}') self.kv_data[0][key] = GGUFValue(value=val, type=vtype, sub_type=sub_type) From 0bf49eb668bb95b50e41583e22aaf60ddade1fbe Mon Sep 17 00:00:00 2001 From: Bartowski <3266127+bartowski1182@users.noreply.github.com> Date: Mon, 16 Jun 2025 09:16:06 +0100 Subject: [PATCH 16/26] convert : remove arcee change in convert_hf_to_gguf_update.py (#14207) --- convert_hf_to_gguf_update.py | 1 - 1 file changed, 1 deletion(-) diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index fae4f7260..2f733f097 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -128,7 +128,6 @@ models = [ {"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", }, {"name": "pixtral", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistral-community/pixtral-12b", }, {"name": "seed-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base", }, - {"name": "arcee", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/arcee-ai/AFM-4.5B", }, # TODO confirm final URL ] # some models are known 
to be broken upstream, so we will skip them as exceptions From 3ba0d843c6bd3faea5cf5e53dc7f3c82be20bffb Mon Sep 17 00:00:00 2001 From: Charles Xu Date: Mon, 16 Jun 2025 11:47:57 +0200 Subject: [PATCH 17/26] ggml: Add Android support for GGML_CPU_ALL_VARIANTS (#14206) --- ggml/src/CMakeLists.txt | 34 +++++++++----- ggml/src/ggml-cpu/CMakeLists.txt | 77 +++++++++++++++----------------- 2 files changed, 59 insertions(+), 52 deletions(-) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 726da5e04..17c9366f4 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -311,18 +311,28 @@ if (GGML_CPU_ALL_VARIANTS) # MSVC doesn't support AMX ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) endif() - elseif(GGML_SYSTEM_ARCH STREQUAL "ARM" AND CMAKE_SYSTEM_NAME MATCHES "Linux") - # Many of these features are optional so we build versions with popular - # combinations and name the backends based on the version they were - # first released with - ggml_add_cpu_backend_variant(armv8.0_1) - ggml_add_cpu_backend_variant(armv8.2_1 DOTPROD) - ggml_add_cpu_backend_variant(armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) - ggml_add_cpu_backend_variant(armv8.2_3 DOTPROD FP16_VECTOR_ARITHMETIC SVE) - ggml_add_cpu_backend_variant(armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8) - ggml_add_cpu_backend_variant(armv8.6_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2) - ggml_add_cpu_backend_variant(armv9.2_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SME) - ggml_add_cpu_backend_variant(armv9.2_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2 SME) + elseif(GGML_SYSTEM_ARCH STREQUAL "ARM") + if (CMAKE_SYSTEM_NAME MATCHES "Linux") + # Many of these features are optional so we build versions with popular + # combinations and name the backends based on the version they were + # first released with + ggml_add_cpu_backend_variant(armv8.0_1) + ggml_add_cpu_backend_variant(armv8.2_1 DOTPROD) + ggml_add_cpu_backend_variant(armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) + ggml_add_cpu_backend_variant(armv8.2_3 DOTPROD FP16_VECTOR_ARITHMETIC SVE) + ggml_add_cpu_backend_variant(armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8) + ggml_add_cpu_backend_variant(armv8.6_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2) + ggml_add_cpu_backend_variant(armv9.2_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SME) + ggml_add_cpu_backend_variant(armv9.2_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2 SME) + elseif (CMAKE_SYSTEM_NAME MATCHES "Android") + # Android-specific backends with SoC-compatible feature sets + ggml_add_cpu_backend_variant(android_armv8.0_1) + ggml_add_cpu_backend_variant(android_armv8.2_1 DOTPROD) + ggml_add_cpu_backend_variant(android_armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) + ggml_add_cpu_backend_variant(android_armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC MATMUL_INT8) + else() + message(FATAL_ERROR "Unsupported ARM target OS: ${CMAKE_SYSTEM_NAME}") + endif() else() message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS not yet supported with ${GGML_SYSTEM_ARCH} on ${CMAKE_SYSTEM_NAME}") endif() diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index e4c0fa8d0..3bd1b0507 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -158,48 +158,45 @@ function(ggml_add_cpu_backend_variant_impl tag_name) if (GGML_CPU_ARM_ARCH) list(APPEND ARCH_FLAGS -march=${GGML_CPU_ARM_ARCH}) elseif(GGML_CPU_ALL_VARIANTS) - if (CMAKE_SYSTEM_NAME 
MATCHES "Linux") - # Begin with the lowest baseline - set(ARM_MCPU "armv8-a") - set(ARCH_TAGS "") - set(ARCH_DEFINITIONS "") + # Begin with the lowest baseline + set(ARM_MCPU "armv8-a") + set(ARCH_TAGS "") + set(ARCH_DEFINITIONS "") - # When a feature is selected, bump the MCPU to the first - # version that supported it - if (GGML_INTERNAL_DOTPROD) - set(ARM_MCPU "armv8.2-a") - set(ARCH_TAGS "${ARCH_TAGS}+dotprod") - list(APPEND ARCH_DEFINITIONS GGML_USE_DOTPROD) - endif() - if (GGML_INTERNAL_FP16_VECTOR_ARITHMETIC) - set(ARM_MCPU "armv8.2-a") - set(ARCH_TAGS "${ARCH_TAGS}+fp16") - list(APPEND ARCH_DEFINITIONS GGML_USE_FP16_VECTOR_ARITHMETIC) - endif() - if (GGML_INTERNAL_SVE) - set(ARM_MCPU "armv8.2-a") - set(ARCH_TAGS "${ARCH_TAGS}+sve") - list(APPEND ARCH_DEFINITIONS GGML_USE_SVE) - endif() - if (GGML_INTERNAL_MATMUL_INT8) - set(ARM_MCPU "armv8.6-a") - set(ARCH_TAGS "${ARCH_TAGS}+i8mm") - list(APPEND ARCH_DEFINITIONS GGML_USE_MATMUL_INT8) - endif() - if (GGML_INTERNAL_SVE2) - set(ARM_MCPU "armv8.6-a") - set(ARCH_TAGS "${ARCH_TAGS}+sve2") - list(APPEND ARCH_DEFINITIONS GGML_USE_SVE2) - endif() - if (GGML_INTERNAL_SME) - set(ARM_MCPU "armv9.2-a") - set(ARCH_TAGS "${ARCH_TAGS}+sme") - list(APPEND ARCH_DEFINITIONS GGML_USE_SME) - endif() - - list(APPEND ARCH_FLAGS "-march=${ARM_MCPU}${ARCH_TAGS}") - ggml_add_cpu_backend_features(${GGML_CPU_NAME} arm ${ARCH_DEFINITIONS}) + # When a feature is selected, bump the MCPU to the first + # version that supported it + if (GGML_INTERNAL_DOTPROD) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+dotprod") + list(APPEND ARCH_DEFINITIONS GGML_USE_DOTPROD) endif() + if (GGML_INTERNAL_FP16_VECTOR_ARITHMETIC) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+fp16") + list(APPEND ARCH_DEFINITIONS GGML_USE_FP16_VECTOR_ARITHMETIC) + endif() + if (GGML_INTERNAL_SVE) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+sve") + list(APPEND ARCH_DEFINITIONS GGML_USE_SVE) + endif() + if (GGML_INTERNAL_MATMUL_INT8) + set(ARM_MCPU "armv8.6-a") + set(ARCH_TAGS "${ARCH_TAGS}+i8mm") + list(APPEND ARCH_DEFINITIONS GGML_USE_MATMUL_INT8) + endif() + if (GGML_INTERNAL_SVE2) + set(ARM_MCPU "armv8.6-a") + set(ARCH_TAGS "${ARCH_TAGS}+sve2") + list(APPEND ARCH_DEFINITIONS GGML_USE_SVE2) + endif() + if (GGML_INTERNAL_SME) + set(ARM_MCPU "armv9.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+sme") + list(APPEND ARCH_DEFINITIONS GGML_USE_SME) + endif() + list(APPEND ARCH_FLAGS "-march=${ARM_MCPU}${ARCH_TAGS}") + ggml_add_cpu_backend_features(${GGML_CPU_NAME} arm ${ARCH_DEFINITIONS}) endif() endif() From d3e64b9f490cee41b7b9aa275dae2f6568ae3054 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 16 Jun 2025 14:14:00 +0300 Subject: [PATCH 18/26] llama : rework embeddings logic (#14208) * llama : rework embeddings logic ggml-ci * cont : fix rerank ggml-ci * cont : engrish [no ci] * cont : fix rerank ggml-ci * server : support both embeddings and completions with single model ggml-ci * cont : avoid embeddings_org ggml-ci --- common/arg.cpp | 9 +-- common/common.cpp | 62 +++++++++--------- common/common.h | 1 - examples/gritlm/gritlm.cpp | 8 ++- include/llama.h | 12 ++-- src/llama-batch.cpp | 30 +++++++-- src/llama-batch.h | 3 +- src/llama-context.cpp | 26 ++++---- src/llama-kv-cache-recurrent.cpp | 8 +-- src/llama-kv-cache-recurrent.h | 2 +- src/llama-kv-cache-unified-iswa.cpp | 4 +- src/llama-kv-cache-unified-iswa.h | 2 +- src/llama-kv-cache-unified.cpp | 4 +- src/llama-kv-cache-unified.h | 2 +- src/llama-memory.h | 2 +- tools/server/server.cpp | 98 
+++++++++++++++++++---------- 16 files changed, 159 insertions(+), 114 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index 0d0daa361..231de227a 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -988,10 +988,6 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context params.tensor_buft_overrides.push_back({nullptr, nullptr}); } - if (params.reranking && params.embedding) { - throw std::invalid_argument("error: either --embedding or --reranking can be specified, but not both"); - } - if (!params.chat_template.empty() && !common_chat_verify_template(params.chat_template, params.use_jinja)) { throw std::runtime_error(string_format( "error: the supplied chat template is not supported: %s%s\n", @@ -2747,9 +2743,10 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_EMBEDDINGS")); add_opt(common_arg( {"--reranking", "--rerank"}, - string_format("enable reranking endpoint on server (default: %s)", params.reranking ? "enabled" : "disabled"), + string_format("enable reranking endpoint on server (default: %s)", "disabled"), [](common_params & params) { - params.reranking = true; + params.embedding = true; + params.pooling_type = LLAMA_POOLING_TYPE_RANK; } ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_RERANKING")); add_opt(common_arg( diff --git a/common/common.cpp b/common/common.cpp index e23887c70..5b465150f 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -897,34 +897,6 @@ struct common_init_result common_init_from_params(common_params & params) { const llama_vocab * vocab = llama_model_get_vocab(model); - if (params.reranking) { - bool ok = true; - - if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) { - LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__); - ok = false; - } - - bool has_eos = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL; - bool has_sep = llama_vocab_sep(vocab) != LLAMA_TOKEN_NULL; - - if (!has_eos && !has_sep) { - LOG_WRN("%s: warning: vocab does not have an EOS token or SEP token, reranking will not work\n", __func__); - ok = false; - } else if (!has_eos) { - LOG_WRN("%s: warning: vocab does not have an EOS token, using SEP token as fallback\n", __func__); - } else if (!has_sep) { - LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__); - ok = false; - } - - if (!ok) { - llama_model_free(model); - - return iparams; - } - } - auto cparams = common_context_params_to_llama(params); llama_context * lctx = llama_init_from_model(model, cparams); @@ -966,6 +938,35 @@ struct common_init_result common_init_from_params(common_params & params) { } } + if (llama_pooling_type(lctx) == LLAMA_POOLING_TYPE_RANK) { + bool ok = true; + + if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) { + LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__); + ok = false; + } + + bool has_eos = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL; + bool has_sep = llama_vocab_sep(vocab) != LLAMA_TOKEN_NULL; + + if (!has_eos && !has_sep) { + LOG_WRN("%s: warning: vocab does not have an EOS token or SEP token, reranking will not work\n", __func__); + ok = false; + } else if (!has_eos) { + LOG_WRN("%s: warning: vocab does not have an EOS token, using SEP token as fallback\n", __func__); + } else if (!has_sep) { + LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__); + ok = false; + } + + if (!ok) { + llama_free(lctx); + 
llama_model_free(model); + + return iparams; + } + } + // load and optionally apply lora adapters for (auto & la : params.lora_adapters) { llama_adapter_lora_ptr lora; @@ -1143,11 +1144,6 @@ struct llama_context_params common_context_params_to_llama(const common_params & cparams.op_offload = !params.no_op_offload; cparams.swa_full = params.swa_full; - if (params.reranking) { - cparams.embeddings = true; - cparams.pooling_type = LLAMA_POOLING_TYPE_RANK; - } - cparams.type_k = params.cache_type_k; cparams.type_v = params.cache_type_v; diff --git a/common/common.h b/common/common.h index f26724b6e..00b6ca03a 100644 --- a/common/common.h +++ b/common/common.h @@ -355,7 +355,6 @@ struct common_params { int32_t embd_normalize = 2; // normalisation for embeddings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm) std::string embd_out = ""; // empty = default, "array" = [[],[]...], "json" = openai style, "json+" = same "json" + cosine similarity matrix std::string embd_sep = "\n"; // separator of embeddings - bool reranking = false; // enable reranking support on server // server params int32_t port = 8080; // server listens on this network port diff --git a/examples/gritlm/gritlm.cpp b/examples/gritlm/gritlm.cpp index 041da61c7..bdab052c3 100644 --- a/examples/gritlm/gritlm.cpp +++ b/examples/gritlm/gritlm.cpp @@ -41,12 +41,11 @@ static std::vector> encode(llama_context * ctx, const std::ve // add input to batch (this increments n_tokens) for (int32_t j = 0; j < n_toks; j++) { - common_batch_add(batch, inputs[j], j, { 0 }, j >= n_inst); + common_batch_add(batch, inputs[j], j, { 0 }, true); } // clear previous kv_cache values (irrelevant for embeddings) llama_memory_clear(llama_get_memory(ctx), true); - llama_set_embeddings(ctx, true); llama_set_causal_attn(ctx, false); // run model @@ -103,7 +102,6 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std llama_token eos_token = llama_vocab_eos(vocab); llama_memory_clear(llama_get_memory(ctx), true); - llama_set_embeddings(ctx, false); llama_set_causal_attn(ctx, true); llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1); @@ -166,6 +164,8 @@ int main(int argc, char * argv[]) { llama_model_params mparams = common_model_params_to_llama(params); llama_context_params cparams = common_context_params_to_llama(params); + cparams.embeddings = true; + llama_backend_init(); llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams); @@ -213,6 +213,8 @@ int main(int argc, char * argv[]) { std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[1].c_str(), documents[1].c_str(), cosine_sim_q1_d1); } + llama_set_embeddings(ctx, false); + // ### Generation ### // GritLM models are not finetuned with system prompts, as you can just include system-like instructions together with your user instruction { diff --git a/include/llama.h b/include/llama.h index d5e4cef68..b086b68e6 100644 --- a/include/llama.h +++ b/include/llama.h @@ -254,7 +254,10 @@ extern "C" { // - seq_id : the sequence to which the respective token belongs // (if set to NULL, the sequence ID will be assumed to be 0) // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output - // (if set to NULL, only the logits for last token will be returned) + // (if set to NULL: + // - if embeddings: all tokens are output + // - if not: only the last token is output + // ) // typedef struct llama_batch { int32_t n_tokens; @@ -262,8 +265,8 @@ extern "C" { llama_token 
* token; float * embd; llama_pos * pos; - int32_t * n_seq_id; // TODO: remove, should belong to only 1 sequence - llama_seq_id ** seq_id; // TODO: become llama_seq_id * seq_id; + int32_t * n_seq_id; + llama_seq_id ** seq_id; int8_t * logits; // TODO: rename this to "output" } llama_batch; @@ -961,8 +964,7 @@ extern "C" { // Get the number of threads used for prompt and batch processing (multiple token). LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx); - // Set whether the model is in embeddings mode or not - // If true, embeddings will be returned but logits will not + // Set whether the context outputs embeddings or not LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings); // Set whether to use causal attention or not diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index a9f4a3d4c..8b6d14fe8 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -299,7 +299,8 @@ llama_batch_allocr::llama_batch_allocr() { bool llama_batch_allocr::init( const llama_batch & batch_inp, const llama_vocab & vocab, - const llama_memory_i * memory) { + const llama_memory_i * memory, + bool embd_all) { clear(); batch = batch_inp; @@ -378,10 +379,31 @@ bool llama_batch_allocr::init( } if (!batch.logits) { - // by default return the output only for the last token - output.resize(batch.n_tokens); - output[output.size() - 1] = true; + if (embd_all) { + // return the output for all tokens + output.resize(batch.n_tokens, true); + } else { + // return the output only for the last token + output.resize(batch.n_tokens, false); + output[output.size() - 1] = true; + } + batch.logits = output.data(); + } else if (embd_all) { + bool warn = false; + + for (int32_t i = 0; i < batch.n_tokens; ++i) { + if (batch.logits[i] == 0) { + warn = true; + } + } + + if (warn) { + LLAMA_LOG_WARN("%s: embeddings required but some input tokens were not marked as outputs -> overriding\n", __func__); + + output.resize(batch.n_tokens, true); + batch.logits = output.data(); + } } // diff --git a/src/llama-batch.h b/src/llama-batch.h index 04501ce5d..a555c1572 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -88,7 +88,8 @@ public: bool init( const llama_batch & batch_inp, const llama_vocab & vocab, - const llama_memory_i * memory); + const llama_memory_i * memory, + bool embd_all); const llama_batch & get_batch() const; diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 3a113d1bc..f56a58e9b 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -728,7 +728,7 @@ int llama_context::encode(const llama_batch & batch_inp) { } // note: during encode, we always pass the full sequence starting from pos = 0 - if (!batch_allocr->init(batch_inp, model.vocab, nullptr)) { + if (!batch_allocr->init(batch_inp, model.vocab, nullptr, true)) { LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); return -1; } @@ -894,7 +894,10 @@ int llama_context::decode(const llama_batch & batch_inp) { return -1; } - if (!batch_allocr->init(batch_inp, model.vocab, memory.get())) { + // when computing embeddings, all tokens are output + const bool embd_all = cparams.embeddings; + + if (!batch_allocr->init(batch_inp, model.vocab, memory.get(), embd_all)) { LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); return -1; } @@ -911,12 +914,9 @@ int llama_context::decode(const llama_batch & batch_inp) { GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT - // this indicates we are doing pooled embedding - const bool embd_pooled = 
cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - const uint32_t n_outputs_all = batch_allocr->get_n_outputs(); - if (embd_pooled) { + if (embd_all) { // require that all tokens are output if (n_outputs_all != n_tokens_all) { LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n", @@ -945,7 +945,7 @@ int llama_context::decode(const llama_batch & batch_inp) { llama_memory_state_ptr mstate; while (true) { - mstate = memory->init_batch(batch, cparams.n_ubatch, embd_pooled); + mstate = memory->init_batch(batch, cparams.n_ubatch, embd_all); if (!mstate) { return -2; } @@ -1058,7 +1058,7 @@ int llama_context::decode(const llama_batch & batch_inp) { // ggml_graph_dump_dot(gf, NULL, "llama.dot"); //} - auto * t_logits = cparams.embeddings ? nullptr : res->get_logits(); + auto * t_logits = res->get_logits(); auto * t_embd = cparams.embeddings ? res->get_embd() : nullptr; if (t_embd && res->get_embd_pooled()) { @@ -1222,9 +1222,8 @@ uint32_t llama_context::output_reserve(int32_t n_outputs) { const auto n_vocab = vocab.n_tokens(); const auto n_embd = hparams.n_embd; - // TODO: use a per-batch flag for logits presence instead - bool has_logits = !cparams.embeddings; - bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE); + bool has_logits = true; + bool has_embd = cparams.embeddings; // TODO: hacky enc-dec support if (model.arch == LLM_ARCH_T5) { @@ -2044,14 +2043,11 @@ void llama_context::opt_epoch_iter( n_queued_tokens += n_tokens_all; - // this indicates we are doing pooled embedding - const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - embd_seq.clear(); uint32_t n_outputs_all = n_tokens_all; - auto mstate = memory->init_batch(batch, cparams.n_ubatch, embd_pooled); + auto mstate = memory->init_batch(batch, cparams.n_ubatch, true); if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__); break; diff --git a/src/llama-kv-cache-recurrent.cpp b/src/llama-kv-cache-recurrent.cpp index de23b4ad2..8f6f120f6 100644 --- a/src/llama-kv-cache-recurrent.cpp +++ b/src/llama-kv-cache-recurrent.cpp @@ -359,9 +359,7 @@ llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const { return result; } -llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled) { - GGML_UNUSED(embd_pooled); - +llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) { auto sbatch = llama_sbatch(batch, hparams.n_embd, false); std::vector ubatches; @@ -369,8 +367,8 @@ llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & while (sbatch.n_tokens > 0) { llama_ubatch ubatch; - if (embd_pooled) { - // Pooled embeddings cannot be split across ubatches (yet) + if (embd_all) { + // if all tokens are output, split by sequence ubatch = sbatch.split_seq(n_ubatch); } else { ubatch = sbatch.split_equal(n_ubatch); diff --git a/src/llama-kv-cache-recurrent.h b/src/llama-kv-cache-recurrent.h index d7c02ea87..f9b01a651 100644 --- a/src/llama-kv-cache-recurrent.h +++ b/src/llama-kv-cache-recurrent.h @@ -32,7 +32,7 @@ public: llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled) override; + bool embd_all) override; llama_memory_state_ptr init_full() override; diff --git a/src/llama-kv-cache-unified-iswa.cpp 
b/src/llama-kv-cache-unified-iswa.cpp index 9814f7663..a4a4c2b1b 100644 --- a/src/llama-kv-cache-unified-iswa.cpp +++ b/src/llama-kv-cache-unified-iswa.cpp @@ -95,8 +95,8 @@ llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const { return kv_swa->seq_pos_max(seq_id); } -llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled) { - GGML_UNUSED(embd_pooled); +llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) { + GGML_UNUSED(embd_all); // first try simple split do { diff --git a/src/llama-kv-cache-unified-iswa.h b/src/llama-kv-cache-unified-iswa.h index d114c7378..6e941e1a4 100644 --- a/src/llama-kv-cache-unified-iswa.h +++ b/src/llama-kv-cache-unified-iswa.h @@ -34,7 +34,7 @@ public: llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled) override; + bool embd_all) override; llama_memory_state_ptr init_full() override; diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index b17936abd..3b3767985 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -310,8 +310,8 @@ llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const { llama_memory_state_ptr llama_kv_cache_unified::init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled) { - GGML_UNUSED(embd_pooled); + bool embd_all) { + GGML_UNUSED(embd_all); do { auto sbatch = llama_sbatch(batch, hparams.n_embd, true); diff --git a/src/llama-kv-cache-unified.h b/src/llama-kv-cache-unified.h index d6dcd19f2..d96571d95 100644 --- a/src/llama-kv-cache-unified.h +++ b/src/llama-kv-cache-unified.h @@ -59,7 +59,7 @@ public: llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled) override; + bool embd_all) override; llama_memory_state_ptr init_full() override; diff --git a/src/llama-memory.h b/src/llama-memory.h index 42e226dc0..24668f861 100644 --- a/src/llama-memory.h +++ b/src/llama-memory.h @@ -73,7 +73,7 @@ struct llama_memory_i { virtual llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled) = 0; + bool embd_all) = 0; // simulate full cache, used for allocating worst-case compute buffers virtual llama_memory_state_ptr init_full() = 0; diff --git a/tools/server/server.cpp b/tools/server/server.cpp index 626c58bd3..c08e42125 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -88,6 +88,26 @@ enum error_type { ERROR_TYPE_NOT_SUPPORTED, // custom error }; +static bool server_task_type_need_embd(server_task_type task_type) { + switch (task_type) { + case SERVER_TASK_TYPE_EMBEDDING: + case SERVER_TASK_TYPE_RERANK: + return true; + default: + return false; + } +} + +static bool server_task_type_need_logits(server_task_type task_type) { + switch (task_type) { + case SERVER_TASK_TYPE_COMPLETION: + case SERVER_TASK_TYPE_INFILL: + return true; + default: + return false; + } +} + struct slot_params { bool stream = true; bool cache_prompt = true; // remember the prompt to avoid reprocessing all prompt @@ -1330,13 +1350,16 @@ struct server_slot { n_draft_accepted = 0; } - bool is_non_causal() const { - return task_type == SERVER_TASK_TYPE_EMBEDDING || task_type == SERVER_TASK_TYPE_RERANK; + bool need_embd() const { + return server_task_type_need_embd(task_type); + } + + bool need_logits() const { + return server_task_type_need_logits(task_type); } bool 
can_batch_with(server_slot & other_slot) const {
-        return is_non_causal() == other_slot.is_non_causal()
-            && are_lora_equal(lora, other_slot.lora);
+        return task_type == other_slot.task_type && are_lora_equal(lora, other_slot.lora);
     }

     bool has_budget(const common_params & global_params) {
@@ -1480,7 +1503,6 @@ struct server_slot {
             {"n_ctx",         n_ctx},
             {"speculative",   can_speculate()},
             {"is_processing", is_processing()},
-            {"non_causal",    is_non_causal()},
             {"params",        params.to_json()},
             {"prompt",        prompt_tokens.detokenize(ctx, true)},
             {"next_token",
@@ -1907,6 +1929,14 @@ struct server_context {
         llama_batch_free(batch);
     }

+    // if the context does not have a memory module then all embeddings have to be computed within a single ubatch
+    // also we cannot split if the pooling would require any past tokens
+    bool can_split() const {
+        return
+            !llama_get_embeddings(ctx) ||
+            (llama_get_memory(ctx) && llama_pooling_type(ctx) == LLAMA_POOLING_TYPE_LAST);
+    }
+
     bool load_model(const common_params & params) {
         SRV_INF("loading model '%s'\n", params.model.path.c_str());

@@ -2730,6 +2760,7 @@ struct server_context {
                     queue_tasks.defer(std::move(task));
                     break;
                 }
+
                 if (slot->is_processing()) {
                     // if requested slot is unavailable, we defer this task for processing later
                     SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
@@ -3092,7 +3123,14 @@ struct server_context {
                         continue;
                     }

-                    if (slot.is_non_causal()) {
+                    // TODO: support memory-less logits computation
+                    if (slot.need_logits() && !llama_get_memory(ctx)) {
+                        slot.release();
+                        send_error(slot, "the current context does not support logits computation, skipping", ERROR_TYPE_SERVER);
+                        continue;
+                    }
+
+                    if (!can_split()) {
                         if (slot.n_prompt_tokens > n_ubatch) {
                             slot.release();
                             send_error(slot, "input is too large to process. increase the physical batch size", ERROR_TYPE_SERVER);
@@ -3227,8 +3265,7 @@ struct server_context {
                     }

                     if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0) {
-                        // we have to evaluate at least 1 token to generate logits.
- SLT_WRN(slot, "need to evaluate at least 1 token to generate logits, n_past = %d, n_prompt_tokens = %d\n", slot.n_past, slot.n_prompt_tokens); + SLT_WRN(slot, "need to evaluate at least 1 token for each active slot, n_past = %d, n_prompt_tokens = %d\n", slot.n_past, slot.n_prompt_tokens); slot.n_past--; } @@ -3236,8 +3273,7 @@ struct server_context { slot.n_prompt_tokens_processed = 0; } - // non-causal tasks require to fit the entire prompt in the physical batch - if (slot.is_non_causal()) { + if (!can_split()) { // cannot fit the prompt in the current batch - will try next iter if (batch.n_tokens + slot.n_prompt_tokens > n_batch) { continue; @@ -3259,8 +3295,7 @@ struct server_context { slot.cache_tokens.keep_first(slot.n_past); // check if we should process the image - if (slot.n_past < slot.n_prompt_tokens - && slot.prompt_tokens[slot.n_past] == LLAMA_TOKEN_NULL) { + if (slot.n_past < slot.n_prompt_tokens && slot.prompt_tokens[slot.n_past] == LLAMA_TOKEN_NULL) { // process the image int32_t new_n_past; int32_t res = slot.prompt_tokens.process_chunk(ctx, mctx, slot.n_past, slot.id, new_n_past); @@ -3291,8 +3326,8 @@ struct server_context { break; // end of text chunk } - // without pooling, we want to output the embeddings for all the tokens in the batch - const bool need_embd = slot.task_type == SERVER_TASK_TYPE_EMBEDDING && llama_pooling_type(slot.ctx) == LLAMA_POOLING_TYPE_NONE; + // embedding requires all tokens in the batch to be output + const bool need_embd = server_task_type_need_embd(slot.task_type); common_batch_add(batch, cur_tok, slot.n_past, { slot.id }, need_embd); slot.cache_tokens.push_back(cur_tok); @@ -3346,17 +3381,15 @@ struct server_context { SRV_DBG("decoding batch, n_tokens = %d\n", batch.n_tokens); if (slot_batched) { - // make sure we're in the right embedding mode - llama_set_embeddings(ctx, slot_batched->is_non_causal()); // apply lora, only need to do it once per batch common_set_adapter_lora(ctx, slot_batched->lora); - } - const bool do_encode = (params_base.embedding || params_base.reranking); + llama_set_embeddings(ctx, slot_batched->need_embd()); + } // pad the batch so that batch.n_tokens >= n_slots // TODO: temporary workaround for https://github.com/ggml-org/llama.cpp/issues/13689 - if (do_encode) { + if (slot_batched->need_embd()) { const int n_slots = slots.size(); if (batch.n_tokens < n_slots) { @@ -3378,8 +3411,11 @@ struct server_context { SRV_WRN("adding %d dummy tokens to the batch, seq_id = %d\n", n_add, seq_id); for (int j = 0; j < n_add; ++j) { - common_batch_add(batch, 0, j, { seq_id }, false); + common_batch_add(batch, 0, j, { seq_id }, true); } + + slots[seq_id].cache_tokens.clear(); + llama_memory_seq_rm(llama_get_memory(ctx), seq_id, -1, -1); } } @@ -4174,11 +4210,6 @@ int main(int argc, char ** argv) { oaicompat_type oaicompat) -> void { GGML_ASSERT(type == SERVER_TASK_TYPE_COMPLETION || type == SERVER_TASK_TYPE_INFILL); - if (ctx_server.params_base.embedding) { - res_error(res, format_error_response("This server does not support completions. 
Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED)); - return; - } - auto completion_id = gen_chatcmplid(); std::unordered_set task_ids; try { @@ -4433,12 +4464,8 @@ int main(int argc, char ** argv) { OAICOMPAT_TYPE_NONE); // infill is not OAI compatible }; - const auto handle_chat_completions = [&ctx_server, &res_error, &handle_completions_impl](const httplib::Request & req, httplib::Response & res) { + const auto handle_chat_completions = [&ctx_server, &handle_completions_impl](const httplib::Request & req, httplib::Response & res) { LOG_DBG("request: %s\n", req.body.c_str()); - if (ctx_server.params_base.embedding) { - res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED)); - return; - } auto body = json::parse(req.body); std::vector files; @@ -4566,13 +4593,18 @@ int main(int argc, char ** argv) { }; const auto handle_embeddings_impl = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res, oaicompat_type oaicompat) { - const json body = json::parse(req.body); + if (!ctx_server.params_base.embedding) { + res_error(res, format_error_response("This server does not support embeddings. Start it with `--embeddings`", ERROR_TYPE_NOT_SUPPORTED)); + return; + } if (oaicompat != OAICOMPAT_TYPE_NONE && llama_pooling_type(ctx_server.ctx) == LLAMA_POOLING_TYPE_NONE) { res_error(res, format_error_response("Pooling type 'none' is not OAI compatible. Please use a different pooling type", ERROR_TYPE_INVALID_REQUEST)); return; } + const json body = json::parse(req.body); + // for the shape of input/content, see tokenize_input_prompts() json prompt; if (body.count("input") != 0) { @@ -4662,8 +4694,8 @@ int main(int argc, char ** argv) { }; const auto handle_rerank = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) { - if (!ctx_server.params_base.reranking || ctx_server.params_base.embedding) { - res_error(res, format_error_response("This server does not support reranking. Start it with `--reranking` and without `--embedding`", ERROR_TYPE_NOT_SUPPORTED)); + if (!ctx_server.params_base.embedding || ctx_server.params_base.pooling_type != LLAMA_POOLING_TYPE_RANK) { + res_error(res, format_error_response("This server does not support reranking. 
Start it with `--reranking`", ERROR_TYPE_NOT_SUPPORTED)); return; } From 7d6d91babfa129906b39c9099eca4234c44f4f1e Mon Sep 17 00:00:00 2001 From: uvos Date: Mon, 16 Jun 2025 13:47:38 +0200 Subject: [PATCH 19/26] HIP: disable rocwmma on gfx12 by default until rocm 7.0 (#14202) --- ggml/CMakeLists.txt | 1 + ggml/src/ggml-cuda/common.cuh | 4 ++-- ggml/src/ggml-hip/CMakeLists.txt | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index 727139cf3..7b398ae8e 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -172,6 +172,7 @@ option(GGML_HIP "ggml: use HIP" option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF) option(GGML_HIP_NO_VMM "ggml: do not try to use HIP VMM" ON) option(GGML_HIP_ROCWMMA_FATTN "ggml: enable rocWMMA for FlashAttention" OFF) +option(GGML_HIP_FORCE_ROCWMMA_FATTN_GFX12 "ggml: enable rocWMMA FlashAttention on GFX12" OFF) option(GGML_VULKAN "ggml: use Vulkan" OFF) option(GGML_VULKAN_CHECK_RESULTS "ggml: run Vulkan op checks" OFF) option(GGML_VULKAN_DEBUG "ggml: enable Vulkan debug output" OFF) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 563a7828b..c14a12f54 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -207,9 +207,9 @@ typedef float2 dfloat2; #define FP16_MMA_AVAILABLE #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA -#if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || defined(RDNA4)) +#if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || (defined(GGML_HIP_ROCWMMA_FATTN_GFX12) && defined(RDNA4))) #define FP16_MMA_AVAILABLE -#endif // defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || defined(RDNA4)) +#endif // defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || (defined(GGML_HIP_ROCWMMA_FATTN_GFX12) && defined(RDNA4))) #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING #define NEW_MMA_AVAILABLE diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt index 1fe8fe3b8..e29df9856 100644 --- a/ggml/src/ggml-hip/CMakeLists.txt +++ b/ggml/src/ggml-hip/CMakeLists.txt @@ -113,6 +113,10 @@ if (GGML_HIP_ROCWMMA_FATTN) add_compile_definitions(GGML_HIP_ROCWMMA_FATTN) endif() +if (GGML_HIP_FORCE_ROCWMMA_FATTN_GFX12 OR ${hip_VERSION} VERSION_GREATER_EQUAL 7.0) + add_compile_definitions(GGML_HIP_ROCWMMA_FATTN_GFX12) +endif() + if (NOT GGML_CUDA_FA) add_compile_definitions(GGML_CUDA_NO_FA) endif() From ad590be98c83217fcf1a101d78d9ab389fd5dc0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90inh=20Tr=E1=BB=8Dng=20Huy?= <77562200+huydt84@users.noreply.github.com> Date: Mon, 16 Jun 2025 21:53:41 +0900 Subject: [PATCH 20/26] model : add NeoBERT (#14164) * convert neobert model to gguf * add inference graph * fix flake8 lint * followed reviewer suggestions Co-authored-by: Georgi Gerganov * follow reviewers suggestions Co-authored-by: Georgi Gerganov * override NeoBERT feed-forward length --------- Co-authored-by: dinhhuy Co-authored-by: Georgi Gerganov --- convert_hf_to_gguf.py | 30 ++++++- gguf-py/gguf/constants.py | 14 +++ gguf-py/gguf/tensor_mapping.py | 9 ++ src/llama-arch.cpp | 16 ++++ src/llama-arch.h | 1 + src/llama-model.cpp | 153 +++++++++++++++++++++++++++++++++ 6 files changed, 222 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 2232a7d82..58e455ae6 100755 --- 
a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -519,7 +519,7 @@ class TextModel(ModelBase): def set_gguf_parameters(self): self.gguf_writer.add_block_count(self.block_count) - if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions"], optional=True)) is not None: + if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions", "max_length"], optional=True)) is not None: self.gguf_writer.add_context_length(n_ctx) logger.info(f"gguf: context length = {n_ctx}") @@ -4076,6 +4076,34 @@ class NomicBertModel(BertModel): raise ValueError(f"unknown tokenizer: {toktyp}") +@ModelBase.register("NeoBERT", "NeoBERTLMHead", "NeoBERTForSequenceClassification") +class NeoBert(BertModel): + model_arch = gguf.MODEL_ARCH.NEO_BERT + + def set_gguf_parameters(self): + super().set_gguf_parameters() + + # NeoBERT uses 2/3 of the intermediate size as feed forward length + self.gguf_writer.add_feed_forward_length(int(2 * self.hparams["intermediate_size"] / 3)) + self.gguf_writer.add_rope_freq_base(10000.0) # default value for NeoBERT + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE) + + f_rms_eps = self.hparams.get("norm_eps", 1e-6) # default value for NeoBERT + self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps) + logger.info(f"gguf: rms norm epsilon = {f_rms_eps}") + + self.gguf_writer.add_pooling_type(gguf.PoolingType.CLS) # https://huggingface.co/chandar-lab/NeoBERT#how-to-use + + def modify_tensors(self, data_torch, name, bid): + if name.startswith("decoder."): + return [] + + if name.startswith("model."): + name = name[6:] + + return super().modify_tensors(data_torch, name, bid) + + @ModelBase.register("XLMRobertaModel", "XLMRobertaForSequenceClassification") class XLMRobertaModel(BertModel): model_arch = gguf.MODEL_ARCH.BERT diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 9b2143c7c..834a1d5e1 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -291,6 +291,7 @@ class MODEL_ARCH(IntEnum): BERT = auto() NOMIC_BERT = auto() NOMIC_BERT_MOE = auto() + NEO_BERT = auto() JINA_BERT_V2 = auto() BLOOM = auto() STABLELM = auto() @@ -573,6 +574,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { MODEL_ARCH.BERT: "bert", MODEL_ARCH.NOMIC_BERT: "nomic-bert", MODEL_ARCH.NOMIC_BERT_MOE: "nomic-bert-moe", + MODEL_ARCH.NEO_BERT: "neo-bert", MODEL_ARCH.JINA_BERT_V2: "jina-bert-v2", MODEL_ARCH.BLOOM: "bloom", MODEL_ARCH.STABLELM: "stablelm", @@ -1081,6 +1083,18 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.FFN_UP_EXP, MODEL_TENSOR.LAYER_OUT_NORM, ], + MODEL_ARCH.NEO_BERT: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.ENC_OUTPUT_NORM, + MODEL_TENSOR.CLS, + MODEL_TENSOR.CLS_OUT, + ], MODEL_ARCH.JINA_BERT_V2: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.TOKEN_EMBD_NORM, diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 5e3f01754..79f044d2a 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -31,6 +31,7 @@ class TensorNameMap: "model.embeddings", # rwkv7 "model.word_embeddings", # bailingmoe "language_model.model.embed_tokens", # llama4 + "encoder", # neobert ), # Token type embeddings @@ -134,6 +135,7 @@ class TensorNameMap: "rwkv.blocks.{bid}.ln1", # rwkv6 "model.layers.{bid}.ln1", # rwkv7 "model.layers.{bid}.input_layernorm", # llama4 + "transformer_encoder.{bid}.attention_norm", # neobert 
), # Attention norm 2 @@ -161,6 +163,7 @@ class TensorNameMap: "model.layers.{bid}.self_attn.qkv_proj", # phi3 "encoder.layers.{bid}.self_attention.query_key_value", # chatglm "transformer.layers.{bid}.attn.qkv_proj", # openelm + "transformer_encoder.{bid}.qkv", # neobert ), # Attention query @@ -236,6 +239,7 @@ class TensorNameMap: "transformer.layers.{bid}.attn.out_proj", # openelm "transformer.h.{bid}.attn.attention.out_proj", # exaone "model.layers.{bid}.self_attn.o_proj", # llama4 + "transformer_encoder.{bid}.wo", # neobert ), # Attention output norm @@ -276,6 +280,7 @@ class TensorNameMap: "encoder.layers.{bid}.post_attention_layernorm", # chatglm "transformer.layers.{bid}.ffn_norm", # openelm "model.layers.{bid}.post_attention_layernorm", # llama4 + "transformer_encoder.{bid}.ffn_norm", # neobert ), # Post feed-forward norm @@ -340,6 +345,7 @@ class TensorNameMap: "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm "transformer.h.{bid}.mlp.c_fc_1", # exaone "model.layers.{bid}.feed_forward.up_proj", # llama4 + "transformer_encoder.{bid}.ffn.w12", # neobert ), MODEL_TENSOR.FFN_UP_EXP: ( @@ -422,6 +428,7 @@ class TensorNameMap: "encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm "model.layers.h.{bid}.mlp.c_proj", # exaone "model.layers.{bid}.feed_forward.down_proj", # llama4 + "transformer_encoder.{bid}.ffn.w3", # neobert ), MODEL_TENSOR.FFN_DOWN_EXP: ( @@ -832,12 +839,14 @@ class TensorNameMap: # TODO: these do not belong to block_mappings_cfg - move them to mappings_cfg MODEL_TENSOR.ENC_OUTPUT_NORM: ( "encoder.final_layer_norm", # t5 + "layer_norm", # neobert ), MODEL_TENSOR.CLS: ( "classifier", # jina "classifier.dense", # roberta "pre_classifier", # distillbert + "dense", # neobert ), MODEL_TENSOR.CLS_OUT: ( diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index a3e7c861c..de8d289cf 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -20,6 +20,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_BERT, "bert" }, { LLM_ARCH_NOMIC_BERT, "nomic-bert" }, { LLM_ARCH_NOMIC_BERT_MOE, "nomic-bert-moe" }, + { LLM_ARCH_NEO_BERT, "neo-bert" }, { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" }, { LLM_ARCH_BLOOM, "bloom" }, { LLM_ARCH_STABLELM, "stablelm" }, @@ -514,6 +515,21 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, }, }, + { + LLM_ARCH_NEO_BERT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" }, + { LLM_TENSOR_CLS, "cls" }, + { LLM_TENSOR_CLS_OUT, "cls.output" }, + }, + }, { LLM_ARCH_JINA_BERT_V2, { diff --git a/src/llama-arch.h b/src/llama-arch.h index 168fdcb40..3e8a61da3 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -24,6 +24,7 @@ enum llm_arch { LLM_ARCH_BERT, LLM_ARCH_NOMIC_BERT, LLM_ARCH_NOMIC_BERT_MOE, + LLM_ARCH_NEO_BERT, LLM_ARCH_JINA_BERT_V2, LLM_ARCH_BLOOM, LLM_ARCH_STABLELM, diff --git a/src/llama-model.cpp b/src/llama-model.cpp index dcc8b0be7..a5eb122f9 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -749,6 +749,16 @@ void llama_model::load_hparams(llama_model_loader & ml) { } } } break; + case LLM_ARCH_NEO_BERT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + ml.get_key(LLM_KV_POOLING_TYPE, 
hparams.pooling_type); + + if (hparams.n_layer == 28) { + type = LLM_TYPE_250M; + } + } break; case LLM_ARCH_BLOOM: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); @@ -2212,6 +2222,32 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}, 0); } } break; + case LLM_ARCH_NEO_BERT: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED); + cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {n_embd}, TENSOR_NOT_REQUIRED); + + cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED); + cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"), {hparams.n_cls_out}, TENSOR_NOT_REQUIRED); + + output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff*2}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + } + } break; case LLM_ARCH_JINA_BERT_V2: { tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); // word_embeddings @@ -6182,6 +6218,117 @@ struct llm_build_bert : public llm_graph_context { } }; +struct llm_build_neo_bert : public llm_graph_context { + llm_build_neo_bert(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + ggml_tensor * inp_pos = build_inp_pos(); + + // construct input embeddings (token, type, position) + inpL = build_inp_embd(model.tok_embd); + cb(inpL, "inp_embd", -1); + + auto * inp_attn = build_attn_inp_no_cache(); + + // iterate layers + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * cur = inpL; + + ggml_tensor * Qcur; + ggml_tensor * Kcur; + ggml_tensor * Vcur; + + // pre-norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + + // self-attention + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + // RoPE + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, 
Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, nullptr, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + cb(cur, "kqv_out", il); + + if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // re-add the layer input + cur = ggml_add(ctx0, cur, inpL); + + ggml_tensor * ffn_inp = cur; + cb(ffn_inp, "ffn_inp", il); + + // pre-norm + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + cur = build_ffn(cur, + model.layers[il].ffn_up, + NULL, NULL, NULL, NULL, NULL, + model.layers[il].ffn_down, + NULL, NULL, NULL, + LLM_FFN_SWIGLU, LLM_FFN_SEQ, il); + + // attentions bypass the intermediate layer + cur = ggml_add(ctx0, cur, ffn_inp); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm_enc, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_embd", -1); + res->t_embd = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + struct llm_build_bloom : public llm_graph_context { llm_build_bloom(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; @@ -13595,6 +13742,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params, case LLM_ARCH_JINA_BERT_V2: case LLM_ARCH_NOMIC_BERT: case LLM_ARCH_NOMIC_BERT_MOE: + case LLM_ARCH_NEO_BERT: case LLM_ARCH_WAVTOKENIZER_DEC: { res = nullptr; @@ -13703,6 +13851,10 @@ llm_graph_result_ptr llama_model::build_graph( { llm = std::make_unique(*this, params, gf); } break; + case LLM_ARCH_NEO_BERT: + { + llm = std::make_unique(*this, params, gf); + } break; case LLM_ARCH_BLOOM: { llm = std::make_unique(*this, params, gf); @@ -14082,6 +14234,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_GRANITE_MOE: case LLM_ARCH_CHAMELEON: case LLM_ARCH_BAILINGMOE: + case LLM_ARCH_NEO_BERT: case LLM_ARCH_ARCEE: return LLAMA_ROPE_TYPE_NORM; From 0dbcabde8c006d5cf781ca0fe071c41559572a72 Mon Sep 17 00:00:00 2001 From: bandoti <141645996+bandoti@users.noreply.github.com> Date: Mon, 16 Jun 2025 10:32:13 -0300 Subject: [PATCH 21/26] cmake: clean up external project logic for vulkan-shaders-gen (#14179) * Remove install step for vulkan-shaders-gen * Add install step to normalize msvc with make * Regenerate modified shaders at build-time --- .github/workflows/build.yml | 2 +- ggml/src/ggml-vulkan/CMakeLists.txt | 49 ++++++++----------- .../ggml-vulkan/vulkan-shaders/CMakeLists.txt | 12 ----- 3 files changed, 22 insertions(+), 41 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5422dd817..85c4f3512 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -693,7 +693,7 @@ jobs: - build: 'openblas-x64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS 
-DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' - build: 'vulkan-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_VULKAN=ON' + defines: '-DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_VULKAN=ON' - build: 'llvm-arm64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON' - build: 'llvm-arm64-opencl-adreno' diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt index 4a88415f9..95e2ebe64 100644 --- a/ggml/src/ggml-vulkan/CMakeLists.txt +++ b/ggml/src/ggml-vulkan/CMakeLists.txt @@ -49,15 +49,7 @@ if (Vulkan_FOUND) ../../include/ggml-vulkan.h ) - set(VULKAN_SHADER_GEN_CMAKE_ARGS - -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} - -DCMAKE_RUNTIME_OUTPUT_DIRECTORY=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} - ) - - set(VULKAN_SHADER_GEN_CMAKE_BUILD_ARGS "") - if (CMAKE_BUILD_TYPE AND CMAKE_BUILD_TYPE MATCHES "Debug|Release|MinSizeRel|RelWithDebInfo") - list(APPEND VULKAN_SHADER_GEN_CMAKE_BUILD_ARGS --config=${CMAKE_BUILD_TYPE}) - endif() + set(VULKAN_SHADER_GEN_CMAKE_ARGS "") # Test all shader extensions test_shader_extension_support( @@ -136,42 +128,39 @@ if (Vulkan_FOUND) set(HOST_CMAKE_TOOLCHAIN_FILE "") endif() - # Always use ExternalProject_Add approach include(ExternalProject) - # Add toolchain file if cross-compiling if (CMAKE_CROSSCOMPILING) list(APPEND VULKAN_SHADER_GEN_CMAKE_ARGS -DCMAKE_TOOLCHAIN_FILE=${HOST_CMAKE_TOOLCHAIN_FILE}) message(STATUS "vulkan-shaders-gen toolchain file: ${HOST_CMAKE_TOOLCHAIN_FILE}") endif() - # Native build through ExternalProject_Add ExternalProject_Add( vulkan-shaders-gen SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders - CMAKE_ARGS ${VULKAN_SHADER_GEN_CMAKE_ARGS} - BUILD_COMMAND ${CMAKE_COMMAND} --build . ${VULKAN_SHADER_GEN_CMAKE_BUILD_ARGS} - INSTALL_COMMAND ${CMAKE_COMMAND} --install . - INSTALL_DIR ${CMAKE_BINARY_DIR} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR}/$ + -DCMAKE_INSTALL_BINDIR=. + -DCMAKE_BUILD_TYPE=$ + ${VULKAN_SHADER_GEN_CMAKE_ARGS} + + BUILD_COMMAND ${CMAKE_COMMAND} --build . --config $ + INSTALL_COMMAND ${CMAKE_COMMAND} --install . 
--config $ ) ExternalProject_Add_StepTargets(vulkan-shaders-gen build install) set (_ggml_vk_host_suffix $,.exe,>) - set (_ggml_vk_genshaders_cmd ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/vulkan-shaders-gen${_ggml_vk_host_suffix}) - set (_ggml_vk_header ${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.hpp) - set (_ggml_vk_source ${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.cpp) - set (_ggml_vk_input_dir ${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders) - set (_ggml_vk_output_dir ${CMAKE_CURRENT_BINARY_DIR}/vulkan-shaders.spv) + set (_ggml_vk_genshaders_dir "${CMAKE_BINARY_DIR}/$") + set (_ggml_vk_genshaders_cmd "${_ggml_vk_genshaders_dir}/vulkan-shaders-gen${_ggml_vk_host_suffix}") + set (_ggml_vk_header "${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.hpp") + set (_ggml_vk_source "${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.cpp") + set (_ggml_vk_input_dir "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders") + set (_ggml_vk_output_dir "${CMAKE_CURRENT_BINARY_DIR}/vulkan-shaders.spv") - file(GLOB _ggml_vk_shader_deps "${_ggml_vk_input_dir}/*.comp") - set (_ggml_vk_shader_deps ${_ggml_vk_shader_deps} vulkan-shaders-gen) - - # Add build and install dependencies for all builds - set(_ggml_vk_shader_deps ${_ggml_vk_shader_deps} vulkan-shaders-gen-build vulkan-shaders-gen-install) + file(GLOB _ggml_vk_shader_files CONFIGURE_DEPENDS "${_ggml_vk_input_dir}/*.comp") add_custom_command( OUTPUT ${_ggml_vk_header} - ${_ggml_vk_source} + ${_ggml_vk_source} COMMAND ${_ggml_vk_genshaders_cmd} --glslc ${Vulkan_GLSLC_EXECUTABLE} @@ -181,7 +170,11 @@ if (Vulkan_FOUND) --target-cpp ${_ggml_vk_source} --no-clean - DEPENDS ${_ggml_vk_shader_deps} + DEPENDS ${_ggml_vk_shader_files} + vulkan-shaders-gen + vulkan-shaders-gen-build + vulkan-shaders-gen-install + COMMENT "Generate vulkan shaders" ) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt b/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt index e60e9d1e5..14e9daaa0 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +++ b/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt @@ -25,15 +25,3 @@ add_executable(${TARGET} vulkan-shaders-gen.cpp) install(TARGETS ${TARGET} RUNTIME) target_compile_features(${TARGET} PRIVATE cxx_std_17) target_link_libraries(vulkan-shaders-gen PUBLIC Threads::Threads) - -# Configure output directories for MSVC builds -if(MSVC) - # Get the main project's runtime output directory if possible - if(DEFINED CMAKE_RUNTIME_OUTPUT_DIRECTORY) - foreach(CONFIG ${CMAKE_CONFIGURATION_TYPES}) - string(TOUPPER ${CONFIG} CONFIG) - set_target_properties(${TARGET} PROPERTIES - RUNTIME_OUTPUT_DIRECTORY_${CONFIG} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) - endforeach() - endif() -endif() From 6adc3c3ebc029af058ac950a8e2a825fdf18ecc6 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Mon, 16 Jun 2025 08:11:43 -0700 Subject: [PATCH 22/26] llama : add thread safety test (#14035) * llama : add thread safety test * llamafile : remove global state * llama : better LLAMA_SPLIT_MODE_NONE logic when main_gpu < 0 GPU devices are not used --------- Co-authored-by: Georgi Gerganov --- .github/workflows/build.yml | 1 + ci/run.sh | 2 +- common/common.cpp | 16 ++- ggml/src/ggml-cpu/ggml-cpu-impl.h | 3 + ggml/src/ggml-cpu/ggml-cpu.c | 8 ++ ggml/src/ggml-cpu/llamafile/sgemm.cpp | 8 +- src/llama.cpp | 18 +-- tests/CMakeLists.txt | 2 + tests/test-thread-safety.cpp | 152 ++++++++++++++++++++++++++ 9 files changed, 192 insertions(+), 18 deletions(-) create mode 100644 tests/test-thread-safety.cpp diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml 
index 85c4f3512..c4783a6df 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -778,6 +778,7 @@ jobs: cmake -S . -B build ${{ matrix.defines }} ` -DCURL_LIBRARY="$env:CURL_PATH/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:CURL_PATH/include" cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS} + cp $env:CURL_PATH/bin/libcurl-*.dll build/bin/Release - name: Add libopenblas.dll id: add_libopenblas_dll diff --git a/ci/run.sh b/ci/run.sh index 2968a7dd4..940055705 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -39,7 +39,7 @@ sd=`dirname $0` cd $sd/../ SRC=`pwd` -CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=OFF" +CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON" if [ ! -z ${GG_BUILD_METAL} ]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON -DGGML_METAL_USE_BF16=ON" diff --git a/common/common.cpp b/common/common.cpp index 5b465150f..eb80cee08 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -767,6 +767,9 @@ bool fs_validate_filename(const std::string & filename) { return true; } +#include + + // returns true if successful, false otherwise bool fs_create_directory_with_parents(const std::string & path) { #ifdef _WIN32 @@ -784,9 +787,16 @@ bool fs_create_directory_with_parents(const std::string & path) { // process path from front to back, procedurally creating directories while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) { const std::wstring subpath = wpath.substr(0, pos_slash); - const wchar_t * test = subpath.c_str(); - const bool success = CreateDirectoryW(test, NULL); + pos_slash += 1; + + // skip the drive letter, in some systems it can return an access denied error + if (subpath.length() == 2 && subpath[1] == ':') { + continue; + } + + const bool success = CreateDirectoryW(subpath.c_str(), NULL); + if (!success) { const DWORD error = GetLastError(); @@ -800,8 +810,6 @@ bool fs_create_directory_with_parents(const std::string & path) { return false; } } - - pos_slash += 1; } return true; diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index 9662e4d7b..ae68cd006 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -503,6 +503,9 @@ static __m256 __lasx_xvreplfr2vr_s(const float val) { // TODO: move to ggml-threading void ggml_barrier(struct ggml_threadpool * tp); +void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value); +int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value); + #ifdef __cplusplus } #endif diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index ff28bf98b..2c12e493b 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -559,6 +559,14 @@ void ggml_barrier(struct ggml_threadpool * tp) { #endif } +void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value) { + atomic_store_explicit(&tp->current_chunk, value, memory_order_relaxed); +} + +int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value) { + return atomic_fetch_add_explicit(&tp->current_chunk, value, memory_order_relaxed); +} + #if defined(__gnu_linux__) static cpu_set_t ggml_get_numa_affinity(void) { cpu_set_t cpuset; diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp index 1d46158f9..1c545f803 100644 --- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp +++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp @@ -53,7 +53,6 @@ #include "ggml-cpu-impl.h" #include "ggml-quants.h" -#include #include #include @@ -394,8 +393,6 @@ class tinyBLAS { template NOINLINE 
void gemm(int64_t m, int64_t n, int64_t BN) { - static std::atomic current_chunk; - GGML_ASSERT(m % (RM * BM) == 0); const int64_t ytiles = m / (RM * BM); const int64_t xtiles = (n + RN -1) / RN; @@ -410,7 +407,7 @@ class tinyBLAS { if (params->ith == 0) { GGML_ASSERT( jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles); // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. - std::atomic_store_explicit(¤t_chunk, (int64_t)params->nth, std::memory_order_relaxed); + ggml_threadpool_chunk_set(params->threadpool, params->nth); } ggml_barrier(params->threadpool); @@ -439,8 +436,7 @@ class tinyBLAS { GGML_ASSERT(jj == jj2); } - // next step. - job = std::atomic_fetch_add_explicit(¤t_chunk, (int64_t)1, std::memory_order_relaxed); + job = ggml_threadpool_chunk_add(params->threadpool, 1); } ggml_barrier(params->threadpool); diff --git a/src/llama.cpp b/src/llama.cpp index 2f06e0f8c..34906cdb6 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -198,14 +198,18 @@ static struct llama_model * llama_model_load_from_file_impl( // if using single GPU mode, remove all except the main GPU if (params.split_mode == LLAMA_SPLIT_MODE_NONE) { - if (params.main_gpu < 0 || params.main_gpu >= (int)model->devices.size()) { - LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %d)\n", __func__, params.main_gpu, (int)model->devices.size()); - llama_model_free(model); - return nullptr; + if (params.main_gpu < 0) { + model->devices.clear(); + } else { + if (params.main_gpu >= (int)model->devices.size()) { + LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %zu)\n", __func__, params.main_gpu, model->devices.size()); + llama_model_free(model); + return nullptr; + } + ggml_backend_dev_t main_gpu = model->devices[params.main_gpu]; + model->devices.clear(); + model->devices.push_back(main_gpu); } - ggml_backend_dev_t main_gpu = model->devices[params.main_gpu]; - model->devices.clear(); - model->devices.push_back(main_gpu); } for (auto * dev : model->devices) { diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index db4b2cf65..fc1557a2d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -185,6 +185,8 @@ llama_build_and_test(test-json-partial.cpp) llama_build_and_test(test-log.cpp) llama_build_and_test(test-regex-partial.cpp) +llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/models -hff tinyllamas/stories15M-q4_0.gguf -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4) + # this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135) if (NOT WIN32) llama_build_and_test(test-arg-parser.cpp) diff --git a/tests/test-thread-safety.cpp b/tests/test-thread-safety.cpp new file mode 100644 index 000000000..d525b7430 --- /dev/null +++ b/tests/test-thread-safety.cpp @@ -0,0 +1,152 @@ +// thread safety test +// - Loads a copy of the same model on each GPU, plus a copy on the CPU +// - Creates n_parallel (--parallel) contexts per model +// - Runs inference in parallel on each context + +#include +#include +#include +#include "llama.h" +#include "arg.h" +#include "common.h" +#include "log.h" +#include "sampling.h" + +int main(int argc, char ** argv) { + common_params params; + + if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) { + return 1; + } + + common_init(); + + llama_backend_init(); + llama_numa_init(params.numa); + + LOG_INF("%s\n", common_params_get_system_info(params).c_str()); + + //llama_log_set([](ggml_log_level level, 
const char * text, void * /*user_data*/) { + // if (level == GGML_LOG_LEVEL_ERROR) { + // common_log_add(common_log_main(), level, "%s", text); + // } + //}, NULL); + + auto cparams = common_context_params_to_llama(params); + + int dev_count = ggml_backend_dev_count(); + int gpu_dev_count = 0; + for (int i = 0; i < dev_count; ++i) { + auto * dev = ggml_backend_dev_get(i); + if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) { + gpu_dev_count++; + } + } + const int num_models = gpu_dev_count + 1 + 1; // GPUs + 1 CPU model + 1 layer split + //const int num_models = std::max(1, gpu_dev_count); + const int num_contexts = std::max(1, params.n_parallel); + + std::vector models; + std::vector threads; + std::atomic failed = false; + + for (int m = 0; m < num_models; ++m) { + auto mparams = common_model_params_to_llama(params); + + if (m < gpu_dev_count) { + mparams.split_mode = LLAMA_SPLIT_MODE_NONE; + mparams.main_gpu = m; + } else if (m == gpu_dev_count) { + mparams.split_mode = LLAMA_SPLIT_MODE_NONE; + mparams.main_gpu = -1; // CPU model + } else { + mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;; + } + + llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams); + if (model == NULL) { + LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str()); + return 1; + } + + models.emplace_back(model); + } + + for (int m = 0; m < num_models; ++m) { + auto * model = models[m].get(); + for (int c = 0; c < num_contexts; ++c) { + threads.emplace_back([&, m, c, model]() { + LOG_INF("Creating context %d/%d for model %d/%d\n", c + 1, num_contexts, m + 1, num_models); + + llama_context_ptr ctx { llama_init_from_model(model, cparams) }; + if (ctx == NULL) { + LOG_ERR("failed to create context\n"); + failed.store(true); + return; + } + + std::unique_ptr sampler { common_sampler_init(model, params.sampling), common_sampler_free }; + if (sampler == NULL) { + LOG_ERR("failed to create sampler\n"); + failed.store(true); + return; + } + + llama_batch batch = {}; + { + auto prompt = common_tokenize(ctx.get(), params.prompt, true); + if (prompt.empty()) { + LOG_ERR("failed to tokenize prompt\n"); + failed.store(true); + return; + } + batch = llama_batch_get_one(prompt.data(), prompt.size()); + if (llama_decode(ctx.get(), batch)) { + LOG_ERR("failed to decode prompt\n"); + failed.store(true); + return; + } + } + + const auto * vocab = llama_model_get_vocab(model); + std::string result = params.prompt; + + for (int i = 0; i < params.n_predict; i++) { + llama_token token; + if (batch.n_tokens > 0) { + token = common_sampler_sample(sampler.get(), ctx.get(), batch.n_tokens - 1); + } else { + token = llama_vocab_bos(vocab); + } + + result += common_token_to_piece(ctx.get(), token); + + if (llama_vocab_is_eog(vocab, token)) { + break; + } + + batch = llama_batch_get_one(&token, 1); + if (llama_decode(ctx.get(), batch)) { + LOG_ERR("Model %d/%d, Context %d/%d: failed to decode\n", m + 1, num_models, c + 1, num_contexts); + failed.store(true); + return; + } + } + + LOG_INF("Model %d/%d, Context %d/%d: %s\n\n", m + 1, num_models, c + 1, num_contexts, result.c_str()); + }); + } + } + + for (auto & thread : threads) { + thread.join(); + } + + if (failed) { + LOG_ERR("One or more threads failed.\n"); + return 1; + } + + LOG_INF("All threads finished without errors.\n"); + return 0; +} From 89fea80d298184d1cd93564f48e060d9f541f4b4 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 16 Jun 2025 22:33:27 +0300 Subject: [PATCH 23/26] server : fix incorrect usage of 
llama_get_embeddings() (#14225) * server : fix incorrect usage of llama_get_embeddings() ggml-ci * cont : fix the fix ggml-ci --- include/llama.h | 1 + tools/server/server.cpp | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/include/llama.h b/include/llama.h index b086b68e6..635508b10 100644 --- a/include/llama.h +++ b/include/llama.h @@ -965,6 +965,7 @@ extern "C" { LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx); // Set whether the context outputs embeddings or not + // TODO: rename to avoid confusion with llama_get_embeddings() LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings); // Set whether to use causal attention or not diff --git a/tools/server/server.cpp b/tools/server/server.cpp index c08e42125..721d09182 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -1358,6 +1358,14 @@ struct server_slot { return server_task_type_need_logits(task_type); } + // if the context does not have a memory module then all embeddings have to be computed within a single ubatch + // also we cannot split if the pooling would require any past tokens + bool can_split() const { + return + !need_embd() || + (llama_get_memory(ctx) && llama_pooling_type(ctx) == LLAMA_POOLING_TYPE_LAST); + } + bool can_batch_with(server_slot & other_slot) const { return task_type == other_slot.task_type && are_lora_equal(lora, other_slot.lora); } @@ -1929,14 +1937,6 @@ struct server_context { llama_batch_free(batch); } - // if the context does not have a memory module then all embeddings have to be computed within a single ubatch - // also we cannot split if the pooling would require any past tokens - bool can_split() const { - return - !llama_get_embeddings(ctx) || - (llama_get_memory(ctx) && llama_pooling_type(ctx) == LLAMA_POOLING_TYPE_LAST); - } - bool load_model(const common_params & params) { SRV_INF("loading model '%s'\n", params.model.path.c_str()); @@ -3130,7 +3130,7 @@ struct server_context { continue; } - if (!can_split()) { + if (!slot.can_split()) { if (slot.n_prompt_tokens > n_ubatch) { slot.release(); send_error(slot, "input is too large to process. 
increase the physical batch size", ERROR_TYPE_SERVER); @@ -3273,7 +3273,7 @@ struct server_context { slot.n_prompt_tokens_processed = 0; } - if (!can_split()) { + if (!slot.can_split()) { // cannot fit the prompt in the current batch - will try next iter if (batch.n_tokens + slot.n_prompt_tokens > n_batch) { continue; From e434e69183fd9e1031f4445002083178c331a28b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Mon, 16 Jun 2025 21:58:42 +0200 Subject: [PATCH 24/26] common : suggest --jinja when autodetection fails (#14222) --- common/chat.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/chat.cpp b/common/chat.cpp index 0dad14fba..7d9aaeb12 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -1838,7 +1838,7 @@ static common_chat_params common_chat_templates_apply_legacy( if (res < 0) { // if the custom "tmpl" is not supported, we throw an error // this is a bit redundant (for good), since we're not sure if user validated the custom template with llama_chat_verify_template() - throw std::runtime_error("this custom template is not supported"); + throw std::runtime_error("this custom template is not supported, try using --jinja"); } // if it turns out that our buffer is too small, we resize it From fe9d60e74a6cb71bcaed2029377bfa2872b4abb0 Mon Sep 17 00:00:00 2001 From: R0CKSTAR Date: Tue, 17 Jun 2025 17:48:08 +0800 Subject: [PATCH 25/26] musa: fix build warning (unused variable) (#14231) Signed-off-by: Xiaodong Ye --- ggml/src/ggml-cuda/ggml-cuda.cu | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 0bd2904e1..898b24341 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2664,7 +2664,9 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx ggml_backend_buft_is_cuda_split(node->src[j]->buffer->buft) || (integrated && ggml_backend_buft_is_cuda_host(node->src[j]->buffer->buft))); } } -#endif +#else + GGML_UNUSED(integrated); +#endif // NDEBUG bool ok = ggml_cuda_compute_forward(*cuda_ctx, node); if (!ok) { From 860a9e4eeff3eb2e7bd1cc38f65787cc6c8177af Mon Sep 17 00:00:00 2001 From: xctan Date: Tue, 17 Jun 2025 17:58:32 +0800 Subject: [PATCH 26/26] ggml-cpu : remove the weak alias trick (#14221) --- ggml/src/ggml-cpu/apple-fallback.h | 88 -------------- ggml/src/ggml-cpu/arch-fallback.h | 184 +++++++++++++++++++++++++++++ ggml/src/ggml-cpu/ggml-cpu-impl.h | 25 ---- ggml/src/ggml-cpu/quants.c | 28 +---- ggml/src/ggml-cpu/repack.cpp | 17 +-- ggml/src/ggml-cpu/repack.h | 5 - 6 files changed, 186 insertions(+), 161 deletions(-) delete mode 100644 ggml/src/ggml-cpu/apple-fallback.h create mode 100644 ggml/src/ggml-cpu/arch-fallback.h diff --git a/ggml/src/ggml-cpu/apple-fallback.h b/ggml/src/ggml-cpu/apple-fallback.h deleted file mode 100644 index f477505d7..000000000 --- a/ggml/src/ggml-cpu/apple-fallback.h +++ /dev/null @@ -1,88 +0,0 @@ -#pragma once - -// Solve alias issue for Apple targets (currently PowerPC, x86, and ARM64). -// Mach-O has a weak alias equivalent but no practical compiler support can -// be found, so we need to do it manually. -// ref: https://stackoverflow.com/questions/42757744 -// -// This file is a complement to native implementations in the `arch` folder. 
-// A kernel in quants.c or repack.cpp is either: -// - implemented in the `arch` folder, or -// - defined in this file to remove the `_generic` suffix - -#if defined(GGML_CPU_GENERIC) -// quants.c -#define quantize_row_q8_0_generic quantize_row_q8_0 -#define quantize_row_q8_1_generic quantize_row_q8_1 -#define quantize_row_q8_K_generic quantize_row_q8_K -#define ggml_vec_dot_q4_0_q8_0_generic ggml_vec_dot_q4_0_q8_0 -#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 -#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 -#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 -#define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 -#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K -#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K -#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K -#define ggml_vec_dot_q3_K_q8_K_generic ggml_vec_dot_q3_K_q8_K -#define ggml_vec_dot_q4_K_q8_K_generic ggml_vec_dot_q4_K_q8_K -#define ggml_vec_dot_q5_K_q8_K_generic ggml_vec_dot_q5_K_q8_K -#define ggml_vec_dot_q6_K_q8_K_generic ggml_vec_dot_q6_K_q8_K -#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K -#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K -#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K -#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K -#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K -#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K -#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K -#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 -#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K -// repack.cpp -#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 -#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 -#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 -#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 -#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 -#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 -#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K -#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 -#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 -#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 -#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 -#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K -#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 -#elif defined(__aarch64__) || defined(__arm__) -// repack.cpp -#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 -#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K -#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K -#elif defined(__x86_64__) || defined(__i386__) -// repack.cpp -#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 -#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 -#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 -#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 -#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 -#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 -#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 -#elif defined(__POWERPC__) -// ref: https://github.com/ggml-org/llama.cpp/pull/14146#issuecomment-2972561679 -// quants.c -#define 
quantize_row_q8_K_generic quantize_row_q8_K -#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K -#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K -#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K -// repack.cpp -#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 -#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 -#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 -#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 -#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 -#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 -#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K -#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 -#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 -#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 -#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 -#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K -#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 -#endif diff --git a/ggml/src/ggml-cpu/arch-fallback.h b/ggml/src/ggml-cpu/arch-fallback.h new file mode 100644 index 000000000..10e534251 --- /dev/null +++ b/ggml/src/ggml-cpu/arch-fallback.h @@ -0,0 +1,184 @@ +#pragma once + +// Rename `_generic` functions if no native implementation is available. +// This effectively selects the generic implementation. + +#if defined(GGML_CPU_GENERIC) +// quants.c +#define quantize_row_q8_0_generic quantize_row_q8_0 +#define quantize_row_q8_1_generic quantize_row_q8_1 +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_q4_0_q8_0_generic ggml_vec_dot_q4_0_q8_0 +#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 +#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 +#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 +#define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K +#define ggml_vec_dot_q3_K_q8_K_generic ggml_vec_dot_q3_K_q8_K +#define ggml_vec_dot_q4_K_q8_K_generic ggml_vec_dot_q4_K_q8_K +#define ggml_vec_dot_q5_K_q8_K_generic ggml_vec_dot_q5_K_q8_K +#define ggml_vec_dot_q6_K_q8_K_generic ggml_vec_dot_q6_K_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 +#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define 
ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__aarch64__) || defined(__arm__) || defined(_M_ARM) || defined(_M_ARM64) +// repack.cpp +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#elif defined(__x86_64__) || defined(__i386__) || defined(_M_IX86) || defined(_M_X64) +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__POWERPC__) || defined(__powerpc__) +// ref: https://github.com/ggml-org/llama.cpp/pull/14146#issuecomment-2972561679 +// quants.c +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__loongarch64) +// quants.c +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define 
ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__riscv) +// quants.c +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 +#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__s390x__) +// quants.c +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 +#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__wasm__) 
+// quants.c +#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 +#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#endif diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index ae68cd006..bbd93c0ef 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -509,28 +509,3 @@ int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value); #ifdef __cplusplus } #endif - -#define GGML_DO_PRAGMA_(x) _Pragma (#x) -#define GGML_DO_PRAGMA(x) GGML_DO_PRAGMA_(x) -#if defined(GGML_CPU_GENERIC) || defined(__HIPCC__) || defined(__APPLE__) -// Note for Apple targets: -// - clang: aliases are not supported on darwin -// - all native kernels need to be implemented in both x86 and arm files -// - on iOS, tvOS, and visionOS, if cmake cannot determine the target architecture, all `_generic` names are replaced by defines -# define GGML_WEAK_ALIAS(name, alias) -#elif defined(__GNUC__) -// GCC/Clang on *nix -# define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(weak name = alias) // NOLINT -#elif defined(_MSC_VER) && defined(_WIN64) -// MSVC -// Note: C name mangling varies across different calling conventions -// see https://learn.microsoft.com/en-us/cpp/build/reference/decorated-names?view=msvc-170 -# define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(comment(linker, "/alternatename:" #name "=" #alias)) -#elif defined(_MSC_VER) && defined(WIN32) -// ref: https://github.com/ggml-org/whisper.cpp/pull/3239#issuecomment-2958224591 -# define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(comment(linker, "/alternatename:_" #name "=_" #alias)) -#else -# error "Unsupported compiler for GGML_WEAK_ALIAS" -#endif - -#define GGML_CPU_NATIVE_IMPL(name) GGML_WEAK_ALIAS(name, name ## _generic) diff --git a/ggml/src/ggml-cpu/quants.c b/ggml/src/ggml-cpu/quants.c index 516c5b2ce..d2e705f28 100644 --- a/ggml/src/ggml-cpu/quants.c +++ b/ggml/src/ggml-cpu/quants.c @@ -5,9 +5,7 @@ #include "ggml-quants.h" #include 
"quants.h" -#if defined(__APPLE__) -#include "apple-fallback.h" -#endif +#include "arch-fallback.h" #include #include @@ -42,12 +40,10 @@ void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, in void quantize_row_q8_0_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_0_ref(x, y, k); } -GGML_CPU_NATIVE_IMPL(quantize_row_q8_0) void quantize_row_q8_1_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_1_ref(x, y, k); } -GGML_CPU_NATIVE_IMPL(quantize_row_q8_1) // // 2-6 bit quantization in super-blocks @@ -108,7 +104,6 @@ void quantize_row_tq2_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, void quantize_row_q8_K_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_K_ref(x, y, k); } -GGML_CPU_NATIVE_IMPL(quantize_row_q8_K) //===================================== Dot products ================================= @@ -147,7 +142,6 @@ void ggml_vec_dot_q4_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, c *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q4_0_q8_0) // TODO: add WASM SIMD void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { @@ -185,7 +179,6 @@ void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, c *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q4_1_q8_1) void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; @@ -229,7 +222,6 @@ void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, c *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q5_0_q8_0) void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; @@ -273,7 +265,6 @@ void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, c *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q5_1_q8_1) void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; @@ -304,7 +295,6 @@ void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, c *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q8_0_q8_0) void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); @@ -357,7 +347,6 @@ void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_tq1_0_q8_K) void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); @@ -390,7 +379,6 @@ void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_tq2_0_q8_K) void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); @@ -443,7 +431,6 @@ void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c 
} *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q2_K_q8_K) void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -523,7 +510,6 @@ void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q3_K_q8_K) void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -599,7 +585,6 @@ void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q4_K_q8_K) void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -680,7 +665,6 @@ void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q5_K_q8_K) void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -736,7 +720,6 @@ void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q6_K_q8_K) void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -779,7 +762,6 @@ void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs } *s = 0.125f * sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq2_xxs_q8_K) void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -830,7 +812,6 @@ void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } *s = 0.125f * sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq2_xs_q8_K) void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -883,7 +864,6 @@ void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = 0.125f * sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq2_s_q8_K) void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -928,7 +908,6 @@ void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs } *s = 0.25f * sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq3_xxs_q8_K) void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -985,7 +964,6 @@ void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq3_s_q8_K) void 
ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -1029,7 +1007,6 @@ void ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq1_s_q8_K) void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -1091,7 +1068,6 @@ void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq1_m_q8_K) void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); @@ -1121,7 +1097,6 @@ void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, } *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq4_nl_q8_0) void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); @@ -1168,7 +1143,6 @@ void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq4_xs_q8_K) // ============================ 4-bit non-linear quants diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 604ccee90..5c6715d5c 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -8,9 +8,7 @@ #include "ggml-cpu-impl.h" #include "traits.h" -#if defined(__APPLE__) -#include "apple-fallback.h" -#endif +#include "arch-fallback.h" #include #include @@ -87,7 +85,6 @@ void ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GG } } } -GGML_CPU_NATIVE_IMPL(ggml_quantize_mat_q8_0_4x4) void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); @@ -126,7 +123,6 @@ void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GG } } } -GGML_CPU_NATIVE_IMPL(ggml_quantize_mat_q8_0_4x8) void ggml_quantize_mat_q8_K_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK_K == 256); @@ -178,7 +174,6 @@ void ggml_quantize_mat_q8_K_4x8_generic(const float * GGML_RESTRICT x, void * GG } } } -GGML_CPU_NATIVE_IMPL(ggml_quantize_mat_q8_K_4x8) } // extern "C" @@ -248,7 +243,6 @@ void ggml_gemv_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; } } -GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_0_4x4_q8_0) void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -293,7 +287,6 @@ void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; } } -GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_0_4x8_q8_0) void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -340,7 +333,6 @@ void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } 
-GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_0_8x8_q8_0) void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; @@ -419,7 +411,6 @@ void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } -GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_K_8x8_q8_K) void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -466,7 +457,6 @@ void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs } } } -GGML_CPU_NATIVE_IMPL(ggml_gemv_iq4_nl_4x4_q8_0) void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -523,7 +513,6 @@ void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } -GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_0_4x4_q8_0) void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -578,7 +567,6 @@ void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } -GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_0_4x8_q8_0) void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -633,7 +621,6 @@ void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } -GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_0_8x8_q8_0) void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; @@ -723,7 +710,6 @@ void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } -GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_K_8x8_q8_K) void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -780,7 +766,6 @@ void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs } } } -GGML_CPU_NATIVE_IMPL(ggml_gemm_iq4_nl_4x4_q8_0) } // extern "C" diff --git a/ggml/src/ggml-cpu/repack.h b/ggml/src/ggml-cpu/repack.h index b13d2d0c7..4421e5f8e 100644 --- a/ggml/src/ggml-cpu/repack.h +++ b/ggml/src/ggml-cpu/repack.h @@ -64,10 +64,6 @@ static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wro extern "C" { #endif -// Workaround for clang: -// clang++ complains: ``error: call to 'ggml_gemm_q4_0_4x4_q8_0' is ambiguous'' -// repro: https://godbolt.org/z/oKdeWKonM (ICE), https://godbolt.org/z/1szq6P36v (ambiguous call) -#if defined(GGML_CPU_CLANG_WORKAROUND) || defined(__APPLE__) || !(defined(__GNUC__) && defined(__clang__)) || defined(__HIPCC__) void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); @@ -81,7 +77,6 @@ void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void 
* GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); -#endif // !defined(__clang__) // Native implementations void ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k);