From fcc1b43c06ca90a10ce2b5b563b4d2f066b6f392 Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Wed, 28 May 2025 23:24:33 +0800
Subject: [PATCH] embeddings change to encode

---
 otherarch/embeddings_adapter.cpp | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/otherarch/embeddings_adapter.cpp b/otherarch/embeddings_adapter.cpp
index 9ea7e37d5..97d21633f 100644
--- a/otherarch/embeddings_adapter.cpp
+++ b/otherarch/embeddings_adapter.cpp
@@ -34,7 +34,7 @@ static void batch_add_seq(llama_batch & batch, const std::vector & toke
     }
 }
 
-static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd, int embd_norm) {
+static void batch_encode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd, int embd_norm) {
     const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
     const struct llama_model * model = llama_get_model(ctx);
 
@@ -46,16 +46,10 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
     {
         printf("\n%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
     }
-    if (llama_model_has_encoder(model) && !llama_model_has_decoder(model)) {
-        // encoder-only model
-        if (llama_encode(ctx, batch) < 0) {
-            printf("\n%s : failed to encode\n", __func__);
-        }
-    } else if (!llama_model_has_encoder(model) && llama_model_has_decoder(model)) {
-        // decoder-only model
-        if (llama_decode(ctx, batch) < 0) {
-            printf("\n%s : failed to decode\n", __func__);
-        }
+
+    // run model
+    if (llama_encode(ctx, batch) < 0) {
+        printf("%s : failed to process\n", __func__);
     }
 
     for (int i = 0; i < batch.n_tokens; i++) {
@@ -150,7 +144,7 @@ bool embeddingstype_load_model(const embeddings_load_model_inputs inputs)
 
     std::vector tmp = {1, 2, 3, 4};
     llama_kv_self_clear(embeddings_ctx);
-    auto er = llama_decode(embeddings_ctx, llama_batch_get_one(tmp.data(), tmp.size()));
+    auto er = llama_encode(embeddings_ctx, llama_batch_get_one(tmp.data(), tmp.size()));
     if(er!=0)
     {
         printf("\nEmbeddings Model Eval returned nonzero: %d\n",er);
@@ -265,7 +259,7 @@ embeddings_generation_outputs embeddingstype_generate(const embeddings_generatio
         // encode if at capacity
         if (batch.n_tokens + n_toks > n_batch) {
             float * out = emb + e * n_embd;
-            batch_decode(embeddings_ctx, batch, out, s, n_embd, embd_normalize);
+            batch_encode(embeddings_ctx, batch, out, s, n_embd, embd_normalize);
             e += pooling_type == LLAMA_POOLING_TYPE_NONE ? batch.n_tokens : s;
             s = 0;
             common_batch_clear(batch);
@@ -277,7 +271,7 @@ embeddings_generation_outputs embeddingstype_generate(const embeddings_generatio
 
     // final batch
     float * out = emb + e * n_embd;
-    batch_decode(embeddings_ctx, batch, out, s, n_embd, embd_normalize);
+    batch_encode(embeddings_ctx, batch, out, s, n_embd, embd_normalize);
 
     std::string outputarray = "[";
     for (int i = 0; i < n_embd; i++) {
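
Note (not part of the patch): the branch on llama_model_has_encoder()/llama_model_has_decoder()
is dropped because the patch routes every embeddings batch through llama_encode(), which appears
to track upstream llama.cpp's embedding example, where llama_encode() handles both encoder-only
and decoder-only models when only embeddings (not logits) are needed. Below is a minimal sketch
of the calling loop the last two hunks touch; tokenized_prompts and n_prompts are hypothetical
stand-ins for the adapter's real state, so treat this as an illustration under those assumptions,
not the adapter's exact code.

    // Sketch: pack prompts into one llama_batch, flushing with batch_encode()
    // whenever the next prompt would overflow n_batch (mirrors the final hunks).
    std::vector<float> emb(n_prompts * n_embd, 0.0f);  // output rows, one per prompt when pooled
    int e = 0;  // embedding rows already written
    int s = 0;  // sequences in the current batch
    for (const std::vector<llama_token> & toks : tokenized_prompts) {  // hypothetical input
        if (batch.n_tokens + (int) toks.size() > n_batch) {
            // batch is full: run the model and advance the output cursor
            batch_encode(embeddings_ctx, batch, emb.data() + e * n_embd, s, n_embd, embd_normalize);
            e += pooling_type == LLAMA_POOLING_TYPE_NONE ? batch.n_tokens : s;
            s = 0;
            common_batch_clear(batch);
        }
        batch_add_seq(batch, toks, s);  // append this prompt as sequence id s
        s += 1;
    }
    batch_encode(embeddings_ctx, batch, emb.data() + e * n_embd, s, n_embd, embd_normalize);  // final partial batch

The warmup change in embeddingstype_load_model follows the same reasoning: an embeddings context
never needs autoregressive decoding, so a single llama_encode() pass suffices there as well.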