diff --git a/CMakeLists.txt b/CMakeLists.txt
index f8a5f58f9..a12b94437 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -494,6 +494,13 @@ target_compile_features(tts_adapter PUBLIC cxx_std_17) # don't bump
 target_link_libraries(tts_adapter PRIVATE common2 ggml ${LLAMA_EXTRA_LIBS})
 set_target_properties(tts_adapter PROPERTIES POSITION_INDEPENDENT_CODE ON)
 
+add_library(embeddings_adapter
+            otherarch/embeddings_adapter.cpp)
+target_include_directories(embeddings_adapter PUBLIC . ./ggml/include ./ggml/src ./ggml/src/ggml-cpu ./include ./otherarch ./otherarch/tools ./examples ./common)
+target_compile_features(embeddings_adapter PUBLIC cxx_std_17) # don't bump
+target_link_libraries(embeddings_adapter PRIVATE common2 ggml ${LLAMA_EXTRA_LIBS})
+set_target_properties(embeddings_adapter PROPERTIES POSITION_INDEPENDENT_CODE ON)
+
 add_library(gpttype_adapter
             gpttype_adapter.cpp)
 target_include_directories(gpttype_adapter PUBLIC . ./ggml/include ./ggml/src ./ggml/src/ggml-cpu ./include ./otherarch ./otherarch/tools ./otherarch/sdcpp ./otherarch/sdcpp/thirdparty ./examples ./common)
@@ -509,7 +516,7 @@ if (LLAMA_CUBLAS)
     set_target_properties(${TARGET} PROPERTIES PREFIX "")
     set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME "koboldcpp_cublas")
     set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
-    target_link_libraries(${TARGET} PUBLIC Threads::Threads ggml ggml_v1 ggml_v2 ggml_v3 common2 gpttype_adapter whisper_adapter tts_adapter sdtype_adapter ${LLAMA_EXTRA_LIBS})
+    target_link_libraries(${TARGET} PUBLIC Threads::Threads ggml ggml_v1 ggml_v2 ggml_v3 common2 gpttype_adapter whisper_adapter tts_adapter embeddings_adapter sdtype_adapter ${LLAMA_EXTRA_LIBS})
     target_compile_features(${TARGET} PRIVATE cxx_std_17)
 
     add_custom_command(
@@ -529,7 +536,7 @@ if (LLAMA_HIPBLAS)
     set_target_properties(${TARGET} PROPERTIES PREFIX "")
     set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME "koboldcpp_hipblas")
     set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
-    target_link_libraries(${TARGET} PUBLIC Threads::Threads ggml ggml_v1 ggml_v2 ggml_v3 common2 gpttype_adapter whisper_adapter tts_adapter sdtype_adapter ${LLAMA_EXTRA_LIBS})
+    target_link_libraries(${TARGET} PUBLIC Threads::Threads ggml ggml_v1 ggml_v2 ggml_v3 common2 gpttype_adapter whisper_adapter tts_adapter embeddings_adapter sdtype_adapter ${LLAMA_EXTRA_LIBS})
     target_compile_features(${TARGET} PRIVATE cxx_std_17)
 
     add_custom_command(
diff --git a/Makefile b/Makefile
index 3182a8867..f53116dff 100644
--- a/Makefile
+++ b/Makefile
@@ -611,6 +611,9 @@ whispercpp_cublas.o: otherarch/whispercpp/whisper_adapter.cpp
 tts_default.o: otherarch/tts_adapter.cpp
 	$(CXX) $(CXXFLAGS) -c $< -o $@
 
+embeddings_default.o: otherarch/embeddings_adapter.cpp
+	$(CXX) $(CXXFLAGS) -c $< -o $@
+
 # idiotic "for easier compilation"
 GPTTYPE_ADAPTER = gpttype_adapter.cpp otherarch/llama_v2.cpp otherarch/llama_v3.cpp src/llama.cpp src/llama-impl.cpp src/llama-chat.cpp src/llama-mmap.cpp src/llama-context.cpp src/llama-adapter.cpp src/llama-arch.cpp src/llama-batch.cpp src/llama-vocab.cpp src/llama-grammar.cpp src/llama-sampling.cpp src/llama-kv-cache.cpp src/llama-model-loader.cpp src/llama-model.cpp src/llama-quant.cpp src/llama-hparams.cpp otherarch/gptj_v1.cpp otherarch/gptj_v2.cpp otherarch/gptj_v3.cpp otherarch/gpt2_v1.cpp otherarch/gpt2_v2.cpp otherarch/gpt2_v3.cpp otherarch/rwkv_v2.cpp otherarch/rwkv_v3.cpp otherarch/neox_v2.cpp otherarch/neox_v3.cpp otherarch/mpt_v3.cpp ggml/include/ggml.h ggml/include/ggml-cpu.h ggml/include/ggml-cuda.h include/llama.h otherarch/llama-util.h
 gpttype_adapter_failsafe.o: $(GPTTYPE_ADAPTER)
@@ -666,11 +669,11 @@ else
 endif
 
 #generated libraries
-koboldcpp_default: ggml.o ggml-cpu.o ggml_v3.o ggml_v2.o ggml_v1.o expose.o gpttype_adapter.o sdcpp_default.o whispercpp_default.o tts_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_FULL) $(OBJS)
+koboldcpp_default: ggml.o ggml-cpu.o ggml_v3.o ggml_v2.o ggml_v1.o expose.o gpttype_adapter.o sdcpp_default.o whispercpp_default.o tts_default.o embeddings_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_FULL) $(OBJS)
 	$(DEFAULT_BUILD)
 
 ifdef FAILSAFE_BUILD
-koboldcpp_failsafe: ggml_v4_failsafe.o ggml-cpu_v4_failsafe.o ggml_v3_failsafe.o ggml_v2_failsafe.o ggml_v1_failsafe.o expose.o gpttype_adapter_failsafe.o sdcpp_default.o whispercpp_default.o tts_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_FAILSAFE) $(OBJS)
+koboldcpp_failsafe: ggml_v4_failsafe.o ggml-cpu_v4_failsafe.o ggml_v3_failsafe.o ggml_v2_failsafe.o ggml_v1_failsafe.o expose.o gpttype_adapter_failsafe.o sdcpp_default.o whispercpp_default.o tts_default.o embeddings_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_FAILSAFE) $(OBJS)
 	$(FAILSAFE_BUILD)
 else
 koboldcpp_failsafe:
@@ -678,7 +681,7 @@ koboldcpp_failsafe:
 endif
 
 ifdef NOAVX2_BUILD
-koboldcpp_noavx2: ggml_v4_noavx2.o ggml-cpu_v4_noavx2.o ggml_v3_noavx2.o ggml_v2_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_failsafe.o sdcpp_default.o whispercpp_default.o tts_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_SIMPLE) $(OBJS)
+koboldcpp_noavx2: ggml_v4_noavx2.o ggml-cpu_v4_noavx2.o ggml_v3_noavx2.o ggml_v2_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_failsafe.o sdcpp_default.o whispercpp_default.o tts_default.o embeddings_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_SIMPLE) $(OBJS)
 	$(NOAVX2_BUILD)
 else
 koboldcpp_noavx2:
@@ -686,12 +689,12 @@ koboldcpp_noavx2:
 endif
 
 ifdef CLBLAST_BUILD
-koboldcpp_clblast: ggml_v4_clblast.o ggml-cpu_v4_clblast.o ggml_v3_clblast.o ggml_v2_clblast.o ggml_v1.o expose.o gpttype_adapter_clblast.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o sdcpp_default.o whispercpp_default.o tts_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_FULL) $(OBJS)
+koboldcpp_clblast: ggml_v4_clblast.o ggml-cpu_v4_clblast.o ggml_v3_clblast.o ggml_v2_clblast.o ggml_v1.o expose.o gpttype_adapter_clblast.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o sdcpp_default.o whispercpp_default.o tts_default.o embeddings_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_FULL) $(OBJS)
 	$(CLBLAST_BUILD)
 ifdef NOAVX2_BUILD
-koboldcpp_clblast_noavx2: ggml_v4_clblast_noavx2.o ggml-cpu_v4_clblast_noavx2.o ggml_v3_clblast_noavx2.o ggml_v2_clblast_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_clblast_noavx2.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o sdcpp_default.o whispercpp_default.o tts_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_SIMPLE) $(OBJS)
+koboldcpp_clblast_noavx2: ggml_v4_clblast_noavx2.o ggml-cpu_v4_clblast_noavx2.o ggml_v3_clblast_noavx2.o ggml_v2_clblast_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_clblast_noavx2.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o sdcpp_default.o whispercpp_default.o tts_default.o embeddings_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_SIMPLE) $(OBJS)
 	$(CLBLAST_BUILD)
-koboldcpp_clblast_failsafe: ggml_v4_clblast_failsafe.o ggml-cpu_v4_clblast_failsafe.o ggml_v3_clblast_failsafe.o ggml_v2_clblast_failsafe.o ggml_v1_failsafe.o expose.o gpttype_adapter_clblast_noavx2.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o sdcpp_default.o whispercpp_default.o tts_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_SIMPLER) $(OBJS)
+koboldcpp_clblast_failsafe: ggml_v4_clblast_failsafe.o ggml-cpu_v4_clblast_failsafe.o ggml_v3_clblast_failsafe.o ggml_v2_clblast_failsafe.o ggml_v1_failsafe.o expose.o gpttype_adapter_clblast_noavx2.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o sdcpp_default.o whispercpp_default.o tts_default.o embeddings_default.o llavaclip_default.o llava.o ggml-backend_default.o ggml-backend-reg_default.o $(OBJS_SIMPLER) $(OBJS)
 	$(CLBLAST_BUILD)
 else
 koboldcpp_clblast_noavx2:
@@ -709,7 +712,7 @@ koboldcpp_clblast_failsafe:
 endif
 
 ifdef CUBLAS_BUILD
-koboldcpp_cublas: ggml_v4_cublas.o ggml-cpu.o ggml_v3_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o gpttype_adapter_cublas.o sdcpp_cublas.o whispercpp_cublas.o tts_default.o llavaclip_cublas.o llava.o ggml-backend_cublas.o ggml-backend-reg_cublas.o $(CUBLAS_OBJS) $(OBJS_FULL) $(OBJS)
+koboldcpp_cublas: ggml_v4_cublas.o ggml-cpu.o ggml_v3_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o gpttype_adapter_cublas.o sdcpp_cublas.o whispercpp_cublas.o tts_default.o embeddings_default.o llavaclip_cublas.o llava.o ggml-backend_cublas.o ggml-backend-reg_cublas.o $(CUBLAS_OBJS) $(OBJS_FULL) $(OBJS)
 	$(CUBLAS_BUILD)
 else
 koboldcpp_cublas:
@@ -717,7 +720,7 @@ koboldcpp_cublas:
 endif
 
 ifdef HIPBLAS_BUILD
-koboldcpp_hipblas: ggml_v4_cublas.o ggml-cpu.o ggml_v3_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o gpttype_adapter_cublas.o sdcpp_cublas.o whispercpp_cublas.o tts_default.o llavaclip_cublas.o llava.o ggml-backend_cublas.o ggml-backend-reg_cublas.o $(HIP_OBJS) $(OBJS_FULL) $(OBJS)
+koboldcpp_hipblas: ggml_v4_cublas.o ggml-cpu.o ggml_v3_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o gpttype_adapter_cublas.o sdcpp_cublas.o whispercpp_cublas.o tts_default.o embeddings_default.o llavaclip_cublas.o llava.o ggml-backend_cublas.o ggml-backend-reg_cublas.o $(HIP_OBJS) $(OBJS_FULL) $(OBJS)
 	$(HIPBLAS_BUILD)
 else
 koboldcpp_hipblas:
@@ -725,10 +728,10 @@ koboldcpp_hipblas:
 endif
 
 ifdef VULKAN_BUILD
-koboldcpp_vulkan: ggml_v4_vulkan.o ggml-cpu.o ggml_v3.o ggml_v2.o ggml_v1.o expose.o gpttype_adapter_vulkan.o ggml-vulkan.o sdcpp_vulkan.o whispercpp_default.o tts_default.o llavaclip_vulkan.o llava.o ggml-backend_vulkan.o ggml-backend-reg_vulkan.o $(OBJS_FULL) $(OBJS)
+koboldcpp_vulkan: ggml_v4_vulkan.o ggml-cpu.o ggml_v3.o ggml_v2.o ggml_v1.o expose.o gpttype_adapter_vulkan.o ggml-vulkan.o sdcpp_vulkan.o whispercpp_default.o tts_default.o embeddings_default.o llavaclip_vulkan.o llava.o ggml-backend_vulkan.o ggml-backend-reg_vulkan.o $(OBJS_FULL) $(OBJS)
 	$(VULKAN_BUILD)
 ifdef NOAVX2_BUILD
-koboldcpp_vulkan_noavx2: ggml_v4_vulkan_noavx2.o ggml-cpu_v4_noavx2.o ggml_v3_noavx2.o ggml_v2_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_vulkan_noavx2.o ggml-vulkan.o sdcpp_vulkan.o whispercpp_default.o tts_default.o llavaclip_vulkan.o llava.o ggml-backend_vulkan.o ggml-backend-reg_vulkan.o $(OBJS_SIMPLE) $(OBJS)
+koboldcpp_vulkan_noavx2: ggml_v4_vulkan_noavx2.o ggml-cpu_v4_noavx2.o ggml_v3_noavx2.o ggml_v2_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_vulkan_noavx2.o ggml-vulkan.o sdcpp_vulkan.o whispercpp_default.o tts_default.o embeddings_default.o llavaclip_vulkan.o llava.o ggml-backend_vulkan.o ggml-backend-reg_vulkan.o $(OBJS_SIMPLE) $(OBJS)
 	$(VULKAN_BUILD)
 else
 koboldcpp_vulkan_noavx2:
diff --git a/expose.cpp b/expose.cpp
index a9fe0b9e4..f817241fc 100644
--- a/expose.cpp
+++ b/expose.cpp
@@ -247,6 +247,15 @@ extern "C"
         return ttstype_generate(inputs);
     }
 
+    bool embeddings_load_model(const embeddings_load_model_inputs inputs)
+    {
+        return embeddingstype_load_model(inputs);
+    }
+    embeddings_generation_outputs embeddings_generate(const embeddings_generation_inputs inputs)
+    {
+        return embeddingstype_generate(inputs);
+    }
+
     const char * new_token(int idx) {
         if (generated_tokens.size() <= idx || idx < 0) return nullptr;
diff --git a/expose.h b/expose.h
index 9a8b6e643..4189e5142 100644
--- a/expose.h
+++ b/expose.h
@@ -235,6 +235,29 @@ struct tts_generation_outputs
     const char * data = "";
 };
 
+struct embeddings_load_model_inputs
+{
+    const int threads = 4;
+    const char * model_filename = nullptr;
+    const char * executable_path = nullptr;
+    const int clblast_info = 0;
+    const int cublas_info = 0;
+    const char * vulkan_info = nullptr;
+    const int gpulayers = 0;
+    const bool flash_attention = false;
+    const bool quiet = false;
+    const int debugmode = 0;
+};
+struct embeddings_generation_inputs
+{
+    const char * prompt = nullptr;
+};
+struct embeddings_generation_outputs
+{
+    int status = -1;
+    const char * data = "";
+};
+
 extern std::string executable_path;
 extern std::string lora_filename;
 extern std::string lora_base;
diff --git a/koboldcpp.py b/koboldcpp.py
index 0a89bd9fe..254315b02 100644
--- a/koboldcpp.py
+++ b/koboldcpp.py
@@ -318,6 +318,25 @@ class tts_generation_outputs(ctypes.Structure):
     _fields_ = [("status", ctypes.c_int),
                 ("data", ctypes.c_char_p)]
 
+class embeddings_load_model_inputs(ctypes.Structure):
+    _fields_ = [("threads", ctypes.c_int),
+                ("model_filename", ctypes.c_char_p),
+                ("executable_path", ctypes.c_char_p),
+                ("clblast_info", ctypes.c_int),
+                ("cublas_info", ctypes.c_int),
+                ("vulkan_info", ctypes.c_char_p),
+                ("gpulayers", ctypes.c_int),
+                ("flash_attention", ctypes.c_bool),
+                ("quiet", ctypes.c_bool),
+                ("debugmode", ctypes.c_int)]
+
+class embeddings_generation_inputs(ctypes.Structure):
+    _fields_ = [("prompt", ctypes.c_char_p)]
+
+class embeddings_generation_outputs(ctypes.Structure):
+    _fields_ = [("status", ctypes.c_int),
+                ("data", ctypes.c_char_p)]
+
 def getdirpath():
     return os.path.dirname(os.path.realpath(__file__))
 def getabspath():
@@ -491,6 +510,10 @@ def init_library():
     handle.tts_load_model.restype = ctypes.c_bool
     handle.tts_generate.argtypes = [tts_generation_inputs]
     handle.tts_generate.restype = tts_generation_outputs
+    handle.embeddings_load_model.argtypes = [embeddings_load_model_inputs]
+    handle.embeddings_load_model.restype = ctypes.c_bool
+    handle.embeddings_generate.argtypes = [embeddings_generation_inputs]
+    handle.embeddings_generate.restype = embeddings_generation_outputs
     handle.last_logprobs.restype = last_logprobs_outputs
     handle.detokenize.argtypes = [token_count_outputs]
     handle.detokenize.restype = ctypes.c_char_p
@@ -1564,6 +1587,28 @@ def tts_generate(genparams):
         outstr = ret.data.decode("UTF-8","ignore")
     return outstr
 
+def embeddings_load_model(model_filename):
+    global args
+    inputs = embeddings_load_model_inputs()
+    inputs.model_filename = model_filename.encode("UTF-8")
+    inputs.gpulayers = (999 if args.ttsgpu else 0)
+    inputs.flash_attention = args.flashattention
+    inputs.threads = args.threads
+    inputs = set_backend_props(inputs)
+    ret = handle.embeddings_load_model(inputs)
+    return ret
+
+def embeddings_generate(genparams):
+    global args
+    prompt = genparams.get("input", "")
+    inputs = embeddings_generation_inputs()
+    inputs.prompt = prompt.encode("UTF-8")
+    ret = handle.embeddings_generate(inputs)
+    outstr = ""
+    if ret.status==1:
+        outstr = ret.data.decode("UTF-8","ignore")
+    return outstr
+
 def tokenize_ids(countprompt,tcaddspecial):
     rawcountdata = handle.token_count(countprompt.encode("UTF-8"),tcaddspecial)
     countlimit = rawcountdata.count if (rawcountdata.count>=0 and rawcountdata.count<50000) else 0
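Taken together, the expose.h structs, their ctypes mirrors, and the init_library() bindings give Python a complete path to the new embeddings endpoint. Below is a minimal, hypothetical driver built only on what this patch exposes; `fetch_embedding` is an illustrative name, and it assumes the model was already loaded via `handle.embeddings_load_model`:

```python
import json

# Hypothetical helper mirroring the embeddings_generate() wrapper above.
# `handle` is the ctypes CDLL prepared by init_library().
def fetch_embedding(handle, text: str):
    inputs = embeddings_generation_inputs()
    inputs.prompt = text.encode("UTF-8")
    ret = handle.embeddings_generate(inputs)
    if ret.status != 1:
        return None  # status 0: model never loaded, or input exceeded the batch size
    # The adapter serializes the vector as "[v0,v1,...]", which parses as JSON.
    return json.loads(ret.data.decode("UTF-8", "ignore"))
```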
diff --git a/model_adapter.h b/model_adapter.h
index 883e0b93f..9b1859de6 100644
--- a/model_adapter.h
+++ b/model_adapter.h
@@ -110,6 +110,9 @@ whisper_generation_outputs whispertype_generate(const whisper_generation_inputs
 bool ttstype_load_model(const tts_load_model_inputs inputs);
 tts_generation_outputs ttstype_generate(const tts_generation_inputs inputs);
 
+bool embeddingstype_load_model(const embeddings_load_model_inputs inputs);
+embeddings_generation_outputs embeddingstype_generate(const embeddings_generation_inputs inputs);
+
 void timer_start();
 double timer_check();
 void print_tok_vec(std::vector<int> &embd);
diff --git a/otherarch/embeddings_adapter.cpp b/otherarch/embeddings_adapter.cpp
new file mode 100644
index 000000000..94344ae5f
--- /dev/null
+++ b/otherarch/embeddings_adapter.cpp
@@ -0,0 +1,276 @@
+#include "model_adapter.h"
+#include "otherarch/utils.h"
+
+#include "common.h"
+#include "sampling.h"
+#include "llama.h"
+
+#include <cmath>
+#include <cstdio>
+#include <cstring>
+#include <ctime>
+#include <map>
+#include <random>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "src/llama-context.h"
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
+static llama_context * embeddings_ctx = nullptr; //embeddings ctx
+static std::string ttsplatformenv, ttsdeviceenv, ttsvulkandeviceenv;
+bool embeddings_debug = false;
+const int max_batchsize = 2048;
+static std::string last_output = "";
+
+static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
+    size_t n_tokens = tokens.size();
+    for (size_t i = 0; i < n_tokens; i++) {
+        common_batch_add(batch, tokens[i], i, { seq_id }, true);
+    }
+}
+
+static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd, int embd_norm) {
+    const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
+    const struct llama_model * model = llama_get_model(ctx);
+
+    // clear previous kv_cache values (irrelevant for embeddings)
+    llama_kv_self_clear(ctx);
+
+    // run model
+    if(embeddings_debug)
+    {
+        printf("\n%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
+    }
+    if (llama_model_has_encoder(model) && !llama_model_has_decoder(model)) {
+        // encoder-only model
+        if (llama_encode(ctx, batch) < 0) {
+            printf("\n%s : failed to encode\n", __func__);
+        }
+    } else if (!llama_model_has_encoder(model) && llama_model_has_decoder(model)) {
+        // decoder-only model
+        if (llama_decode(ctx, batch) < 0) {
+            printf("\n%s : failed to decode\n", __func__);
+        }
+    }
+
+    for (int i = 0; i < batch.n_tokens; i++) {
+        if (!batch.logits[i]) {
+            continue;
+        }
+        const float * embd = nullptr;
+        int embd_pos = 0;
+        if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
+            // try to get token embeddings
+            embd = llama_get_embeddings_ith(ctx, i);
+            embd_pos = i;
+            if(embd == NULL)
+            {
+                printf("\nfailed to get token embeddings\n");
+            }
+        } else {
+            // try to get sequence embeddings - supported only when pooling_type is not NONE
+            embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
+            embd_pos = batch.seq_id[i][0];
+            if(embd == NULL)
+            {
+                printf("\nfailed to get sequence embeddings\n");
+            }
+        }
+        float * out = output + embd_pos * n_embd;
+        common_embd_normalize(embd, out, n_embd, embd_norm);
+    }
+}
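batch_decode hands every pooled vector to common_embd_normalize with embd_norm = 2, which the generate path below labels "euclidean". For reference, a small Python sketch of that normalization step (the function name is illustrative, not part of the patch):

```python
import math

# L2 (Euclidean) normalization, as requested by embd_norm = 2: each output
# vector is scaled to unit length, so later dot products act as cosine similarity.
def embd_normalize_l2(vec):
    norm = math.sqrt(sum(x * x for x in vec))
    return [x / norm for x in vec] if norm > 0 else list(vec)

print(embd_normalize_l2([3.0, 4.0]))  # -> [0.6, 0.8]
```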
+
+bool embeddingstype_load_model(const embeddings_load_model_inputs inputs)
+{
+    //duplicated from expose.cpp
+    int cl_parseinfo = inputs.clblast_info; //first digit is whether configured, second is platform, third is devices
+    std::string usingclblast = "GGML_OPENCL_CONFIGURED="+std::to_string(cl_parseinfo>0?1:0);
+    putenv((char*)usingclblast.c_str());
+    cl_parseinfo = cl_parseinfo%100; //keep last 2 digits
+    int platform = cl_parseinfo/10;
+    int devices = cl_parseinfo%10;
+    ttsplatformenv = "GGML_OPENCL_PLATFORM="+std::to_string(platform);
+    ttsdeviceenv = "GGML_OPENCL_DEVICE="+std::to_string(devices);
+    putenv((char*)ttsplatformenv.c_str());
+    putenv((char*)ttsdeviceenv.c_str());
+    std::string vulkan_info_raw = inputs.vulkan_info;
+    std::string vulkan_info_str = "";
+    for (size_t i = 0; i < vulkan_info_raw.length(); ++i) {
+        vulkan_info_str += vulkan_info_raw[i];
+        if (i < vulkan_info_raw.length() - 1) {
+            vulkan_info_str += ",";
+        }
+    }
+    if(vulkan_info_str!="")
+    {
+        ttsvulkandeviceenv = "GGML_VK_VISIBLE_DEVICES="+vulkan_info_str;
+        putenv((char*)ttsvulkandeviceenv.c_str());
+    }
+
+    llama_backend_init();
+
+    std::string modelfile = inputs.model_filename;
+    printf("\nLoading Embeddings Model: %s \n",modelfile.c_str());
+
+    embeddings_debug = (inputs.debugmode>0);
+
+    // embeddings init
+    llama_model_params model_params = llama_model_default_params();
+    llama_context_params ctx_params = llama_context_default_params();
+    const int nthreads = inputs.threads;
+    model_params.use_mmap = false;
+    model_params.use_mlock = false;
+    model_params.n_gpu_layers = inputs.gpulayers; //offload if possible
+    model_params.split_mode = llama_split_mode::LLAMA_SPLIT_MODE_LAYER;
+    ctx_params.embeddings = true;
+    ctx_params.n_batch = ctx_params.n_ubatch = max_batchsize; //max size, must fit
+    ctx_params.n_ctx = max_batchsize + 512;
+    ctx_params.logits_all = false;
+    ctx_params.offload_kqv = true;
+    ctx_params.n_threads = nthreads;
+    ctx_params.n_threads_batch = nthreads;
+    ctx_params.flash_attn = inputs.flash_attention;
+
+    llama_model * embeddingsmodel = llama_model_load_from_file(modelfile.c_str(), model_params);
+    embeddings_ctx = llama_new_context_with_model(embeddingsmodel, ctx_params);
+
+    if (embeddings_ctx == nullptr) {
+        printf("\nEmbeddings Model Load Error: Failed to initialize context!\n");
+        return false;
+    }
+
+    std::vector<llama_token> tmp = {1, 2, 3, 4};
+    llama_kv_cache_clear(embeddings_ctx);
+    auto er = llama_decode(embeddings_ctx, llama_batch_get_one(tmp.data(), tmp.size()));
+    if(er!=0)
+    {
+        printf("\nEmbeddings Model Eval returned nonzero: %d\n",er);
+        return false;
+    }
+
+    const llama_vocab * vocab = llama_model_get_vocab(embeddingsmodel);
+
+    const int n_ctx_train = llama_model_n_ctx_train(embeddingsmodel);
+    const int n_ctx = llama_n_ctx(embeddings_ctx);
+
+    if (llama_model_has_encoder(embeddingsmodel) && llama_model_has_decoder(embeddingsmodel)) {
+        printf("\n%s: computing embeddings in encoder-decoder models is not supported\n", __func__);
+        return false;
+    }
+
+    if (n_ctx > n_ctx_train) {
+        printf("\n%s: warning: Embeddings model was trained on only %d context tokens (%d specified)\n", __func__, n_ctx_train, n_ctx);
+    }
+
+    printf("\nEmbeddings Model Load Complete.\n");
+    return true;
+}
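The clblast_info unpacking at the top of embeddingstype_load_model packs a configured flag plus OpenCL platform and device indices into one integer. A tiny sketch of the same digit arithmetic, with an illustrative function name:

```python
# Unpacks clblast_info as the C++ above does: any nonzero value marks OpenCL
# as configured, and the last two digits carry platform and device indices.
def parse_clblast_info(clblast_info: int):
    configured = 1 if clblast_info > 0 else 0
    rest = clblast_info % 100  # keep last 2 digits
    platform = rest // 10
    device = rest % 10
    return configured, platform, device

assert parse_clblast_info(112) == (1, 1, 2)
```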
+
+embeddings_generation_outputs embeddingstype_generate(const embeddings_generation_inputs inputs)
+{
+    embeddings_generation_outputs output;
+
+    if(embeddings_ctx==nullptr)
+    {
+        printf("\nWarning: KCPP Embeddings Model not initialized!\n");
+        output.data = "";
+        output.status = 0;
+        return output;
+    }
+
+    double timetaken = 0;
+    timer_start();
+
+    llama_kv_cache_clear(embeddings_ctx);
+    std::string prompt = inputs.prompt;
+
+    // max batch size
+    const uint64_t n_batch = max_batchsize;
+
+    // tokenize the prompts and trim
+    std::vector<std::vector<int32_t>> prompt_inputs;
+    auto inp = common_tokenize(embeddings_ctx, prompt, true, true);
+    if (inp.size() > n_batch) {
+        printf("\n%s: number of tokens in input line (%lld) exceeds batch size (%lld), lower token amount!\n",
+               __func__, (long long int) inp.size(), (long long int) n_batch);
+        output.data = "";
+        output.status = 0;
+        return output;
+    }
+    prompt_inputs.push_back(inp);
+
+    printf("\nGenerating Embeddings for %d tokens...",(int)inp.size());
+
+    // initialize batch
+    const int n_prompts = 1;
+    const enum llama_pooling_type pooling_type = llama_pooling_type(embeddings_ctx);
+    struct llama_batch batch = llama_batch_init(n_batch, 0, 1);
+
+    // count number of embeddings
+    int n_embd_count = 0;
+    if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
+        for (int k = 0; k < n_prompts; k++) {
+            n_embd_count += prompt_inputs[k].size();
+        }
+    } else {
+        n_embd_count = n_prompts;
+    }
+
+    // allocate output
+    const llama_model * embeddingsmodel = llama_get_model(embeddings_ctx);
+    const int n_embd = llama_model_n_embd(embeddingsmodel);
+    std::vector<float> embeddings(n_embd_count * n_embd, 0);
+    float * emb = embeddings.data();
+    int embd_normalize = 2; //euclidean
+
+    // break into batches
+    int e = 0; // number of embeddings already stored
+    int s = 0; // number of prompts in current batch
+    for (int k = 0; k < n_prompts; k++) {
+        // clamp to n_batch tokens
+        auto & inp = prompt_inputs[k];
+        const uint64_t n_toks = inp.size();
+        // encode if at capacity
+        if (batch.n_tokens + n_toks > n_batch) {
+            float * out = emb + e * n_embd;
+            batch_decode(embeddings_ctx, batch, out, s, n_embd, embd_normalize);
+            e += pooling_type == LLAMA_POOLING_TYPE_NONE ? batch.n_tokens : s;
+            s = 0;
+            common_batch_clear(batch);
+        }
+        // add to batch
+        batch_add_seq(batch, inp, s);
+        s += 1;
+    }
+
+    // final batch
+    float * out = emb + e * n_embd;
+    batch_decode(embeddings_ctx, batch, out, s, n_embd, embd_normalize);
+
+    std::string outputarray = "[";
+    for (int i = 0; i < n_embd; i++) {
+        if (i > 0)
+        {
+            outputarray += ",";
+        }
+        outputarray += std::to_string(emb[i]);
+    }
+    outputarray += "]";
+    last_output = outputarray;
+
+    // clean up
+    llama_batch_free(batch);
+
+    timetaken = timer_check();
+    printf("\nText Embeddings Generated %d values in %.2fs.\n",(int) n_embd,timetaken);
+
+    output.data = last_output.c_str();
+    output.status = 1;
+    return output;
+}
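Because embeddingstype_generate normalizes with embd_normalize = 2, every vector it returns has unit length, so comparing two embeddings client-side reduces to a dot product. A short illustrative sketch (names are not part of the patch):

```python
# With unit-length vectors, the dot product equals the cosine similarity.
def cosine_sim(a, b):
    return sum(x * y for x, y in zip(a, b))

v1 = [0.6, 0.8]
v2 = [0.8, 0.6]
print(cosine_sim(v1, v2))  # -> 0.96
```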