fossilize ggml library ver 3, to support ggjtv3

Concedo 2024-01-20 10:49:25 +08:00
parent 1804238e3f
commit db14de5c32
18 changed files with 44315 additions and 1591 deletions

Modified file: Makefile

@@ -139,7 +139,7 @@ endif
ifdef LLAMA_CUBLAS
CUBLAS_FLAGS = -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
CUBLASLD_FLAGS = -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -Lconda/envs/linux/lib -Lconda/envs/linux/lib/stubs -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib -L/usr/lib/wsl/lib
CUBLAS_OBJS = ggml-cuda.o ggml_v2-cuda.o ggml_v2-cuda-legacy.o
CUBLAS_OBJS = ggml-cuda.o ggml_v3-cuda.o ggml_v2-cuda.o ggml_v2-cuda-legacy.o
NVCC = nvcc
NVCCFLAGS = --forward-unknown-to-host-compiler -use_fast_math
@@ -193,6 +193,8 @@ ggml_v2-cuda.o: otherarch/ggml_v2-cuda.cu otherarch/ggml_v2-cuda.h
$(NVCC) $(NVCCFLAGS) $(subst -Ofast,-O3,$(CXXFLAGS)) $(CUBLAS_FLAGS) $(CUBLAS_CXXFLAGS) -Wno-pedantic -c $< -o $@
ggml_v2-cuda-legacy.o: otherarch/ggml_v2-cuda-legacy.cu otherarch/ggml_v2-cuda-legacy.h
$(NVCC) $(NVCCFLAGS) $(subst -Ofast,-O3,$(CXXFLAGS)) $(CUBLAS_FLAGS) $(CUBLAS_CXXFLAGS) -Wno-pedantic -c $< -o $@
ggml_v3-cuda.o: otherarch/ggml_v3-cuda.cu otherarch/ggml_v3-cuda.h
$(NVCC) $(NVCCFLAGS) $(subst -Ofast,-O3,$(CXXFLAGS)) $(CUBLAS_FLAGS) $(CUBLAS_CXXFLAGS) -Wno-pedantic -c $< -o $@
endif # LLAMA_CUBLAS
ifdef LLAMA_HIPBLAS
@@ -205,7 +207,7 @@ ifdef LLAMA_HIPBLAS
LLAMA_CUDA_KQUANTS_ITER ?= 2
HIPFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS $(shell $(ROCM_PATH)/bin/hipconfig -C)
HIPLDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib -lhipblas -lamdhip64 -lrocblas
HIP_OBJS += ggml-cuda.o ggml_v2-cuda.o ggml_v2-cuda-legacy.o
HIP_OBJS += ggml-cuda.o ggml_v3-cuda.o ggml_v2-cuda.o ggml_v2-cuda-legacy.o
ggml-cuda.o: HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS)) \
-DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X) \
-DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y) \
@@ -218,12 +220,18 @@ ggml_v2-cuda-legacy.o: HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS)) \
-DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X) \
-DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y) \
-DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
ggml_v3-cuda.o: HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS)) \
-DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X) \
-DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y) \
-DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
$(HCXX) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
ggml_v2-cuda.o: otherarch/ggml_v2-cuda.cu otherarch/ggml_v2-cuda.h
$(HCXX) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
ggml_v2-cuda-legacy.o: otherarch/ggml_v2-cuda-legacy.cu otherarch/ggml_v2-cuda-legacy.h
$(HCXX) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
ggml_v3-cuda.o: otherarch/ggml_v3-cuda.cu otherarch/ggml_v3-cuda.h
$(HCXX) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
endif # LLAMA_HIPBLAS
@@ -371,6 +379,22 @@ ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h
ggml-backend.o: ggml-backend.c ggml.h ggml-backend.h
$(CC) $(CFLAGS) -c $< -o $@
#version 3 libs
ggml_v3.o: otherarch/ggml_v3.c otherarch/ggml_v3.h
$(CC) $(CFLAGS) $(FULLCFLAGS) -c $< -o $@
ggml_v3_openblas.o: otherarch/ggml_v3.c otherarch/ggml_v3.h
$(CC) $(CFLAGS) $(FULLCFLAGS) $(OPENBLAS_FLAGS) -c $< -o $@
ggml_v3_failsafe.o: otherarch/ggml_v3.c otherarch/ggml_v3.h
$(CC) $(CFLAGS) $(NONECFLAGS) -c $< -o $@
ggml_v3_noavx2.o: otherarch/ggml_v3.c otherarch/ggml_v3.h
$(CC) $(CFLAGS) $(SIMPLECFLAGS) -c $< -o $@
ggml_v3_clblast.o: otherarch/ggml_v3.c otherarch/ggml_v3.h
$(CC) $(CFLAGS) $(FULLCFLAGS) $(CLBLAST_FLAGS) -c $< -o $@
ggml_v3_cublas.o: otherarch/ggml_v3.c otherarch/ggml_v3.h
$(CC) $(CFLAGS) $(FULLCFLAGS) $(CUBLAS_FLAGS) $(HIPFLAGS) -c $< -o $@
ggml_v3_clblast_noavx2.o: otherarch/ggml_v3.c otherarch/ggml_v3.h
$(CC) $(CFLAGS) $(SIMPLECFLAGS) $(CLBLAST_FLAGS) -c $< -o $@
#version 2 libs
ggml_v2.o: otherarch/ggml_v2.c otherarch/ggml_v2.h
$(CC) $(CFLAGS) $(FULLCFLAGS) -c $< -o $@
@@ -400,6 +424,8 @@ ggml_v2-opencl.o: otherarch/ggml_v2-opencl.cpp otherarch/ggml_v2-opencl.h
$(CXX) $(CXXFLAGS) $(CLBLAST_FLAGS) -c $< -o $@
ggml_v2-opencl-legacy.o: otherarch/ggml_v2-opencl-legacy.c otherarch/ggml_v2-opencl-legacy.h
$(CC) $(CFLAGS) -c $< -o $@
ggml_v3-opencl.o: otherarch/ggml_v3-opencl.cpp otherarch/ggml_v3-opencl.h
$(CXX) $(CXXFLAGS) $(CLBLAST_FLAGS) -c $< -o $@
# intermediate objects
llama.o: llama.cpp ggml.h ggml-alloc.h ggml-backend.h ggml-cuda.h ggml-metal.h llama.h otherarch/llama-util.h
@@ -440,11 +466,11 @@ gguf: examples/gguf/gguf.cpp build-info.h ggml.o llama.o $(OBJS)
#generated libraries
koboldcpp_default: ggml.o ggml_v2.o ggml_v1.o expose.o common.o gpttype_adapter.o ggml-quants.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
koboldcpp_default: ggml.o ggml_v3.o ggml_v2.o ggml_v1.o expose.o common.o gpttype_adapter.o ggml-quants.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
$(DEFAULT_BUILD)
ifdef OPENBLAS_BUILD
koboldcpp_openblas: ggml_openblas.o ggml_v2_openblas.o ggml_v1.o expose.o common.o gpttype_adapter.o ggml-quants.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
koboldcpp_openblas: ggml_openblas.o ggml_v3_openblas.o ggml_v2_openblas.o ggml_v1.o expose.o common.o gpttype_adapter.o ggml-quants.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
$(OPENBLAS_BUILD)
else
koboldcpp_openblas:
@@ -452,7 +478,7 @@ koboldcpp_openblas:
endif
ifdef FAILSAFE_BUILD
koboldcpp_failsafe: ggml_failsafe.o ggml_v2_failsafe.o ggml_v1_failsafe.o expose.o common.o gpttype_adapter_failsafe.o ggml-quants_failsafe.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
koboldcpp_failsafe: ggml_failsafe.o ggml_v3_failsafe.o ggml_v2_failsafe.o ggml_v1_failsafe.o expose.o common.o gpttype_adapter_failsafe.o ggml-quants_failsafe.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
$(FAILSAFE_BUILD)
else
koboldcpp_failsafe:
@@ -460,7 +486,7 @@ koboldcpp_failsafe:
endif
ifdef NOAVX2_BUILD
koboldcpp_noavx2: ggml_noavx2.o ggml_v2_noavx2.o ggml_v1_failsafe.o expose.o common.o gpttype_adapter_failsafe.o ggml-quants_noavx2.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
koboldcpp_noavx2: ggml_noavx2.o ggml_v3_noavx2.o ggml_v2_noavx2.o ggml_v1_failsafe.o expose.o common.o gpttype_adapter_failsafe.o ggml-quants_noavx2.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
$(NOAVX2_BUILD)
else
koboldcpp_noavx2:
@@ -468,10 +494,10 @@ koboldcpp_noavx2:
endif
ifdef CLBLAST_BUILD
koboldcpp_clblast: ggml_clblast.o ggml_v2_clblast.o ggml_v1.o expose.o common.o gpttype_adapter_clblast.o ggml-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o ggml-quants.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
koboldcpp_clblast: ggml_clblast.o ggml_v3_clblast.o ggml_v2_clblast.o ggml_v1.o expose.o common.o gpttype_adapter_clblast.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o ggml-quants.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
$(CLBLAST_BUILD)
ifdef NOAVX2_BUILD
koboldcpp_clblast_noavx2: ggml_clblast_noavx2.o ggml_v2_clblast_noavx2.o ggml_v1_failsafe.o expose.o common.o gpttype_adapter_clblast_noavx2.o ggml-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o ggml-quants_noavx2.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
koboldcpp_clblast_noavx2: ggml_clblast_noavx2.o ggml_v3_clblast_noavx2.o ggml_v2_clblast_noavx2.o ggml_v1_failsafe.o expose.o common.o gpttype_adapter_clblast_noavx2.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o ggml-quants_noavx2.o ggml-alloc.o ggml-backend.o grammar-parser.o $(OBJS)
$(CLBLAST_BUILD)
else
koboldcpp_clblast_noavx2:
@@ -485,7 +511,7 @@ koboldcpp_clblast_noavx2:
endif
ifdef CUBLAS_BUILD
koboldcpp_cublas: ggml_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o common.o gpttype_adapter_cublas.o ggml-quants.o ggml-alloc.o ggml-backend.o grammar-parser.o $(CUBLAS_OBJS) $(OBJS)
koboldcpp_cublas: ggml_cublas.o ggml_v3_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o common.o gpttype_adapter_cublas.o ggml-quants.o ggml-alloc.o ggml-backend.o grammar-parser.o $(CUBLAS_OBJS) $(OBJS)
$(CUBLAS_BUILD)
else
koboldcpp_cublas:
@@ -493,7 +519,7 @@ koboldcpp_cublas:
endif
ifdef HIPBLAS_BUILD
koboldcpp_hipblas: ggml_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o common.o gpttype_adapter_cublas.o ggml-quants.o ggml-alloc.o ggml-backend.o grammar-parser.o $(HIP_OBJS) $(OBJS)
koboldcpp_hipblas: ggml_cublas.o ggml_v3_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o common.o gpttype_adapter_cublas.o ggml-quants.o ggml-alloc.o ggml-backend.o grammar-parser.o $(HIP_OBJS) $(OBJS)
$(HIPBLAS_BUILD)
else
koboldcpp_hipblas:

Modified file: gpttype_adapter.cpp

@@ -774,17 +774,29 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
}
//this is used for the mem_per_token eval, openblas needs more RAM
bool use_scratch = ggml_cpu_has_gpublas();
bool v3_use_scratch = ggml_v3_cpu_has_gpublas();
int cu_parseinfo_maindevice = inputs.cublas_info<=0?0:inputs.cublas_info;
printf("System Info: %s\n", llama_print_system_info());
#if defined(GGML_USE_CUBLAS)
if(file_format==FileFormat::GGUF_LLAMA || file_format==FileFormat::GGUF_FALCON)
{
if(ggml_cpu_has_gpublas() && cu_parseinfo_maindevice>0)
{
printf("CUBLAS: Set main device to %d\n",cu_parseinfo_maindevice);
ggml_cuda_set_main_device(cu_parseinfo_maindevice);
}
}
else
{
if(ggml_v3_cpu_has_gpublas() && cu_parseinfo_maindevice>0)
{
printf("CUBLAS v3: Set main device to %d\n",cu_parseinfo_maindevice);
ggml_v3_cuda_set_main_device(cu_parseinfo_maindevice);
}
}
#endif
SetQuantsUnshuffled(false);
if(file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2)
@@ -1187,7 +1199,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
n_vocab = gpt2_ctx_v3.hparams.n_vocab;
// determine the required inference memory per token:
gpt2_eval(gpt2_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch);
gpt2_eval(gpt2_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, v3_use_scratch);
return ModelLoadResult::SUCCESS;
}
else
@@ -1262,19 +1274,19 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
n_vocab = gptj_ctx_v3.hparams.n_vocab;
// determine the required inference memory per token:
gptj_eval(gptj_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch);
gptj_eval(gptj_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, v3_use_scratch);
//if the logits are NAN or duplicated, it means the model is incompatible
std::vector<float> oldlogits(logits);
//this is another hack because they change the library - we run the eval through the model
//twice and compare logits. if they give the same logits for different inputs, model is broken
gptj_eval(gptj_ctx_v3, kcpp_params->n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token, use_scratch);
gptj_eval(gptj_ctx_v3, kcpp_params->n_threads, 0, {4, 5, 6, 7}, logits, mem_per_token, v3_use_scratch);
if(logits.size()>0 && (IsNanCheck(logits[0]) || LogitsDuplicated(oldlogits,logits)))
{
printf("\nBad Logits detected! Retrying GPT-J model loading...");
ggml_free(gptj_ctx_v3.ctx);
ggml_v3_free(gptj_ctx_v3.ctx);
return ModelLoadResult::RETRY_LOAD;
}
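For context: the retry path above leans on IsNanCheck and LogitsDuplicated, whose definitions are not part of this diff. A minimal sketch of the check they perform (hypothetical name and logic, shown only to clarify why the eval is run twice on different token windows) might look like this:
#include <cmath>
#include <vector>
// Hypothetical sketch: treat NaN logits, or identical logits for two different
// inputs, as a broken/incompatible model so the caller can retry another loader.
static bool logits_look_broken(const std::vector<float> & first_run,
                               const std::vector<float> & second_run)
{
    if (first_run.empty() || std::isnan(first_run[0])) {
        return true;                                  // NaN output
    }
    if (first_run.size() != second_run.size()) {
        return false;                                 // nothing comparable
    }
    size_t identical = 0;
    for (size_t i = 0; i < first_run.size(); ++i) {
        if (first_run[i] == second_run[i]) {
            ++identical;
        }
    }
    return identical == first_run.size();             // same logits for different inputs
}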
@@ -1338,7 +1350,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
n_vocab = neox_ctx_v3.hparams.n_vocab;
// determine the required inference memory per token:
gpt_neox_eval(neox_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, use_scratch);
gpt_neox_eval(neox_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, v3_use_scratch);
return ModelLoadResult::SUCCESS;
}
@@ -1399,7 +1411,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
n_vocab = mpt_ctx_v3.hparams.n_vocab;
// determine the required inference memory per token:
mpt_eval(mpt_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, false, mem_per_token, use_scratch);
mpt_eval(mpt_ctx_v3, kcpp_params->n_threads, 0, { 0, 1, 2, 3 }, logits, false, mem_per_token, v3_use_scratch);
return ModelLoadResult::SUCCESS;
}
else
@@ -1709,7 +1721,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
}
bool startedsampling = false;
bool use_scratch = true; //for normal inference always use scratch
bool v3_use_scratch = true; //for normal inference always use scratch
timer_start();
double time1 = 0, time2 = 0;
@@ -1849,7 +1861,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
}
else if(file_format==FileFormat::GPT2_4)
{
evalres = gpt2_eval(gpt2_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, use_scratch);
evalres = gpt2_eval(gpt2_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, v3_use_scratch);
}
else if(file_format==FileFormat::NEOX_1 || file_format == FileFormat::NEOX_2 || file_format == FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5)
{
@@ -1857,7 +1869,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
}
else if(file_format==FileFormat::NEOX_6|| file_format==FileFormat::NEOX_7)
{
evalres = gpt_neox_eval(neox_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, use_scratch);
evalres = gpt_neox_eval(neox_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, v3_use_scratch);
}
else if(file_format==FileFormat::GPTJ_1 || file_format==FileFormat::GPTJ_2)
{
@@ -1869,11 +1881,11 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
}
else if(file_format==FileFormat::GPTJ_5)
{
evalres = gptj_eval(gptj_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, use_scratch);
evalres = gptj_eval(gptj_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, mem_per_token, v3_use_scratch);
}
else if(file_format==FileFormat::MPT_1)
{
evalres = mpt_eval(mpt_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, false, mem_per_token, use_scratch);
evalres = mpt_eval(mpt_ctx_v3, kcpp_params->n_threads, n_past, embd, logits, false, mem_per_token, v3_use_scratch);
}
else
{
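For context, the device-selection hunk near the top of this file now splits into two paths: GGUF-era formats keep driving the live ggml CUDA backend, while every legacy format is routed through the fossilized ggml_v3 copy. A condensed sketch of that dispatch follows; the helper name is hypothetical, but the ggml_cuda_* / ggml_v3_cuda_* calls are the ones used in the diff, and the snippet assumes the same headers gpttype_adapter already includes.
static void kcpp_select_cuda_main_device(FileFormat file_format, int main_device)
{
#if defined(GGML_USE_CUBLAS)
    // GGUF models stay on the current backend; legacy formats use the frozen v3 backend.
    const bool is_gguf = (file_format == FileFormat::GGUF_LLAMA ||
                          file_format == FileFormat::GGUF_FALCON);
    if (is_gguf) {
        if (ggml_cpu_has_gpublas() && main_device > 0) {
            ggml_cuda_set_main_device(main_device);
        }
    } else {
        if (ggml_v3_cpu_has_gpublas() && main_device > 0) {
            ggml_v3_cuda_set_main_device(main_device);
        }
    }
#else
    (void)file_format; (void)main_device;   // CPU-only build: nothing to select
#endif
}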

New file: otherarch/ggml_v3-cuda.cu (10325 lines; diff suppressed because it is too large)

New file: otherarch/ggml_v3-cuda.h (53 lines)

@@ -0,0 +1,53 @@
#pragma once
#include "ggml_v3.h"
#ifdef GGML_USE_HIPBLAS
#define GGML_V3_CUDA_NAME "ROCm"
#define GGML_V3_CUBLAS_NAME "hipBLAS"
#else
#define GGML_V3_CUDA_NAME "CUDA"
#define GGML_V3_CUBLAS_NAME "cuBLAS"
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define GGML_V3_CUDA_MAX_DEVICES 16
// Always success. To check if CUDA is actually loaded, use `ggml_v3_cublas_loaded`.
GGML_V3_API void ggml_v3_init_cublas(void);
// Returns `true` if there are available CUDA devices and cublas loads successfully; otherwise, it returns `false`.
GGML_V3_API bool ggml_v3_cublas_loaded(void);
GGML_V3_API void * ggml_v3_cuda_host_malloc(size_t size);
GGML_V3_API void ggml_v3_cuda_host_free(void * ptr);
GGML_V3_API bool ggml_v3_cuda_can_mul_mat(const struct ggml_v3_tensor * src0, const struct ggml_v3_tensor * src1, struct ggml_v3_tensor * dst);
GGML_V3_API void ggml_v3_cuda_set_tensor_split(const float * tensor_split);
GGML_V3_API void ggml_v3_cuda_transform_tensor(void * data, struct ggml_v3_tensor * tensor);
GGML_V3_API void ggml_v3_cuda_free_data(struct ggml_v3_tensor * tensor);
GGML_V3_API void ggml_v3_cuda_assign_buffers(struct ggml_v3_tensor * tensor);
GGML_V3_API void ggml_v3_cuda_assign_buffers_no_scratch(struct ggml_v3_tensor * tensor);
GGML_V3_API void ggml_v3_cuda_assign_buffers_force_inplace(struct ggml_v3_tensor * tensor);
GGML_V3_API void ggml_v3_cuda_assign_buffers_no_alloc(struct ggml_v3_tensor * tensor);
GGML_V3_API void ggml_v3_cuda_assign_scratch_offset(struct ggml_v3_tensor * tensor, size_t offset);
GGML_V3_API void ggml_v3_cuda_copy_to_device(struct ggml_v3_tensor * tensor);
GGML_V3_API void ggml_v3_cuda_set_main_device(int main_device);
GGML_V3_API void ggml_v3_cuda_set_mul_mat_q(bool mul_mat_q);
GGML_V3_API void ggml_v3_cuda_set_scratch_size(size_t scratch_size);
GGML_V3_API void ggml_v3_cuda_free_scratch(void);
GGML_V3_API bool ggml_v3_cuda_compute_forward(struct ggml_v3_compute_params * params, struct ggml_v3_tensor * tensor);
GGML_V3_API int ggml_v3_cuda_get_device_count(void);
GGML_V3_API void ggml_v3_cuda_get_device_description(int device, char * description, size_t description_size);
#ifdef __cplusplus
}
#endif
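The header above freezes the old CUDA entry points under a ggml_v3_ prefix. A hedged usage sketch, assuming the same call order the pre-GGUF backend used (initialize, confirm cuBLAS actually loaded, then pick the device and optional tensor split):
#include "ggml_v3-cuda.h"
// Sketch only: returns false when no usable CUDA device is present.
static bool kcpp_init_v3_cuda(int main_device, const float * tensor_split)
{
    ggml_v3_init_cublas();                      // documented above as always succeeding
    if (!ggml_v3_cublas_loaded()) {
        return false;                           // no CUDA devices / cuBLAS not loaded
    }
    ggml_v3_cuda_set_main_device(main_device);
    if (tensor_split != nullptr) {
        ggml_v3_cuda_set_tensor_split(tensor_split);
    }
    return true;
}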

New file: otherarch/ggml_v3-opencl.cpp (1908 lines; diff suppressed because it is too large)

New file: otherarch/ggml_v3-opencl.h (25 lines)

@@ -0,0 +1,25 @@
#pragma once
#include "ggml_v3.h"
#ifdef __cplusplus
extern "C" {
#endif
GGML_V3_API void ggml_v3_cl_init(void);
GGML_V3_API void ggml_v3_cl_mul(const struct ggml_v3_tensor * src0, const struct ggml_v3_tensor * src1, struct ggml_v3_tensor * dst);
GGML_V3_API bool ggml_v3_cl_can_mul_mat(const struct ggml_v3_tensor * src0, const struct ggml_v3_tensor * src1, struct ggml_v3_tensor * dst);
GGML_V3_API size_t ggml_v3_cl_mul_mat_get_wsize(const struct ggml_v3_tensor * src0, const struct ggml_v3_tensor * src1, struct ggml_v3_tensor * dst);
GGML_V3_API void ggml_v3_cl_mul_mat(const struct ggml_v3_tensor * src0, const struct ggml_v3_tensor * src1, struct ggml_v3_tensor * dst, void * wdata, size_t wsize);
GGML_V3_API void * ggml_v3_cl_host_malloc(size_t size);
GGML_V3_API void ggml_v3_cl_host_free(void * ptr);
GGML_V3_API void ggml_v3_cl_free_data(const struct ggml_v3_tensor* tensor);
GGML_V3_API void ggml_v3_cl_transform_tensor(void * data, struct ggml_v3_tensor * tensor);
#ifdef __cplusplus
}
#endif
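Likewise for the CLBlast side: the v3 OpenCL header keeps the old query/size/dispatch pattern. A minimal sketch of how a matmul would be offloaded through it (assumed usage, with ggml_v3_cl_init expected to have run once at startup):
#include "ggml_v3-opencl.h"
#include <vector>
// Sketch only: returns false so the caller can fall back to the CPU path.
static bool kcpp_try_v3_cl_mul_mat(const struct ggml_v3_tensor * src0,
                                   const struct ggml_v3_tensor * src1,
                                   struct ggml_v3_tensor * dst)
{
    if (!ggml_v3_cl_can_mul_mat(src0, src1, dst)) {
        return false;                                        // not eligible for offload
    }
    const size_t wsize = ggml_v3_cl_mul_mat_get_wsize(src0, src1, dst);
    std::vector<char> wdata(wsize);                          // temporary work buffer
    ggml_v3_cl_mul_mat(src0, src1, dst, wdata.data(), wsize);
    return true;
}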

New file: otherarch/ggml_v3.c (28222 lines; diff suppressed because it is too large)

New file: otherarch/ggml_v3.h (2261 lines; diff suppressed because it is too large)

Modified file: GPT-2 v3 loader (gpt2_model_load / gpt2_eval)

@@ -1,4 +1,4 @@
#include "ggml.h"
#include "ggml_v3.h"
#include "otherarch.h"
#include "utils.h"
@@ -17,10 +17,10 @@
#include "model_adapter.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#include "ggml_v3-cuda.h"
#endif
#if defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#include "ggml_v3-opencl.h"
#endif
@@ -57,7 +57,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));
const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
const int32_t qntvr = hparams.ftype / GGML_V3_QNT_VERSION_FACTOR;
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
printf("%s: n_ctx = %d (%d)\n", __func__, hparams.n_ctx,origmaxctx);
@@ -67,7 +67,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
printf("%s: ftype = %d\n", __func__, hparams.ftype);
printf("%s: qntvr = %d\n", __func__, qntvr);
hparams.ftype %= GGML_QNT_VERSION_FACTOR;
hparams.ftype %= GGML_V3_QNT_VERSION_FACTOR;
}
// load vocab
@@ -113,8 +113,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
// for the big tensors, we have the option to store the data in 16-bit floats or quantized
// in order to save memory and also to speed up the computation
ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype));
if (wtype == GGML_TYPE_COUNT) {
ggml_v3_type wtype = ggml_v3_ftype_to_ggml_v3_type((ggml_v3_ftype) (model.hparams.ftype));
if (wtype == GGML_V3_TYPE_COUNT) {
fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n",
__func__, fname.c_str(), model.hparams.ftype);
return ModelLoadResult::FAIL;
@@ -136,33 +136,33 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
const int kv_heads = hparams.n_head; // 1 if MQA else hparams.n_head
const int kv_dim = kv_heads * head_dim;
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b
ctx_size += n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32); // ln_f_g
ctx_size += n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32); // ln_f_b
ctx_size += n_vocab*n_embd*ggml_type_sizef(wtype); // wte
ctx_size += n_ctx*n_embd*ggml_type_sizef(GGML_TYPE_F32); // wpe
ctx_size += n_vocab*n_embd*ggml_type_sizef(wtype); // lm_head
ctx_size += n_vocab*n_embd*ggml_v3_type_sizef(wtype); // wte
ctx_size += n_ctx*n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32); // wpe
ctx_size += n_vocab*n_embd*ggml_v3_type_sizef(wtype); // lm_head
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b
ctx_size += n_layer*(n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_1_g
ctx_size += n_layer*(n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_1_b
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_g
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_b
ctx_size += n_layer*(n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_2_g
ctx_size += n_layer*(n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_2_b
ctx_size += n_layer*((n_embd + 2*kv_dim)*n_embd*ggml_type_sizef(wtype)); // c_attn_attn_w // TODO:
ctx_size += n_layer*( (n_embd + 2*kv_dim)*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_attn_b
ctx_size += n_layer*((n_embd + 2*kv_dim)*n_embd*ggml_v3_type_sizef(wtype)); // c_attn_attn_w // TODO:
ctx_size += n_layer*( (n_embd + 2*kv_dim)*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // c_attn_attn_b
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_proj_b
ctx_size += n_layer*(n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_attn_proj_w
ctx_size += n_layer*( n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // c_attn_proj_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w
ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_mlp_fc_w
ctx_size += n_layer*( 4*n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // c_mlp_fc_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_mlp_proj_w
ctx_size += n_layer*( n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // c_mlp_proj_b
ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k
ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v
ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F16); // memory_k
ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F16); // memory_v
ctx_size += (6 + 12*n_layer)*1024; // object overhead
@@ -171,14 +171,14 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
// create the ggml context
{
struct ggml_init_params params;
struct ggml_v3_init_params params;
params.mem_size = ctx_size;
params.mem_buffer = NULL;
params.no_alloc = false;
model.ctx = ggml_init(params);
model.ctx = ggml_v3_init(params);
if (!model.ctx) {
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
fprintf(stderr, "%s: ggml_v3_init() failed\n", __func__);
return ModelLoadResult::FAIL;
}
}
@@ -198,12 +198,12 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
model.layers.resize(n_layer);
model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
model.ln_f_g = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
model.ln_f_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx);
model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.wte = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.wpe = ggml_v3_new_tensor_2d(ctx, GGML_V3_TYPE_F32, n_embd, n_ctx);
model.lm_head = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
// map by name
model.tensors["model/ln_f/g"] = model.ln_f_g;
@@ -216,23 +216,23 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
for (int i = 0; i < n_layer; ++i) {
auto & layer = model.layers[i];
layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_1_g = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.ln_1_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_2_g = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.ln_2_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd + 2*kv_dim);
layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd + 2*kv_dim);
layer.c_attn_attn_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_embd + 2*kv_dim);
layer.c_attn_attn_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd + 2*kv_dim);
layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.c_attn_proj_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_proj_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); //TODO: 4*n_embd = config.n_inner
layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd);
layer.c_mlp_fc_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); //TODO: 4*n_embd = config.n_inner
layer.c_mlp_fc_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, 4*n_embd);
layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.c_mlp_proj_w = ggml_v3_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
layer.c_mlp_proj_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
// map by name
model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g;
@@ -266,10 +266,10 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
const int n_mem = n_layer*std::max(origmaxctx,n_ctx);
const int n_elements = n_embd*n_mem;
model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
model.memory_k = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F16, n_elements);
model.memory_v = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F16, n_elements);
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
const size_t memory_size = ggml_v3_nbytes(model.memory_k) + ggml_v3_nbytes(model.memory_v);
printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
}
@@ -314,37 +314,37 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
__func__, name.data(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]);
return ModelLoadResult::FAIL;
}
if (ggml_nelements(tensor) != nelements) {
if (ggml_v3_nelements(tensor) != nelements) {
fprintf(stderr, "%s: tensor '%s' has wrong size in model file. got %d, expected %d\n",
__func__, name.data(), (int) ggml_nelements(tensor), nelements);
__func__, name.data(), (int) ggml_v3_nelements(tensor), nelements);
return ModelLoadResult::FAIL;
}
// for debugging
if (0) {
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_v3_type_name(ggml_v3_type(ttype)), ggml_v3_nbytes(tensor)/1024.0/1024.0, ggml_v3_nbytes(tensor));
}
const size_t bpe = ggml_type_size(ggml_type(ttype));
const size_t bpe = ggml_v3_type_size(ggml_v3_type(ttype));
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
if ((nelements*bpe)/ggml_v3_blck_size(tensor->type) != ggml_v3_nbytes(tensor)) {
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
__func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
__func__, name.data(), ggml_v3_nbytes(tensor), nelements*bpe);
return ModelLoadResult::FAIL;
}
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
fin.read(reinterpret_cast<char *>(tensor->data), ggml_v3_nbytes(tensor));
// GPT-2 models share the WTE tensor as the LM head
if (name == "model/wte" && has_lm_head == false) {
memcpy(model.lm_head->data, tensor->data, ggml_nbytes(tensor));
memcpy(model.lm_head->data, tensor->data, ggml_v3_nbytes(tensor));
}
if (name == "model/lm_head") {
has_lm_head = true;
}
total_size += ggml_nbytes(tensor);
total_size += ggml_v3_nbytes(tensor);
}
printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0);
@@ -366,20 +366,20 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
#endif
for (int i = 0; i < n_gpu; ++i) {
const auto & layer = model.layers[i];
layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;
layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_attn_w->backend = GGML_V3_BACKEND_GPU;
layer.c_attn_proj_w->backend = GGML_V3_BACKEND_GPU;
layer.c_mlp_fc_w->backend = GGML_V3_BACKEND_GPU;
layer.c_mlp_proj_w->backend = GGML_V3_BACKEND_GPU;
#if defined(GGML_USE_CLBLAST)
ggml_cl_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
ggml_v3_cl_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_v3_nbytes(layer.c_attn_attn_w);
ggml_v3_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_proj_w);
ggml_v3_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_v3_nbytes(layer.c_mlp_fc_w);
ggml_v3_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_v3_nbytes(layer.c_mlp_proj_w);
#else
ggml_cuda_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
ggml_v3_cuda_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_v3_nbytes(layer.c_attn_attn_w);
ggml_v3_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_proj_w);
ggml_v3_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_v3_nbytes(layer.c_mlp_fc_w);
ggml_v3_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_v3_nbytes(layer.c_mlp_proj_w);
#endif
}
#if defined(GGML_USE_CLBLAST)
@@ -448,48 +448,48 @@ bool gpt2_eval(
}
}
struct ggml_init_params params;
struct ggml_v3_init_params params;
params.mem_size = buf_size;
params.mem_buffer = buf;
params.no_alloc = false;
struct ggml_context * ctx0 = ggml_init(params);
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, 8192, false);
struct ggml_v3_context * ctx0 = ggml_v3_init(params);
struct ggml_v3_cgraph * gf = ggml_v3_new_graph_custom(ctx0, 8192, false);
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
struct ggml_v3_tensor * embd = ggml_v3_new_tensor_1d(ctx0, GGML_V3_TYPE_I32, N);
memcpy(embd->data, embd_inp.data(), N*ggml_v3_element_size(embd));
struct ggml_tensor * position = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
struct ggml_v3_tensor * position = ggml_v3_new_tensor_1d(ctx0, GGML_V3_TYPE_I32, N);
for (int i = 0; i < N; ++i) {
((int32_t *) position->data)[i] = n_past + i;
}
// wte + wpe
struct ggml_tensor * inpL =
ggml_add(ctx0,
ggml_get_rows(ctx0, model.wte, embd),
ggml_get_rows(ctx0, model.wpe, position));
struct ggml_v3_tensor * inpL =
ggml_v3_add(ctx0,
ggml_v3_get_rows(ctx0, model.wte, embd),
ggml_v3_get_rows(ctx0, model.wpe, position));
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * cur;
struct ggml_v3_tensor * cur;
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
ggml_v3_set_scratch(ctx0, { 0, scr0_size, scr0, });
}
// norm
{
// [ 768, N]
cur = ggml_norm(ctx0, inpL, default_norm_eps);
cur = ggml_v3_norm(ctx0, inpL, default_norm_eps);
// cur = ln_1_g*cur + ln_1_b
// [ 768, N]
cur = ggml_add(ctx0,
ggml_mul(ctx0,
ggml_repeat(ctx0, model.layers[il].ln_1_g, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_mul(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].ln_1_g, cur),
cur),
ggml_repeat(ctx0, model.layers[il].ln_1_b, cur));
ggml_v3_repeat(ctx0, model.layers[il].ln_1_b, cur));
}
// attn
@@ -501,104 +501,104 @@ bool gpt2_eval(
// cur = attn_w*cur + attn_b
// [2304, N]
{
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
model.layers[il].c_attn_attn_w,
cur);
cur = ggml_add(ctx0,
ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].c_attn_attn_b, cur),
cur);
}
// self-attention
{
struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd);
struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd);
struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd);
struct ggml_v3_tensor * Qcur = ggml_v3_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd);
struct ggml_v3_tensor * Kcur = ggml_v3_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd);
struct ggml_v3_tensor * Vcur = ggml_v3_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd);
// store key and value to memory
if (N >= 1) {
struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past));
struct ggml_v3_tensor * k = ggml_v3_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_v3_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
struct ggml_v3_tensor * v = ggml_v3_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_v3_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
ggml_v3_build_forward_expand(gf, ggml_v3_cpy(ctx0, Kcur, k));
ggml_v3_build_forward_expand(gf, ggml_v3_cpy(ctx0, Vcur, v));
}
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
// [64, N, 12]
struct ggml_tensor * Q =
ggml_permute(ctx0,
ggml_cpy(ctx0,
struct ggml_v3_tensor * Q =
ggml_v3_permute(ctx0,
ggml_v3_cpy(ctx0,
Qcur,
ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)),
ggml_v3_new_tensor_3d(ctx0, GGML_V3_TYPE_F32, n_embd/n_head, n_head, N)),
0, 2, 1, 3);
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
// [64, n_past + N, 12]
struct ggml_tensor * K =
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
struct ggml_v3_tensor * K =
ggml_v3_permute(ctx0,
ggml_v3_reshape_3d(ctx0,
ggml_v3_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_v3_element_size(model.memory_k)*n_embd),
n_embd/n_head, n_head, n_past + N),
0, 2, 1, 3); //TODO: need to be tiled
// GG: flash attention
//struct ggml_tensor * V =
// ggml_cpy(ctx0,
// ggml_permute(ctx0,
// ggml_reshape_3d(ctx0,
// ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
//struct ggml_v3_tensor * V =
// ggml_v3_cpy(ctx0,
// ggml_v3_permute(ctx0,
// ggml_v3_reshape_3d(ctx0,
// ggml_v3_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_v3_element_size(model.memory_v)*n_embd),
// n_embd/n_head, n_head, n_past + N),
// 1, 2, 0, 3),
// ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head));
// ggml_v3_new_tensor_3d(ctx0, GGML_V3_TYPE_F32, n_past + N, n_embd/n_head, n_head));
//struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true);
//struct ggml_v3_tensor * KQV = ggml_v3_flash_attn(ctx0, Q, K, V, true);
// K * Q
// [n_past + N, N, 12]
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); //TODO: check if it broadcasts
struct ggml_v3_tensor * KQ = ggml_v3_mul_mat(ctx0, K, Q); //TODO: check if it broadcasts
// KQ_scaled = KQ / sqrt(n_embd/n_head)
// [n_past + N, N, 12]
struct ggml_tensor * KQ_scaled =
ggml_scale_inplace(ctx0,
struct ggml_v3_tensor * KQ_scaled =
ggml_v3_scale_inplace(ctx0,
KQ,
1.0f/sqrt(float(n_embd)/n_head)
);
// KQ_masked = mask_past(KQ_scaled)
// [n_past + N, N, 12]
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
struct ggml_v3_tensor * KQ_masked = ggml_v3_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
// KQ = soft_max(KQ_masked)
// [n_past + N, N, 12]
struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
struct ggml_v3_tensor * KQ_soft_max = ggml_v3_soft_max_inplace(ctx0, KQ_masked);
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
// [n_past + N, 64, 12]
struct ggml_tensor * V_trans =
ggml_cpy(ctx0,
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
struct ggml_v3_tensor * V_trans =
ggml_v3_cpy(ctx0,
ggml_v3_permute(ctx0,
ggml_v3_reshape_3d(ctx0,
ggml_v3_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_v3_element_size(model.memory_v)*n_embd),
n_embd/n_head, n_head, n_past + N),
1, 2, 0, 3),
ggml_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd/n_head, n_head));
ggml_v3_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd/n_head, n_head));
// KQV = transpose(V) * KQ_soft_max
// [64, N, 12]
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);
struct ggml_v3_tensor * KQV = ggml_v3_mul_mat(ctx0, V_trans, KQ_soft_max);
// KQV_merged = KQV.permute(0, 2, 1, 3)
// [64, 12, N]
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
struct ggml_v3_tensor * KQV_merged = ggml_v3_permute(ctx0, KQV, 0, 2, 1, 3);
// cur = KQV_merged.contiguous().view(n_embd, N)
// [768, N]
cur = ggml_cpy(ctx0,
cur = ggml_v3_cpy(ctx0,
KQV_merged,
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
ggml_v3_new_tensor_2d(ctx0, GGML_V3_TYPE_F32, n_embd, N));
}
// projection
@@ -610,37 +610,37 @@ bool gpt2_eval(
// cur = proj_w*cur + proj_b
// [768, N]
{
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
model.layers[il].c_attn_proj_w,
cur);
cur = ggml_add(ctx0,
ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].c_attn_proj_b, cur),
cur);
}
// add the input
cur = ggml_add(ctx0, cur, inpL);
cur = ggml_v3_add(ctx0, cur, inpL);
struct ggml_tensor * inpFF = cur;
struct ggml_v3_tensor * inpFF = cur;
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr1_size, scr1, });
ggml_v3_set_scratch(ctx0, { 0, scr1_size, scr1, });
}
// feed-forward network
{
// norm
{
cur = ggml_norm(ctx0, inpFF, default_norm_eps);
cur = ggml_v3_norm(ctx0, inpFF, default_norm_eps);
// cur = ln_2_g*cur + ln_2_b
// [ 768, N]
cur = ggml_add(ctx0,
ggml_mul(ctx0,
ggml_repeat(ctx0, model.layers[il].ln_2_g, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_mul(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].ln_2_g, cur),
cur),
ggml_repeat(ctx0, model.layers[il].ln_2_b, cur));
ggml_v3_repeat(ctx0, model.layers[il].ln_2_b, cur));
}
// fully connected
@@ -651,17 +651,17 @@ bool gpt2_eval(
//
// cur = fc_w*cur + fc_b
// [3072, N]
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
model.layers[il].c_mlp_fc_w,
cur);
cur = ggml_add(ctx0,
ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
cur);
// GELU activation
// [3072, N]
cur = ggml_gelu(ctx0, cur);
cur = ggml_v3_gelu(ctx0, cur);
// projection
// [ 768, 3072] - model.layers[il].c_mlp_proj_w
@@ -671,71 +671,71 @@ bool gpt2_eval(
//
// cur = proj_w*cur + proj_b
// [768, N]
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
model.layers[il].c_mlp_proj_w,
cur);
cur = ggml_add(ctx0,
ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
cur);
}
// input for next layer
inpL = ggml_add(ctx0, cur, inpFF);
inpL = ggml_v3_add(ctx0, cur, inpFF);
}
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
ggml_v3_set_scratch(ctx0, { 0, scr0_size, scr0, });
}
// norm
{
// [ 768, N]
inpL = ggml_norm(ctx0, inpL, default_norm_eps);
inpL = ggml_v3_norm(ctx0, inpL, default_norm_eps);
// inpL = ln_f_g*inpL + ln_f_b
// [ 768, N]
inpL = ggml_add(ctx0,
ggml_mul(ctx0,
ggml_repeat(ctx0, model.ln_f_g, inpL),
inpL = ggml_v3_add(ctx0,
ggml_v3_mul(ctx0,
ggml_v3_repeat(ctx0, model.ln_f_g, inpL),
inpL),
ggml_repeat(ctx0, model.ln_f_b, inpL));
ggml_v3_repeat(ctx0, model.ln_f_b, inpL));
}
if(use_scratch){
ggml_set_scratch(ctx0, { 0, 0, nullptr, });
ggml_v3_set_scratch(ctx0, { 0, 0, nullptr, });
}
// inpL = WTE * inpL
// [ 768, 50257] - model.lm_head
// [ 768, N] - inpL
inpL = ggml_mul_mat(ctx0, model.lm_head, inpL);
inpL = ggml_v3_mul_mat(ctx0, model.lm_head, inpL);
// logits -> probs
//inpL = ggml_soft_max_inplace(ctx0, inpL);
//inpL = ggml_v3_soft_max_inplace(ctx0, inpL);
// run the computation
ggml_build_forward_expand(gf, inpL);
ggml_v3_build_forward_expand(gf, inpL);
kcpp_graph_compute_helper(gf, n_threads);
//if (n_past%100 == 0) {
// ggml_graph_print (&gf);
// ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
// ggml_v3_graph_print (&gf);
// ggml_v3_graph_dump_dot(&gf, NULL, "gpt-2.dot");
//}
//embd_w.resize(n_vocab*N);
//memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
//memcpy(embd_w.data(), ggml_v3_get_data(inpL), sizeof(float)*n_vocab*N);
// return result just for the last token
embd_w.resize(n_vocab);
memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
memcpy(embd_w.data(), (float *) ggml_v3_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
if (mem_per_token == 0) {
mem_per_token = ggml_used_mem(ctx0)/N;
mem_per_token = ggml_v3_used_mem(ctx0)/N;
}
//printf("used_mem = %zu MB\n", ggml_used_mem(ctx0)/(1024*1024));
//printf("used_mem = %zu MB\n", ggml_v3_used_mem(ctx0)/(1024*1024));
ggml_free(ctx0);
ggml_v3_free(ctx0);
return true;
}
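The conversion above is mechanical (ggml_* becomes ggml_v3_*), but the context lifecycle it relies on is worth spelling out once. A self-contained sketch of that lifecycle, with an illustrative arena size rather than the model-derived ctx_size computed in the loader:
#include "ggml_v3.h"
static void v3_context_lifecycle_demo(void)
{
    struct ggml_v3_init_params params;
    params.mem_size   = 16u * 1024u * 1024u;    // illustrative 16 MB arena
    params.mem_buffer = NULL;                   // let ggml_v3 allocate the arena
    params.no_alloc   = false;
    struct ggml_v3_context * ctx = ggml_v3_init(params);
    if (!ctx) {
        return;                                 // mirrors the FAIL path in the loaders
    }
    struct ggml_v3_tensor * t = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, 768);
    (void)t;                                    // ... build and compute a graph as in gpt2_eval ...
    ggml_v3_free(ctx);                          // releases everything allocated in the arena
}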

Modified file: GPT-J v3 loader (gptj_model_load / gptj_eval)

@@ -1,4 +1,4 @@
#include "ggml.h"
#include "ggml_v3.h"
#include "otherarch.h"
#include "utils.h"
@@ -17,10 +17,10 @@
#include "model_adapter.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#include "ggml_v3-cuda.h"
#endif
#if defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#include "ggml_v3-opencl.h"
#endif
// load the model's weights from a file
@@ -57,7 +57,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot));
fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));
const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
const int32_t qntvr = hparams.ftype / GGML_V3_QNT_VERSION_FACTOR;
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
printf("%s: n_ctx = %d (%d)\n", __func__, hparams.n_ctx,origmaxctx);
@@ -70,7 +70,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
hparams.n_ctx = std::max(origmaxctx,hparams.n_ctx);
hparams.ftype %= GGML_QNT_VERSION_FACTOR;
hparams.ftype %= GGML_V3_QNT_VERSION_FACTOR;
}
// load vocab
@@ -102,8 +102,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
// for the big tensors, we have the option to store the data in 16-bit floats or quantized
// in order to save memory and also to speed up the computation
ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype));
if (wtype == GGML_TYPE_COUNT) {
ggml_v3_type wtype = ggml_v3_ftype_to_ggml_v3_type((ggml_v3_ftype) (model.hparams.ftype));
if (wtype == GGML_V3_TYPE_COUNT) {
fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n",
__func__, fname.c_str(), model.hparams.ftype);
return ModelLoadResult::FAIL;
@@ -111,7 +111,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
auto & ctx = model.ctx;
auto memory_type = GGML_TYPE_F16;
auto memory_type = GGML_V3_TYPE_F16;
size_t ctx_size = 0;
@@ -123,31 +123,31 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
const int n_ctx = hparams.n_ctx;
const int n_vocab = hparams.n_vocab;
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b
ctx_size += n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32); // ln_f_g
ctx_size += n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32); // ln_f_b
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // wte
ctx_size += n_embd*n_vocab*ggml_v3_type_sizef(wtype); // wte
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // lmh_g
ctx_size += n_vocab*ggml_type_sizef(GGML_TYPE_F32); // lmh_b
ctx_size += n_embd*n_vocab*ggml_v3_type_sizef(wtype); // lmh_g
ctx_size += n_vocab*ggml_v3_type_sizef(GGML_V3_TYPE_F32); // lmh_b
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b
ctx_size += n_layer*(n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_1_g
ctx_size += n_layer*(n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_1_b
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_q_proj_w
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_k_proj_w
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_v_proj_w
ctx_size += n_layer*(n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_attn_q_proj_w
ctx_size += n_layer*(n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_attn_k_proj_w
ctx_size += n_layer*(n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_attn_v_proj_w
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w
ctx_size += n_layer*(n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_attn_proj_w
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w
ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_mlp_fc_w
ctx_size += n_layer*( 4*n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // c_mlp_fc_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_mlp_proj_w
ctx_size += n_layer*( n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // c_mlp_proj_b
ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_v3_type_sizef(memory_type); // memory_k
ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_v3_type_sizef(memory_type); // memory_v
ctx_size += (5 + 10*n_layer)*512; // object overhead
@@ -156,15 +156,15 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
// create the ggml context
{
struct ggml_init_params params;
struct ggml_v3_init_params params;
params.mem_size = ctx_size;
params.mem_buffer = NULL;
params.no_alloc = false;
model.ctx = ggml_init(params);
model.ctx = ggml_v3_init(params);
if (!model.ctx) {
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
fprintf(stderr, "%s: ggml_v3_init() failed\n", __func__);
return ModelLoadResult::FAIL;
}
}
@@ -179,13 +179,13 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
model.layers.resize(n_layer);
model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.wte = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
model.ln_f_g = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
model.ln_f_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
model.lmh_g = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.lmh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_vocab);
model.lmh_g = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.lmh_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_vocab);
// map by name
model.tensors["transformer.wte.weight"] = model.wte;
@@ -199,20 +199,20 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
for (int i = 0; i < n_layer; ++i) {
auto & layer = model.layers[i];
layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_1_g = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.ln_1_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.c_attn_q_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_k_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_v_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_q_proj_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_k_proj_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_v_proj_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_proj_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd);
layer.c_mlp_fc_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
layer.c_mlp_fc_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, 4*n_embd);
layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.c_mlp_proj_w = ggml_v3_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
layer.c_mlp_proj_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
// map by name
model.tensors["transformer.h." + std::to_string(i) + ".ln_1.weight"] = layer.ln_1_g;
@@ -243,10 +243,10 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
const int n_mem = n_layer*std::max(origmaxctx,n_ctx);
const int n_elements = n_embd*n_mem;
model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements);
model.memory_k = ggml_v3_new_tensor_1d(ctx, memory_type, n_elements);
model.memory_v = ggml_v3_new_tensor_1d(ctx, memory_type, n_elements);
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
const size_t memory_size = ggml_v3_nbytes(model.memory_k) + ggml_v3_nbytes(model.memory_v);
printf("%s: memory_size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
}
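To make the KV sizing above concrete: with GPT-J 6B's usual hyperparameters (n_layer = 28, n_embd = 4096, assumed here purely for illustration) and a 2048-token context, the two F16 memory tensors work out to roughly 896 MB, which is the scale the memory_size printout reports.
#include <cstdio>
// Illustrative arithmetic only; real values come from the loaded hparams.
int main() {
    const long long n_layer = 28, n_embd = 4096, n_ctx = 2048;  // GPT-J 6B, assumed
    const long long n_mem      = n_layer * n_ctx;               // 57,344 KV cells
    const long long n_elements = n_embd * n_mem;                // 234,881,024 per tensor
    const long long f16_bytes  = 2;                             // GGML_V3_TYPE_F16
    const double memory_mb = 2.0 * n_elements * f16_bytes / (1024.0 * 1024.0);
    std::printf("memory_k + memory_v = %.2f MB\n", memory_mb);  // ~896 MB
    return 0;
}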
@@ -287,7 +287,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
}
auto tensor = model.tensors[name.data()];
if (ggml_nelements(tensor) != nelements) {
if (ggml_v3_nelements(tensor) != nelements) {
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
return ModelLoadResult::FAIL;
}
@@ -299,7 +299,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
if(tensor->ne[0]==ne[1] && tensor->ne[1]==ne[0] && should_transpose_layer(name))
{
printf("\nFound a transposed tensor. This could be an older or newer model. Retrying load...");
ggml_free(ctx);
ggml_v3_free(ctx);
return ModelLoadResult::RETRY_LOAD;
}
else
@@ -313,21 +313,21 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
// for debugging
if (0) {
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_v3_type_name(ggml_v3_type(ttype)), ggml_v3_nbytes(tensor)/1024.0/1024.0, ggml_v3_nbytes(tensor));
}
const size_t bpe = ggml_type_size(ggml_type(ttype));
const size_t bpe = ggml_v3_type_size(ggml_v3_type(ttype));
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
if ((nelements*bpe)/ggml_v3_blck_size(tensor->type) != ggml_v3_nbytes(tensor)) {
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
__func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
__func__, name.data(), ggml_v3_nbytes(tensor), nelements*bpe);
return ModelLoadResult::FAIL;
}
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
fin.read(reinterpret_cast<char *>(tensor->data), ggml_v3_nbytes(tensor));
//printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ttype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
total_size += ggml_nbytes(tensor);
//printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ttype == 0 ? "float" : "f16", ggml_v3_nbytes(tensor)/1024.0/1024.0);
total_size += ggml_v3_nbytes(tensor);
if (++n_tensors % 8 == 0) {
printf(".");
fflush(stdout);
@@ -355,26 +355,26 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
#endif
for (int i = 0; i < n_gpu; ++i) {
const auto & layer = model.layers[i];
layer.c_attn_q_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_k_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_v_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_q_proj_w->backend = GGML_V3_BACKEND_GPU;
layer.c_attn_k_proj_w->backend = GGML_V3_BACKEND_GPU;
layer.c_attn_v_proj_w->backend = GGML_V3_BACKEND_GPU;
layer.c_attn_proj_w->backend = GGML_V3_BACKEND_GPU;
layer.c_mlp_fc_w->backend = GGML_V3_BACKEND_GPU;
layer.c_mlp_proj_w->backend = GGML_V3_BACKEND_GPU;
#if defined(GGML_USE_CLBLAST)
ggml_cl_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
ggml_cl_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
ggml_cl_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
ggml_v3_cl_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_q_proj_w);
ggml_v3_cl_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_k_proj_w);
ggml_v3_cl_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_v_proj_w);
ggml_v3_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_proj_w);
ggml_v3_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_v3_nbytes(layer.c_mlp_fc_w);
ggml_v3_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_v3_nbytes(layer.c_mlp_proj_w);
#else
ggml_cuda_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
ggml_cuda_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
ggml_cuda_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
ggml_v3_cuda_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_q_proj_w);
ggml_v3_cuda_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_k_proj_w);
ggml_v3_cuda_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_v_proj_w);
ggml_v3_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_proj_w);
ggml_v3_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_v3_nbytes(layer.c_mlp_fc_w);
ggml_v3_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_v3_nbytes(layer.c_mlp_proj_w);
#endif
}
#if defined(GGML_USE_CLBLAST)
@@ -448,45 +448,45 @@ bool gptj_eval(
}
}
struct ggml_init_params params;
struct ggml_v3_init_params params;
params.mem_size = buf_size;
params.mem_buffer = buf;
params.no_alloc = false;
struct ggml_context * ctx0 = ggml_init(params);
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, GGML_MAX_NODES, false);
struct ggml_v3_context * ctx0 = ggml_v3_init(params);
struct ggml_v3_cgraph * gf = ggml_v3_new_graph_custom(ctx0, GGML_V3_MAX_NODES, false);
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
struct ggml_v3_tensor * embd = ggml_v3_new_tensor_1d(ctx0, GGML_V3_TYPE_I32, N);
memcpy(embd->data, embd_inp.data(), N*ggml_v3_element_size(embd));
// wte
struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd);
struct ggml_v3_tensor * inpL = ggml_v3_get_rows(ctx0, model.wte, embd);
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * cur;
struct ggml_v3_tensor * cur;
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
ggml_v3_set_scratch(ctx0, { 0, scr0_size, scr0, });
}
// norm
{
cur = ggml_norm(ctx0, inpL, default_norm_eps);
cur = ggml_v3_norm(ctx0, inpL, default_norm_eps);
// cur = ln_1_g*cur + ln_1_b
cur = ggml_add(ctx0,
ggml_mul(ctx0,
ggml_repeat(ctx0, model.layers[il].ln_1_g, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_mul(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].ln_1_g, cur),
cur),
ggml_repeat(ctx0, model.layers[il].ln_1_b, cur));
ggml_v3_repeat(ctx0, model.layers[il].ln_1_b, cur));
}
struct ggml_tensor * inpSA = cur;
struct ggml_v3_tensor * inpSA = cur;
// self-attention
{
struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
struct ggml_v3_tensor * KQ_pos = ggml_v3_new_tensor_1d(ctx0, GGML_V3_TYPE_I32, N);
{
int * data = (int *) KQ_pos->data;
for (int i = 0; i < N; ++i) {
@@ -494,170 +494,170 @@ bool gptj_eval(
}
}
struct ggml_tensor *Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd / n_head, n_head, N), KQ_pos, n_rot, 0, n_ctx, 0, freq_base, freq_scale, 0, 1, 32, 1);
struct ggml_tensor *Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd / n_head, n_head, N), KQ_pos, n_rot, 0, n_ctx, 0, freq_base, freq_scale, 0, 1, 32, 1);
struct ggml_v3_tensor *Qcur = ggml_v3_rope_custom_inplace(ctx0, ggml_v3_reshape_3d(ctx0, ggml_v3_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd / n_head, n_head, N), KQ_pos, n_rot, 0, n_ctx, 0, freq_base, freq_scale, 0, 1, 32, 1);
struct ggml_v3_tensor *Kcur = ggml_v3_rope_custom_inplace(ctx0, ggml_v3_reshape_3d(ctx0, ggml_v3_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd / n_head, n_head, N), KQ_pos, n_rot, 0, n_ctx, 0, freq_base, freq_scale, 0, 1, 32, 1);
// store key and value to memory
{
struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_v_proj_w, cur));
struct ggml_v3_tensor * Vcur = ggml_v3_transpose(ctx0, ggml_v3_mul_mat(ctx0, model.layers[il].c_attn_v_proj_w, cur));
struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
struct ggml_tensor * v = ggml_view_2d(ctx0, model.memory_v, N, n_embd,
( n_ctx)*ggml_element_size(model.memory_v),
(il*n_ctx)*ggml_element_size(model.memory_v)*n_embd + n_past*ggml_element_size(model.memory_v));
struct ggml_v3_tensor * k = ggml_v3_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_v3_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
struct ggml_v3_tensor * v = ggml_v3_view_2d(ctx0, model.memory_v, N, n_embd,
( n_ctx)*ggml_v3_element_size(model.memory_v),
(il*n_ctx)*ggml_v3_element_size(model.memory_v)*n_embd + n_past*ggml_v3_element_size(model.memory_v));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
ggml_v3_build_forward_expand(gf, ggml_v3_cpy(ctx0, Kcur, k));
ggml_v3_build_forward_expand(gf, ggml_v3_cpy(ctx0, Vcur, v));
}
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
struct ggml_tensor * Q =
ggml_permute(ctx0,
struct ggml_v3_tensor * Q =
ggml_v3_permute(ctx0,
Qcur,
0, 2, 1, 3);
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
struct ggml_tensor * K =
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
struct ggml_v3_tensor * K =
ggml_v3_permute(ctx0,
ggml_v3_reshape_3d(ctx0,
ggml_v3_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_v3_element_size(model.memory_k)*n_embd),
n_embd/n_head, n_head, n_past + N),
0, 2, 1, 3);
// K * Q
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
struct ggml_v3_tensor * KQ = ggml_v3_mul_mat(ctx0, K, Q);
// KQ_scaled = KQ / sqrt(n_embd/n_head)
struct ggml_tensor * KQ_scaled =
ggml_scale_inplace(ctx0,
struct ggml_v3_tensor * KQ_scaled =
ggml_v3_scale_inplace(ctx0,
KQ,
1.0f/sqrt(float(n_embd)/n_head)
);
// KQ_masked = mask_past(KQ_scaled)
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
struct ggml_v3_tensor * KQ_masked = ggml_v3_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
// KQ = soft_max(KQ_masked)
struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
struct ggml_v3_tensor * KQ_soft_max = ggml_v3_soft_max_inplace(ctx0, KQ_masked);
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
struct ggml_tensor * V =
ggml_view_3d(ctx0, model.memory_v,
struct ggml_v3_tensor * V =
ggml_v3_view_3d(ctx0, model.memory_v,
n_past + N, n_embd/n_head, n_head,
n_ctx*ggml_element_size(model.memory_v),
n_ctx*ggml_element_size(model.memory_v)*n_embd/n_head,
il*n_ctx*ggml_element_size(model.memory_v)*n_embd);
n_ctx*ggml_v3_element_size(model.memory_v),
n_ctx*ggml_v3_element_size(model.memory_v)*n_embd/n_head,
il*n_ctx*ggml_v3_element_size(model.memory_v)*n_embd);
// KQV = transpose(V) * KQ_soft_max
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
struct ggml_v3_tensor * KQV = ggml_v3_mul_mat(ctx0, V, KQ_soft_max);
// KQV_merged = KQV.permute(0, 2, 1, 3)
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
struct ggml_v3_tensor * KQV_merged = ggml_v3_permute(ctx0, KQV, 0, 2, 1, 3);
// cur = KQV_merged.contiguous().view(n_embd, N)
cur = ggml_cpy(ctx0,
cur = ggml_v3_cpy(ctx0,
KQV_merged,
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
ggml_v3_new_tensor_2d(ctx0, GGML_V3_TYPE_F32, n_embd, N));
// projection (no bias)
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
model.layers[il].c_attn_proj_w,
cur);
}
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr1_size, scr1, });
ggml_v3_set_scratch(ctx0, { 0, scr1_size, scr1, });
}
struct ggml_tensor * inpFF = cur;
struct ggml_v3_tensor * inpFF = cur;
// feed-forward network
// this is independent of the self-attention result, so it could be done in parallel to the self-attention
{
// note here we pass inpSA instead of cur
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
model.layers[il].c_mlp_fc_w,
inpSA);
cur = ggml_add(ctx0,
ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
cur);
// GELU activation
cur = ggml_gelu(ctx0, cur);
cur = ggml_v3_gelu(ctx0, cur);
// projection
// cur = proj_w*cur + proj_b
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
model.layers[il].c_mlp_proj_w,
cur);
cur = ggml_add(ctx0,
ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
cur);
}
// self-attention + FF
cur = ggml_add(ctx0, cur, inpFF);
cur = ggml_v3_add(ctx0, cur, inpFF);
// input for next layer
inpL = ggml_add(ctx0, cur, inpL);
inpL = ggml_v3_add(ctx0, cur, inpL);
}
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
ggml_v3_set_scratch(ctx0, { 0, scr0_size, scr0, });
}
// norm
{
inpL = ggml_norm(ctx0, inpL, default_norm_eps);
inpL = ggml_v3_norm(ctx0, inpL, default_norm_eps);
// inpL = ln_f_g*inpL + ln_f_b
inpL = ggml_add(ctx0,
ggml_mul(ctx0,
ggml_repeat(ctx0, model.ln_f_g, inpL),
inpL = ggml_v3_add(ctx0,
ggml_v3_mul(ctx0,
ggml_v3_repeat(ctx0, model.ln_f_g, inpL),
inpL),
ggml_repeat(ctx0, model.ln_f_b, inpL));
ggml_v3_repeat(ctx0, model.ln_f_b, inpL));
}
if(use_scratch){
ggml_set_scratch(ctx0, { 0, 0, nullptr, });
ggml_v3_set_scratch(ctx0, { 0, 0, nullptr, });
}
// lm_head
{
inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL);
inpL = ggml_v3_mul_mat(ctx0, model.lmh_g, inpL);
inpL = ggml_add(ctx0,
ggml_repeat(ctx0, model.lmh_b, inpL),
inpL = ggml_v3_add(ctx0,
ggml_v3_repeat(ctx0, model.lmh_b, inpL),
inpL);
}
// logits -> probs
//inpL = ggml_soft_max_inplace(ctx0, inpL);
//inpL = ggml_v3_soft_max_inplace(ctx0, inpL);
// run the computation
ggml_build_forward_expand(gf, inpL);
ggml_v3_build_forward_expand(gf, inpL);
kcpp_graph_compute_helper(gf, n_threads);
//if (n_past%100 == 0) {
// ggml_graph_print (&gf);
// ggml_graph_dump_dot(&gf, NULL, "gpt-j.dot");
// ggml_v3_graph_print (&gf);
// ggml_v3_graph_dump_dot(&gf, NULL, "gpt-j.dot");
//}
//embd_w.resize(n_vocab*N);
//memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
//memcpy(embd_w.data(), ggml_v3_get_data(inpL), sizeof(float)*n_vocab*N);
// return result for just the last token
embd_w.resize(n_vocab);
memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
memcpy(embd_w.data(), (float *) ggml_v3_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
if (mem_per_token == 0) {
mem_per_token = ggml_used_mem(ctx0)/N;
mem_per_token = ggml_v3_used_mem(ctx0)/N;
}
//printf("used_mem = %zu\n", ggml_used_mem(ctx0));
//printf("used_mem = %zu\n", ggml_v3_used_mem(ctx0));
ggml_free(ctx0);
ggml_v3_free(ctx0);
return true;
}
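Reference sketch (not part of the commit): the hunks above wire the GPT-J attention graph through the renamed ggml_v3 API, and the comments in them spell out the math (KQ, KQ / sqrt(n_embd/n_head), causal mask, softmax, KQV). The standalone C++ below reproduces that computation on plain float buffers for one head; the names, shapes, and row-major layout are illustrative assumptions, not the ggml_v3 API.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// out[i] = sum_j softmax_j( (K_j . Q_i) / sqrt(d) ) * V_j, with j > i masked out.
static void naive_causal_attention(const std::vector<float>& Q,   // [n, d]
                                   const std::vector<float>& K,   // [n, d]
                                   const std::vector<float>& V,   // [n, d]
                                   std::vector<float>& out,       // [n, d]
                                   int n, int d) {
    const float scale = 1.0f / std::sqrt((float)d);
    std::vector<float> row(n);
    for (int i = 0; i < n; ++i) {
        // KQ_scaled and KQ_masked
        float maxv = -1e30f;
        for (int j = 0; j < n; ++j) {
            float s = 0.0f;
            for (int k = 0; k < d; ++k) s += K[j*d + k] * Q[i*d + k];
            row[j] = (j <= i) ? s * scale : -1e30f;   // causal mask
            maxv = std::max(maxv, row[j]);
        }
        // KQ_soft_max
        float sum = 0.0f;
        for (int j = 0; j < n; ++j) { row[j] = std::exp(row[j] - maxv); sum += row[j]; }
        for (int j = 0; j < n; ++j) row[j] /= sum;
        // KQV
        for (int k = 0; k < d; ++k) {
            float acc = 0.0f;
            for (int j = 0; j < n; ++j) acc += row[j] * V[j*d + k];
            out[i*d + k] = acc;
        }
    }
}

int main() {
    const int n = 4, d = 8;                            // toy sizes for illustration
    std::vector<float> Q(n*d, 0.1f), K(n*d, 0.2f), V(n*d, 0.3f), out(n*d);
    naive_causal_attention(Q, K, V, out, n, d);
    printf("out[0] = %f\n", out[0]);
}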

File diff suppressed because it is too large

View file

@@ -1,10 +1,10 @@
#ifndef LLAMA_V3_H
#define LLAMA_V3_H
#include "ggml.h"
#include "ggml_v3.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#define LLAMA_V3_MAX_DEVICES GGML_CUDA_MAX_DEVICES
#include "ggml_v3-cuda.h"
#define LLAMA_V3_MAX_DEVICES GGML_V3_CUDA_MAX_DEVICES
#else
#define LLAMA_V3_MAX_DEVICES 1
#endif // GGML_USE_CUBLAS
@@ -477,9 +477,9 @@ extern "C" {
#include <vector>
#include <string>
struct ggml_tensor;
struct ggml_v3_tensor;
const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_v3_internal_get_tensor_map(struct llama_v3_context * ctx);
const std::vector<std::pair<std::string, struct ggml_v3_tensor *>>& llama_v3_internal_get_tensor_map(struct llama_v3_context * ctx);
#endif

View file

@@ -1,4 +1,4 @@
#include "ggml.h"
#include "ggml_v3.h"
#include "otherarch.h"
#include "utils.h"
@@ -17,10 +17,10 @@
#include "model_adapter.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#include "ggml_v3-cuda.h"
#endif
#if defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#include "ggml_v3-opencl.h"
#endif
// load the model's weights from a file
@@ -58,7 +58,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
hparams.n_ctx = std::min(hparams.max_seq_len, hparams.n_ctx);
const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
const int32_t qntvr = hparams.ftype / GGML_V3_QNT_VERSION_FACTOR;
printf("%s: d_model = %d\n", __func__, hparams.d_model);
printf("%s: max_seq_len = %d\n", __func__, hparams.max_seq_len);
@@ -71,7 +71,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
printf("%s: ftype = %d\n", __func__, hparams.ftype);
printf("%s: qntvr = %d\n", __func__, qntvr);
hparams.ftype %= GGML_QNT_VERSION_FACTOR;
hparams.ftype %= GGML_V3_QNT_VERSION_FACTOR;
}
// load vocab
@@ -107,8 +107,8 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
// for the big tensors, we have the option to store the data in 16-bit
// floats or quantized in order to save memory and also to speed up the
// computation
ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype)(model.hparams.ftype));
if (wtype == GGML_TYPE_COUNT) {
ggml_v3_type wtype = ggml_v3_ftype_to_ggml_v3_type((ggml_v3_ftype)(model.hparams.ftype));
if (wtype == GGML_V3_TYPE_COUNT) {
fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", __func__, fname.c_str(),
model.hparams.ftype);
return false;
@@ -126,18 +126,18 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
const size_t n_layer = hparams.n_layers;
const size_t n_vocab = hparams.n_vocab;
ctx_size += n_embd * n_vocab * ggml_type_sizef(wtype); // wte_weight
ctx_size += n_embd * ggml_type_sizef(GGML_TYPE_F32); // norm_f_weight
ctx_size += n_embd * n_vocab * ggml_v3_type_sizef(wtype); // wte_weight
ctx_size += n_embd * ggml_v3_type_sizef(GGML_V3_TYPE_F32); // norm_f_weight
ctx_size += n_layer * (n_embd * ggml_type_sizef(GGML_TYPE_F32)); // ln_1_weight
ctx_size += n_layer * (3 * n_embd * n_embd * ggml_type_sizef(wtype)); // attn_Wqkv_weight
ctx_size += n_layer * (n_embd * n_embd * ggml_type_sizef(wtype)); // attn_out_proj_weight
ctx_size += n_layer * (n_embd * ggml_type_sizef(GGML_TYPE_F32)); // ln_2_weight
ctx_size += n_layer * (4 * n_embd * n_embd * ggml_type_sizef(wtype)); // mlp_mlp_up_weight
ctx_size += n_layer * (n_embd * n_embd * 4 * ggml_type_sizef(wtype)); // mlp_mlp_down_weight
ctx_size += n_layer * (n_embd * ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_1_weight
ctx_size += n_layer * (3 * n_embd * n_embd * ggml_v3_type_sizef(wtype)); // attn_Wqkv_weight
ctx_size += n_layer * (n_embd * n_embd * ggml_v3_type_sizef(wtype)); // attn_out_proj_weight
ctx_size += n_layer * (n_embd * ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_2_weight
ctx_size += n_layer * (4 * n_embd * n_embd * ggml_v3_type_sizef(wtype)); // mlp_mlp_up_weight
ctx_size += n_layer * (n_embd * n_embd * 4 * ggml_v3_type_sizef(wtype)); // mlp_mlp_down_weight
ctx_size += n_ctx * n_layer * n_embd * ggml_type_sizef(GGML_TYPE_F16); // memory_k
ctx_size += n_ctx * n_layer * n_embd * ggml_type_sizef(GGML_TYPE_F16); // memory_v
ctx_size += n_ctx * n_layer * n_embd * ggml_v3_type_sizef(GGML_V3_TYPE_F16); // memory_k
ctx_size += n_ctx * n_layer * n_embd * ggml_v3_type_sizef(GGML_V3_TYPE_F16); // memory_v
ctx_size += (6 + 6 * n_layer) * 512; // object overhead
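Reference sketch (not part of the commit): the ctx_size arithmetic in the hunk above budgets one contiguous ggml_v3 context for all MPT weights plus the F16 KV cache and a per-object overhead. The standalone C++ below redoes the same arithmetic on plain numbers, assuming 2-byte F16 and 4-byte F32 elements and a wtype_size stand-in for ggml_v3_type_sizef(wtype); the shapes used in main are illustrative assumptions, not values from the diff.

#include <cstddef>
#include <cstdio>

static size_t mpt_ctx_size_estimate(size_t n_embd, size_t n_layer, size_t n_ctx,
                                    size_t n_vocab, double wtype_size) {
    const double f16 = 2.0, f32 = 4.0;                        // bytes per element (assumed)
    double sz = 0.0;
    sz += n_embd * n_vocab * wtype_size;                      // wte_weight
    sz += n_embd * f32;                                       // norm_f_weight
    sz += n_layer * (n_embd * f32);                           // ln_1_weight
    sz += n_layer * (3.0 * n_embd * n_embd * wtype_size);     // attn_Wqkv_weight
    sz += n_layer * (n_embd * n_embd * wtype_size);           // attn_out_proj_weight
    sz += n_layer * (n_embd * f32);                           // ln_2_weight
    sz += n_layer * (4.0 * n_embd * n_embd * wtype_size);     // mlp_mlp_up_weight
    sz += n_layer * (4.0 * n_embd * n_embd * wtype_size);     // mlp_mlp_down_weight
    sz += 2.0 * n_ctx * n_layer * n_embd * f16;               // memory_k + memory_v
    sz += (6 + 6 * n_layer) * 512;                            // object overhead
    return (size_t) sz;
}

int main() {
    // hypothetical MPT-7B-like shapes with F16 weights
    const size_t est = mpt_ctx_size_estimate(4096, 32, 2048, 50432, 2.0);
    printf("estimated ctx_size = %.2f MB\n", est / 1024.0 / 1024.0);
}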
@@ -146,14 +146,14 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
// create the ggml context
{
struct ggml_init_params params;
struct ggml_v3_init_params params;
params.mem_size = ctx_size;
params.mem_buffer = NULL;
params.no_alloc = false;
model.ctx = ggml_init(params);
model.ctx = ggml_v3_init(params);
if (!model.ctx) {
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
fprintf(stderr, "%s: ggml_v3_init() failed\n", __func__);
return false;
}
}
@@ -168,8 +168,8 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
model.layers.resize(n_layer);
model.wte_weight = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.norm_f_weight = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
model.wte_weight = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.norm_f_weight = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
// map by name
model.tensors["transformer.wte.weight"] = model.wte_weight;
@@ -178,12 +178,12 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
for (int i = 0; i < (int) n_layer; ++i) {
auto & layer = model.layers[i];
layer.norm_1_weight = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.c_attn_wqkv_weight = ggml_new_tensor_2d(ctx, wtype, n_embd, 3 * n_embd);
layer.c_attn_out_proj_weight = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.norm_2_weight = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ffn_up_proj = ggml_new_tensor_2d(ctx, wtype, n_embd, 4 * n_embd);
layer.ffn_down_proj = ggml_new_tensor_2d(ctx, wtype, 4 * n_embd, n_embd);
layer.norm_1_weight = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.c_attn_wqkv_weight = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, 3 * n_embd);
layer.c_attn_out_proj_weight = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.norm_2_weight = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.ffn_up_proj = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, 4 * n_embd);
layer.ffn_down_proj = ggml_v3_new_tensor_2d(ctx, wtype, 4 * n_embd, n_embd);
// map by name
model.tensors["transformer.blocks." + std::to_string(i) + ".norm_1.weight"] = layer.norm_1_weight;
@@ -205,10 +205,10 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
const int64_t n_mem = n_layer * n_ctx;
const int64_t n_elements = n_embd * n_mem;
model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
model.memory_k = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F16, n_elements);
model.memory_v = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F16, n_elements);
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
const size_t memory_size = ggml_v3_nbytes(model.memory_k) + ggml_v3_nbytes(model.memory_v);
printf("%s: memory_size = %8.2f MB, n_mem = %" PRId64 "\n", __func__, memory_size / 1024.0 / 1024.0, n_mem);
}
@@ -249,7 +249,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
}
auto tensor = model.tensors[name.data()];
if (ggml_nelements(tensor) != nelements) {
if (ggml_v3_nelements(tensor) != nelements) {
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
return false;
}
@@ -265,22 +265,22 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
// for debugging
if (0) {
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1],
ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor) / 1024.0 / 1024.0, ggml_nbytes(tensor));
ggml_v3_type_name(ggml_v3_type(ttype)), ggml_v3_nbytes(tensor) / 1024.0 / 1024.0, ggml_v3_nbytes(tensor));
}
const size_t bpe = ggml_type_size(ggml_type(ttype));
const size_t bpe = ggml_v3_type_size(ggml_v3_type(ttype));
if ((nelements * bpe) / ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
if ((nelements * bpe) / ggml_v3_blck_size(tensor->type) != ggml_v3_nbytes(tensor)) {
fprintf(stderr,
"%s: tensor '%s' has wrong size in model file: got %zu, "
"expected %zu\n",
__func__, name.data(), ggml_nbytes(tensor), nelements * bpe);
__func__, name.data(), ggml_v3_nbytes(tensor), nelements * bpe);
return false;
}
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
fin.read(reinterpret_cast<char *>(tensor->data), ggml_v3_nbytes(tensor));
total_size += ggml_nbytes(tensor);
total_size += ggml_v3_nbytes(tensor);
if (++n_tensors % 8 == 0) {
printf(".");
fflush(stdout);
@@ -308,20 +308,20 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
#endif
for (int i = 0; i < n_gpu; ++i) {
const auto & layer = model.layers[i];
layer.ffn_up_proj->backend = GGML_BACKEND_GPU;
layer.ffn_down_proj->backend = GGML_BACKEND_GPU;
layer.c_attn_wqkv_weight->backend = GGML_BACKEND_GPU;
layer.c_attn_out_proj_weight->backend = GGML_BACKEND_GPU;
layer.ffn_up_proj->backend = GGML_V3_BACKEND_GPU;
layer.ffn_down_proj->backend = GGML_V3_BACKEND_GPU;
layer.c_attn_wqkv_weight->backend = GGML_V3_BACKEND_GPU;
layer.c_attn_out_proj_weight->backend = GGML_V3_BACKEND_GPU;
#if defined(GGML_USE_CLBLAST)
ggml_cl_transform_tensor(layer.ffn_up_proj->data,layer.ffn_up_proj); vram_total += ggml_nbytes(layer.ffn_up_proj);
ggml_cl_transform_tensor(layer.ffn_down_proj->data,layer.ffn_down_proj); vram_total += ggml_nbytes(layer.ffn_down_proj);
ggml_cl_transform_tensor(layer.c_attn_wqkv_weight->data,layer.c_attn_wqkv_weight); vram_total += ggml_nbytes(layer.c_attn_wqkv_weight);
ggml_cl_transform_tensor(layer.c_attn_out_proj_weight->data,layer.c_attn_out_proj_weight); vram_total += ggml_nbytes(layer.c_attn_out_proj_weight);
ggml_v3_cl_transform_tensor(layer.ffn_up_proj->data,layer.ffn_up_proj); vram_total += ggml_v3_nbytes(layer.ffn_up_proj);
ggml_v3_cl_transform_tensor(layer.ffn_down_proj->data,layer.ffn_down_proj); vram_total += ggml_v3_nbytes(layer.ffn_down_proj);
ggml_v3_cl_transform_tensor(layer.c_attn_wqkv_weight->data,layer.c_attn_wqkv_weight); vram_total += ggml_v3_nbytes(layer.c_attn_wqkv_weight);
ggml_v3_cl_transform_tensor(layer.c_attn_out_proj_weight->data,layer.c_attn_out_proj_weight); vram_total += ggml_v3_nbytes(layer.c_attn_out_proj_weight);
#else
ggml_cuda_transform_tensor(layer.ffn_up_proj->data,layer.ffn_up_proj); vram_total += ggml_nbytes(layer.ffn_up_proj);
ggml_cuda_transform_tensor(layer.ffn_down_proj->data,layer.ffn_down_proj); vram_total += ggml_nbytes(layer.ffn_down_proj);
ggml_cuda_transform_tensor(layer.c_attn_wqkv_weight->data,layer.c_attn_wqkv_weight); vram_total += ggml_nbytes(layer.c_attn_wqkv_weight);
ggml_cuda_transform_tensor(layer.c_attn_out_proj_weight->data,layer.c_attn_out_proj_weight); vram_total += ggml_nbytes(layer.c_attn_out_proj_weight);
ggml_v3_cuda_transform_tensor(layer.ffn_up_proj->data,layer.ffn_up_proj); vram_total += ggml_v3_nbytes(layer.ffn_up_proj);
ggml_v3_cuda_transform_tensor(layer.ffn_down_proj->data,layer.ffn_down_proj); vram_total += ggml_v3_nbytes(layer.ffn_down_proj);
ggml_v3_cuda_transform_tensor(layer.c_attn_wqkv_weight->data,layer.c_attn_wqkv_weight); vram_total += ggml_v3_nbytes(layer.c_attn_wqkv_weight);
ggml_v3_cuda_transform_tensor(layer.c_attn_out_proj_weight->data,layer.c_attn_out_proj_weight); vram_total += ggml_v3_nbytes(layer.c_attn_out_proj_weight);
#endif
}
#if defined(GGML_USE_CLBLAST)
@@ -384,32 +384,32 @@ bool mpt_eval(const mpt_model & model, const int n_threads, const int n_past,
}
}
struct ggml_init_params params;
struct ggml_v3_init_params params;
params.mem_size = buf_size;
params.mem_buffer = buf;
params.no_alloc = false;
struct ggml_context * ctx0 = ggml_init(params);
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, GGML_MAX_NODES, false);
struct ggml_v3_context * ctx0 = ggml_v3_init(params);
struct ggml_v3_cgraph * gf = ggml_v3_new_graph_custom(ctx0, GGML_V3_MAX_NODES, false);
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
memcpy(embd->data, embd_inp.data(), N * ggml_element_size(embd));
struct ggml_v3_tensor * embd = ggml_v3_new_tensor_1d(ctx0, GGML_V3_TYPE_I32, N);
memcpy(embd->data, embd_inp.data(), N * ggml_v3_element_size(embd));
struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte_weight, embd);
struct ggml_v3_tensor * inpL = ggml_v3_get_rows(ctx0, model.wte_weight, embd);
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * cur;
struct ggml_v3_tensor * cur;
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
ggml_v3_set_scratch(ctx0, { 0, scr0_size, scr0, });
}
// a = self.ln_1(x)
{
cur = ggml_norm(ctx0, inpL, default_norm_eps);
cur = ggml_v3_norm(ctx0, inpL, default_norm_eps);
cur = ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].norm_1_weight, cur), cur);
cur = ggml_v3_mul(ctx0, ggml_v3_repeat(ctx0, model.layers[il].norm_1_weight, cur), cur);
}
// self-attention
@@ -418,164 +418,164 @@ bool mpt_eval(const mpt_model & model, const int n_threads, const int n_past,
// is_causal=is_causal)
{
// compute QKV
cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_wqkv_weight, cur);
cur = ggml_v3_mul_mat(ctx0, model.layers[il].c_attn_wqkv_weight, cur);
if (model.hparams.clip_qkv > 0.0f) {
cur = ggml_clamp(ctx0, cur, -model.hparams.clip_qkv, model.hparams.clip_qkv);
cur = ggml_v3_clamp(ctx0, cur, -model.hparams.clip_qkv, model.hparams.clip_qkv);
}
struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0 * sizeof(float) * n_embd);
struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1 * sizeof(float) * n_embd);
struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2 * sizeof(float) * n_embd);
struct ggml_v3_tensor * Qcur = ggml_v3_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0 * sizeof(float) * n_embd);
struct ggml_v3_tensor * Kcur = ggml_v3_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1 * sizeof(float) * n_embd);
struct ggml_v3_tensor * Vcur = ggml_v3_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2 * sizeof(float) * n_embd);
// store key and value to memory
{
struct ggml_tensor * k =
ggml_view_1d(ctx0, model.memory_k, N * n_embd,
(ggml_element_size(model.memory_k) * n_embd) * (il * n_ctx + n_past));
struct ggml_tensor * v =
ggml_view_1d(ctx0, model.memory_v, N * n_embd,
(ggml_element_size(model.memory_v) * n_embd) * (il * n_ctx + n_past));
struct ggml_v3_tensor * k =
ggml_v3_view_1d(ctx0, model.memory_k, N * n_embd,
(ggml_v3_element_size(model.memory_k) * n_embd) * (il * n_ctx + n_past));
struct ggml_v3_tensor * v =
ggml_v3_view_1d(ctx0, model.memory_v, N * n_embd,
(ggml_v3_element_size(model.memory_v) * n_embd) * (il * n_ctx + n_past));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
ggml_v3_build_forward_expand(gf, ggml_v3_cpy(ctx0, Kcur, k));
ggml_v3_build_forward_expand(gf, ggml_v3_cpy(ctx0, Vcur, v));
}
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0,
// 2, 1, 3) [64, N, 12]
struct ggml_tensor * Q = ggml_permute(
ctx0, ggml_cpy(ctx0, Qcur, ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd / n_head, n_head, N)), 0, 2,
struct ggml_v3_tensor * Q = ggml_v3_permute(
ctx0, ggml_v3_cpy(ctx0, Qcur, ggml_v3_new_tensor_3d(ctx0, GGML_V3_TYPE_F32, n_embd / n_head, n_head, N)), 0, 2,
1, 3);
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1,
// 3) [64, n_past + N, 12]
struct ggml_tensor * K =
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
ggml_view_1d(ctx0, model.memory_k, (n_past + N) * n_embd,
il * n_ctx * ggml_element_size(model.memory_k) * n_embd),
struct ggml_v3_tensor * K =
ggml_v3_permute(ctx0,
ggml_v3_reshape_3d(ctx0,
ggml_v3_view_1d(ctx0, model.memory_k, (n_past + N) * n_embd,
il * n_ctx * ggml_v3_element_size(model.memory_k) * n_embd),
n_embd / n_head, n_head, n_past + N),
0, 2, 1, 3);
// K * Q
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
struct ggml_v3_tensor * KQ = ggml_v3_mul_mat(ctx0, K, Q);
// KQ_scaled = KQ / sqrt(n_embd/n_head)
struct ggml_tensor * KQ_scaled =
ggml_scale(ctx0, KQ, 1.0f / sqrt(float(n_embd) / n_head));
struct ggml_v3_tensor * KQ_scaled =
ggml_v3_scale(ctx0, KQ, 1.0f / sqrt(float(n_embd) / n_head));
struct ggml_tensor * KQ_scaled_alibi =
ggml_alibi(ctx0, KQ_scaled, n_past, n_head, model.hparams.alibi_bias_max);
struct ggml_v3_tensor * KQ_scaled_alibi =
ggml_v3_alibi(ctx0, KQ_scaled, n_past, n_head, model.hparams.alibi_bias_max);
// KQ_masked = mask_past(KQ_scaled)
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled_alibi, n_past);
struct ggml_v3_tensor * KQ_masked = ggml_v3_diag_mask_inf(ctx0, KQ_scaled_alibi, n_past);
// KQ = soft_max(KQ_masked)
struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
struct ggml_v3_tensor * KQ_soft_max = ggml_v3_soft_max(ctx0, KQ_masked);
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1,
// 2, 0, 3).contiguous() [n_past + N, 64, 12]
struct ggml_tensor * V_trans = ggml_cpy(
struct ggml_v3_tensor * V_trans = ggml_v3_cpy(
ctx0,
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
ggml_view_1d(ctx0, model.memory_v, (n_past + N) * n_embd,
il * n_ctx * ggml_element_size(model.memory_v) * n_embd),
ggml_v3_permute(ctx0,
ggml_v3_reshape_3d(ctx0,
ggml_v3_view_1d(ctx0, model.memory_v, (n_past + N) * n_embd,
il * n_ctx * ggml_v3_element_size(model.memory_v) * n_embd),
n_embd / n_head, n_head, n_past + N),
1, 2, 0, 3),
ggml_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd / n_head, n_head));
ggml_v3_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd / n_head, n_head));
// KQV = transpose(V) * KQ_soft_max
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);
struct ggml_v3_tensor * KQV = ggml_v3_mul_mat(ctx0, V_trans, KQ_soft_max);
// KQV_merged = KQV.permute(0, 2, 1, 3)
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
struct ggml_v3_tensor * KQV_merged = ggml_v3_permute(ctx0, KQV, 0, 2, 1, 3);
// cur = KQV_merged.contiguous().view(n_embd, N)
cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
cur = ggml_v3_cpy(ctx0, KQV_merged, ggml_v3_new_tensor_2d(ctx0, GGML_V3_TYPE_F32, n_embd, N));
// projection
{ cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_out_proj_weight, cur); }
{ cur = ggml_v3_mul_mat(ctx0, model.layers[il].c_attn_out_proj_weight, cur); }
}
inpL = ggml_add(ctx0, inpL, cur);
inpL = ggml_v3_add(ctx0, inpL, cur);
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr1_size, scr1, });
ggml_v3_set_scratch(ctx0, { 0, scr1_size, scr1, });
}
// m = self.ln_2(x)
{
cur = ggml_norm(ctx0, inpL, default_norm_eps);
cur = ggml_v3_norm(ctx0, inpL, default_norm_eps);
cur = ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].norm_2_weight, cur), cur);
cur = ggml_v3_mul(ctx0, ggml_v3_repeat(ctx0, model.layers[il].norm_2_weight, cur), cur);
}
// n = self.mlp(m)
{
cur = ggml_mul_mat(ctx0, model.layers[il].ffn_up_proj, cur);
cur = ggml_v3_mul_mat(ctx0, model.layers[il].ffn_up_proj, cur);
// GELU activation
cur = ggml_gelu(ctx0, cur);
cur = ggml_v3_gelu(ctx0, cur);
// projection
// cur = proj_w*cur + proj_b
cur = ggml_mul_mat(ctx0, model.layers[il].ffn_down_proj, cur);
cur = ggml_v3_mul_mat(ctx0, model.layers[il].ffn_down_proj, cur);
}
// x = x + n
inpL = ggml_add(ctx0, inpL, cur);
inpL = ggml_v3_add(ctx0, inpL, cur);
}
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
ggml_v3_set_scratch(ctx0, { 0, scr0_size, scr0, });
}
// norm
{
inpL = ggml_norm(ctx0, inpL, default_norm_eps);
inpL = ggml_v3_norm(ctx0, inpL, default_norm_eps);
// inpL = ln_f_g*inpL
inpL = ggml_mul(ctx0, ggml_repeat(ctx0, model.norm_f_weight, inpL), inpL);
inpL = ggml_v3_mul(ctx0, ggml_v3_repeat(ctx0, model.norm_f_weight, inpL), inpL);
}
if(use_scratch){
ggml_set_scratch(ctx0, { 0, 0, nullptr, });
ggml_v3_set_scratch(ctx0, { 0, 0, nullptr, });
}
// output embedding weight tied to input embedding
inpL = ggml_mul_mat(ctx0, model.wte_weight, inpL);
inpL = ggml_v3_mul_mat(ctx0, model.wte_weight, inpL);
// logits -> probs
// inpL = ggml_soft_max(ctx0, inpL);
// inpL = ggml_v3_soft_max(ctx0, inpL);
// run the computation
ggml_build_forward_expand(gf, inpL);
ggml_v3_build_forward_expand(gf, inpL);
kcpp_graph_compute_helper(gf, n_threads);
// std::cout << "Qcur" << std::endl;
// print_tensor(Qcur);
// if (n_past%100 == 0) {
// ggml_graph_print(&gf);
// ggml_graph_dump_dot(&gf, NULL, "mpt-model.dot");
// ggml_v3_graph_print(&gf);
// ggml_v3_graph_dump_dot(&gf, NULL, "mpt-model.dot");
// }
if (logits_all) {
// return result for all tokens
embd_w.resize(n_vocab *N);
memcpy(embd_w.data(), (float *)ggml_get_data(inpL) , sizeof(float) * n_vocab * N);
memcpy(embd_w.data(), (float *)ggml_v3_get_data(inpL) , sizeof(float) * n_vocab * N);
} else {
// return result for just the last token
embd_w.resize(n_vocab);
memcpy(embd_w.data(), (float *)ggml_get_data(inpL) + (n_vocab * (N - 1)), sizeof(float) * n_vocab);
memcpy(embd_w.data(), (float *)ggml_v3_get_data(inpL) + (n_vocab * (N - 1)), sizeof(float) * n_vocab);
}
if (mem_per_token == 0) {
mem_per_token = ggml_used_mem(ctx0) / N;
mem_per_token = ggml_v3_used_mem(ctx0) / N;
}
// printf("used_mem = %zu\n", ggml_used_mem(ctx0));
// printf("used_mem = %zu\n", ggml_v3_used_mem(ctx0));
ggml_free(ctx0);
ggml_v3_free(ctx0);
return true;
}
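Reference sketch (not part of the commit): mpt_eval above adds an ALiBi bias to the scaled attention scores via ggml_v3_alibi before masking and softmax. The standalone C++ below shows the textbook ALiBi recipe, assuming the geometric slope schedule from the ALiBi paper with hparams.alibi_bias_max playing the role of the maximum bias; the exact schedule inside ggml_v3_alibi (notably for non-power-of-two head counts) may differ.

#include <cmath>
#include <cstdio>
#include <vector>

// slope_h = 2^(-bias_max * (h + 1) / n_head): a geometric sequence over heads.
static std::vector<float> alibi_slopes(int n_head, float bias_max) {
    std::vector<float> slopes(n_head);
    for (int h = 0; h < n_head; ++h) {
        slopes[h] = std::pow(2.0f, -bias_max * (float)(h + 1) / (float)n_head);
    }
    return slopes;
}

// score'[h][i][j] = score[h][i][j] - slope_h * (i - j)   for j <= i (causal part)
static void add_alibi_bias(std::vector<float>& scores, int n_head, int n, float bias_max) {
    const std::vector<float> slopes = alibi_slopes(n_head, bias_max);
    for (int h = 0; h < n_head; ++h)
        for (int i = 0; i < n; ++i)
            for (int j = 0; j <= i; ++j)
                scores[(h*n + i)*n + j] -= slopes[h] * (float)(i - j);
}

int main() {
    const int n_head = 8, n = 4;                       // toy sizes for illustration
    std::vector<float> scores(n_head * n * n, 0.0f);
    add_alibi_bias(scores, n_head, n, /*bias_max=*/8.0f);
    printf("bias for head 0 at distance 3: %f\n", scores[0*n*n + 3*n + 0]);
}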

View file

@@ -1,4 +1,4 @@
#include "ggml.h"
#include "ggml_v3.h"
#include "otherarch.h"
#include "utils.h"
@@ -15,10 +15,10 @@
#include <algorithm>
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#include "ggml_v3-cuda.h"
#endif
#if defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#include "ggml_v3-opencl.h"
#endif
// load the model's weights from a file
@@ -56,7 +56,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
fin.read((char *) &hparams.par_res, sizeof(hparams.par_res));
fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));
const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
const int32_t qntvr = hparams.ftype / GGML_V3_QNT_VERSION_FACTOR;
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
printf("%s: n_ctx = %d (%d)\n", __func__, hparams.n_ctx,origmaxctx);
@@ -70,7 +70,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
hparams.n_ctx = std::max(origmaxctx,hparams.n_ctx);
hparams.ftype %= GGML_QNT_VERSION_FACTOR;
hparams.ftype %= GGML_V3_QNT_VERSION_FACTOR;
}
// load vocab
@@ -96,8 +96,8 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
// for the big tensors, we have the option to store the data in 16-bit floats or quantized
// in order to save memory and also to speed up the computation
ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype));
if (wtype == GGML_TYPE_COUNT) {
ggml_v3_type wtype = ggml_v3_ftype_to_ggml_v3_type((ggml_v3_ftype) (model.hparams.ftype));
if (wtype == GGML_V3_TYPE_COUNT) {
fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n",
__func__, fname.c_str(), model.hparams.ftype);
return ModelLoadResult::FAIL;
@@ -115,34 +115,34 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
const size_t n_ctx = hparams.n_ctx;
const size_t n_vocab = hparams.n_vocab;
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b
ctx_size += n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32); // ln_f_g
ctx_size += n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32); // ln_f_b
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // wte
ctx_size += n_embd*n_vocab*ggml_v3_type_sizef(wtype); // wte
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // lmh_g
//ctx_size += n_vocab*ggml_type_sizef(GGML_TYPE_F32); // lmh_b
ctx_size += n_embd*n_vocab*ggml_v3_type_sizef(wtype); // lmh_g
//ctx_size += n_vocab*ggml_v3_type_sizef(GGML_V3_TYPE_F32); // lmh_b
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b
ctx_size += n_layer*(n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_1_g
ctx_size += n_layer*(n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_1_b
ctx_size += n_layer*(3*n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_attn_w
ctx_size += n_layer*( 3*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_attn_b
ctx_size += n_layer*(3*n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_attn_attn_w
ctx_size += n_layer*( 3*n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // c_attn_attn_b
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_proj_b
ctx_size += n_layer*(n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_attn_proj_w
ctx_size += n_layer*(n_embd*n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // c_attn_proj_b
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_g
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_b
ctx_size += n_layer*(n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_2_g
ctx_size += n_layer*(n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // ln_2_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w
ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_mlp_fc_w
ctx_size += n_layer*( 4*n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // c_mlp_fc_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v3_type_sizef(wtype)); // c_mlp_proj_w
ctx_size += n_layer*( n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F32)); // c_mlp_proj_b
ctx_size += std::max((size_t)origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k
ctx_size += std::max((size_t)origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v
ctx_size += std::max((size_t)origmaxctx,n_ctx)*n_layer*n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F16); // memory_k
ctx_size += std::max((size_t)origmaxctx,n_ctx)*n_layer*n_embd*ggml_v3_type_sizef(GGML_V3_TYPE_F16); // memory_v
ctx_size += (6 + 16*n_layer)*1024; // object overhead
@@ -151,14 +151,14 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
// create the ggml context
{
struct ggml_init_params params;
struct ggml_v3_init_params params;
params.mem_size = ctx_size;
params.mem_buffer = NULL;
params.no_alloc = false;
model.ctx = ggml_init(params);
model.ctx = ggml_v3_init(params);
if (!model.ctx) {
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
fprintf(stderr, "%s: ggml_v3_init() failed\n", __func__);
return ModelLoadResult::FAIL;
}
}
@@ -173,13 +173,13 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
model.layers.resize(n_layer);
model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.wte = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
model.ln_f_g = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
model.ln_f_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
model.lmh_g = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
//model.lmh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_vocab);
model.lmh_g = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
//model.lmh_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_vocab);
// map by name
model.tensors["gpt_neox.embed_in.weight"] = model.wte;
@@ -193,23 +193,23 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
for (int i = 0; i < n_layer; ++i) {
auto & layer = model.layers[i];
layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_1_g = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.ln_1_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd);
layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd);
layer.c_attn_attn_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd);
layer.c_attn_attn_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, 3*n_embd);
layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.c_attn_proj_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_proj_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_2_g = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.ln_2_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd);
layer.c_mlp_fc_w = ggml_v3_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
layer.c_mlp_fc_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, 4*n_embd);
layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.c_mlp_proj_w = ggml_v3_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
layer.c_mlp_proj_b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_embd);
// map by name
model.tensors["gpt_neox.layers." + std::to_string(i) + ".input_layernorm.weight"] = layer.ln_1_g;
@@ -243,10 +243,10 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
const int64_t n_mem = n_layer*std::max(origmaxctx,n_ctx);
const int64_t n_elements = n_embd*n_mem;
model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
model.memory_k = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F16, n_elements);
model.memory_v = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F16, n_elements);
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
const size_t memory_size = ggml_v3_nbytes(model.memory_k) + ggml_v3_nbytes(model.memory_v);
printf("%s: memory_size = %8.2f MB, n_mem = %" PRId64 "\n", __func__, memory_size/1024.0/1024.0, n_mem);
}
@@ -287,7 +287,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
}
auto tensor = model.tensors[name.data()];
if (ggml_nelements(tensor) != nelements) {
if (ggml_v3_nelements(tensor) != nelements) {
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
return ModelLoadResult::FAIL;
}
@@ -300,21 +300,21 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
// for debugging
if (0) {
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_v3_type_name(ggml_v3_type(ttype)), ggml_v3_nbytes(tensor)/1024.0/1024.0, ggml_v3_nbytes(tensor));
}
const size_t bpe = ggml_type_size(ggml_type(ttype));
const size_t bpe = ggml_v3_type_size(ggml_v3_type(ttype));
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
if ((nelements*bpe)/ggml_v3_blck_size(tensor->type) != ggml_v3_nbytes(tensor)) {
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
__func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
ggml_free(ctx);
__func__, name.data(), ggml_v3_nbytes(tensor), nelements*bpe);
ggml_v3_free(ctx);
return ModelLoadResult::RETRY_LOAD;
}
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
fin.read(reinterpret_cast<char *>(tensor->data), ggml_v3_nbytes(tensor));
total_size += ggml_nbytes(tensor);
total_size += ggml_v3_nbytes(tensor);
if (++n_tensors % 8 == 0) {
printf(".");
fflush(stdout);
@@ -342,20 +342,20 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
#endif
for (int i = 0; i < n_gpu; ++i) {
const auto & layer = model.layers[i];
layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;
layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_attn_w->backend = GGML_V3_BACKEND_GPU;
layer.c_attn_proj_w->backend = GGML_V3_BACKEND_GPU;
layer.c_mlp_fc_w->backend = GGML_V3_BACKEND_GPU;
layer.c_mlp_proj_w->backend = GGML_V3_BACKEND_GPU;
#if defined(GGML_USE_CLBLAST)
ggml_cl_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
ggml_v3_cl_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_v3_nbytes(layer.c_attn_attn_w);
ggml_v3_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_proj_w);
ggml_v3_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_v3_nbytes(layer.c_mlp_fc_w);
ggml_v3_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_v3_nbytes(layer.c_mlp_proj_w);
#else
ggml_cuda_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
ggml_v3_cuda_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_v3_nbytes(layer.c_attn_attn_w);
ggml_v3_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_v3_nbytes(layer.c_attn_proj_w);
ggml_v3_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_v3_nbytes(layer.c_mlp_fc_w);
ggml_v3_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_v3_nbytes(layer.c_mlp_proj_w);
#endif
}
#if defined(GGML_USE_CLBLAST)
@@ -371,37 +371,37 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
// feed-forward network
ggml_tensor * gpt_neox_ff(
ggml_v3_tensor * gpt_neox_ff(
const gpt_neox_layer &layer,
ggml_context * ctx0,
ggml_tensor * inp) {
ggml_tensor * cur = ggml_norm(ctx0, inp, default_norm_eps);
ggml_v3_context * ctx0,
ggml_v3_tensor * inp) {
ggml_v3_tensor * cur = ggml_v3_norm(ctx0, inp, default_norm_eps);
cur = ggml_add(ctx0,
ggml_mul(ctx0,
ggml_repeat(ctx0, layer.ln_2_g, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_mul(ctx0,
ggml_v3_repeat(ctx0, layer.ln_2_g, cur),
cur),
ggml_repeat(ctx0, layer.ln_2_b, cur));
ggml_v3_repeat(ctx0, layer.ln_2_b, cur));
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
layer.c_mlp_fc_w,
cur);
cur = ggml_add(ctx0,
ggml_repeat(ctx0, layer.c_mlp_fc_b, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_repeat(ctx0, layer.c_mlp_fc_b, cur),
cur);
// GELU activation
cur = ggml_gelu(ctx0, cur);
cur = ggml_v3_gelu(ctx0, cur);
// projection
// cur = proj_w*cur + proj_b
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
layer.c_mlp_proj_w,
cur);
cur = ggml_add(ctx0,
ggml_repeat(ctx0, layer.c_mlp_proj_b, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_repeat(ctx0, layer.c_mlp_proj_b, cur),
cur);
return cur;
}
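Reference sketch (not part of the commit): gpt_neox_ff above is the standard GELU feed-forward block (up-projection with bias, GELU, down-projection with bias) applied after the ln_2 layernorm. The standalone C++ below shows that block on plain float buffers, assuming the tanh-based GELU approximation; the exact ggml_v3_gelu kernel may approximate GELU differently, and the preceding layernorm step is omitted here.

#include <cmath>
#include <cstdio>
#include <vector>

// tanh approximation of GELU (assumed for illustration)
static float gelu_tanh(float x) {
    const float c = 0.7978845608028654f;  // sqrt(2/pi)
    return 0.5f * x * (1.0f + std::tanh(c * (x + 0.044715f * x * x * x)));
}

// y = W2 * gelu(W1 * x + b1) + b2, with W1: [4d x d], W2: [d x 4d]
static void ff_forward(const std::vector<float>& W1, const std::vector<float>& b1,
                       const std::vector<float>& W2, const std::vector<float>& b2,
                       const std::vector<float>& x, std::vector<float>& y, int d) {
    std::vector<float> h(4 * d);
    for (int i = 0; i < 4 * d; ++i) {
        float acc = b1[i];
        for (int j = 0; j < d; ++j) acc += W1[i*d + j] * x[j];
        h[i] = gelu_tanh(acc);                              // c_mlp_fc_w/b + GELU
    }
    for (int i = 0; i < d; ++i) {
        float acc = b2[i];
        for (int j = 0; j < 4 * d; ++j) acc += W2[i*4*d + j] * h[j];
        y[i] = acc;                                         // c_mlp_proj_w/b
    }
}

int main() {
    const int d = 8;                                        // toy size for illustration
    std::vector<float> W1(4*d*d, 0.01f), b1(4*d, 0.0f), W2(d*4*d, 0.01f), b2(d, 0.0f);
    std::vector<float> x(d, 1.0f), y(d);
    ff_forward(W1, b1, W2, b2, x, y, d);
    printf("y[0] = %f\n", y[0]);
}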
@@ -464,56 +464,56 @@ bool gpt_neox_eval(
}
}
struct ggml_init_params params;
struct ggml_v3_init_params params;
params.mem_size = buf_size;
params.mem_buffer = buf;
params.no_alloc = false;
struct ggml_context * ctx0 = ggml_init(params);
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, GGML_MAX_NODES, false);
struct ggml_v3_context * ctx0 = ggml_v3_init(params);
struct ggml_v3_cgraph * gf = ggml_v3_new_graph_custom(ctx0, GGML_V3_MAX_NODES, false);
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
struct ggml_v3_tensor * embd = ggml_v3_new_tensor_1d(ctx0, GGML_V3_TYPE_I32, N);
memcpy(embd->data, embd_inp.data(), N*ggml_v3_element_size(embd));
// wte
struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd);
struct ggml_v3_tensor * inpL = ggml_v3_get_rows(ctx0, model.wte, embd);
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * cur;
struct ggml_v3_tensor * cur;
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
ggml_v3_set_scratch(ctx0, { 0, scr0_size, scr0, });
}
// self-attention
{
{
cur = ggml_norm(ctx0, inpL, default_norm_eps);
cur = ggml_v3_norm(ctx0, inpL, default_norm_eps);
cur = ggml_add(ctx0,
ggml_mul(ctx0,
ggml_repeat(ctx0, model.layers[il].ln_1_g, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_mul(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].ln_1_g, cur),
cur),
ggml_repeat(ctx0, model.layers[il].ln_1_b, cur));
ggml_v3_repeat(ctx0, model.layers[il].ln_1_b, cur));
}
// compute QKV
{
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
model.layers[il].c_attn_attn_w,
cur);
cur = ggml_add(ctx0,
ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur),
cur = ggml_v3_add(ctx0,
ggml_v3_repeat(ctx0, model.layers[il].c_attn_attn_b, cur),
cur);
}
struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 0*sizeof(float)*n_embd/n_head));
struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 1*sizeof(float)*n_embd/n_head));
struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head));
struct ggml_v3_tensor * Qcur = ggml_v3_cont(ctx0, ggml_v3_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 0*sizeof(float)*n_embd/n_head));
struct ggml_v3_tensor * Kcur = ggml_v3_cont(ctx0, ggml_v3_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 1*sizeof(float)*n_embd/n_head));
struct ggml_v3_tensor * Vcur = ggml_v3_cont(ctx0, ggml_v3_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head));
struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
struct ggml_v3_tensor * KQ_pos = ggml_v3_new_tensor_1d(ctx0, GGML_V3_TYPE_I32, N);
{
int * data = (int *) KQ_pos->data;
for (int i = 0; i < N; ++i) {
@@ -522,161 +522,161 @@ bool gpt_neox_eval(
}
// using mode = 2 for GPT-NeoX mode
Qcur = ggml_rope_custom_inplace(ctx0, Qcur, KQ_pos, n_rot, 2, n_ctx, 0, freq_base, freq_scale, 0, 1, 32, 1);
Kcur = ggml_rope_custom_inplace(ctx0, Kcur, KQ_pos, n_rot, 2, n_ctx, 0, freq_base, freq_scale, 0, 1, 32, 1);
Qcur = ggml_v3_rope_custom_inplace(ctx0, Qcur, KQ_pos, n_rot, 2, n_ctx, 0, freq_base, freq_scale, 0, 1, 32, 1);
Kcur = ggml_v3_rope_custom_inplace(ctx0, Kcur, KQ_pos, n_rot, 2, n_ctx, 0, freq_base, freq_scale, 0, 1, 32, 1);
// store key and value to memory
{
Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd, N));
Vcur = ggml_v3_transpose(ctx0, ggml_v3_reshape_2d(ctx0, Vcur, n_embd, N));
struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
struct ggml_tensor * v = ggml_view_2d(ctx0, model.memory_v, N, n_embd,
( n_ctx)*ggml_element_size(model.memory_v),
(il*n_ctx)*ggml_element_size(model.memory_v)*n_embd + n_past*ggml_element_size(model.memory_v));
struct ggml_v3_tensor * k = ggml_v3_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_v3_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
struct ggml_v3_tensor * v = ggml_v3_view_2d(ctx0, model.memory_v, N, n_embd,
( n_ctx)*ggml_v3_element_size(model.memory_v),
(il*n_ctx)*ggml_v3_element_size(model.memory_v)*n_embd + n_past*ggml_v3_element_size(model.memory_v));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
ggml_v3_build_forward_expand(gf, ggml_v3_cpy(ctx0, Kcur, k));
ggml_v3_build_forward_expand(gf, ggml_v3_cpy(ctx0, Vcur, v));
}
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
struct ggml_tensor * Q =
ggml_permute(ctx0,
struct ggml_v3_tensor * Q =
ggml_v3_permute(ctx0,
Qcur,
0, 2, 1, 3);
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
struct ggml_tensor * K =
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
struct ggml_v3_tensor * K =
ggml_v3_permute(ctx0,
ggml_v3_reshape_3d(ctx0,
ggml_v3_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_v3_element_size(model.memory_k)*n_embd),
n_embd/n_head, n_head, n_past + N),
0, 2, 1, 3);
// K * Q
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
struct ggml_v3_tensor * KQ = ggml_v3_mul_mat(ctx0, K, Q);
// KQ_scaled = KQ / sqrt(n_embd/n_head)
struct ggml_tensor * KQ_scaled =
ggml_scale_inplace(ctx0,
struct ggml_v3_tensor * KQ_scaled =
ggml_v3_scale_inplace(ctx0,
KQ,
1.0f/sqrt(float(n_embd)/n_head)
);
// KQ_masked = mask_past(KQ_scaled)
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
struct ggml_v3_tensor * KQ_masked = ggml_v3_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
// KQ = soft_max(KQ_masked)
struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
struct ggml_v3_tensor * KQ_soft_max = ggml_v3_soft_max_inplace(ctx0, KQ_masked);
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
struct ggml_tensor * V =
ggml_view_3d(ctx0, model.memory_v,
struct ggml_v3_tensor * V =
ggml_v3_view_3d(ctx0, model.memory_v,
n_past + N, n_embd/n_head, n_head,
n_ctx*ggml_element_size(model.memory_v),
n_ctx*ggml_element_size(model.memory_v)*n_embd/n_head,
il*n_ctx*ggml_element_size(model.memory_v)*n_embd);
n_ctx*ggml_v3_element_size(model.memory_v),
n_ctx*ggml_v3_element_size(model.memory_v)*n_embd/n_head,
il*n_ctx*ggml_v3_element_size(model.memory_v)*n_embd);
// KQV = transpose(V) * KQ_soft_max
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
struct ggml_v3_tensor * KQV = ggml_v3_mul_mat(ctx0, V, KQ_soft_max);
// KQV_merged = KQV.permute(0, 2, 1, 3)
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
struct ggml_v3_tensor * KQV_merged = ggml_v3_permute(ctx0, KQV, 0, 2, 1, 3);
// cur = KQV_merged.contiguous().view(n_embd, N)
cur = ggml_cpy(ctx0,
cur = ggml_v3_cpy(ctx0,
KQV_merged,
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
ggml_v3_new_tensor_2d(ctx0, GGML_V3_TYPE_F32, n_embd, N));
// projection
{
cur = ggml_mul_mat(ctx0,
cur = ggml_v3_mul_mat(ctx0,
model.layers[il].c_attn_proj_w,
cur);
cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), cur);
cur = ggml_v3_add(ctx0, ggml_v3_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), cur);
}
}
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr1_size, scr1, });
ggml_v3_set_scratch(ctx0, { 0, scr1_size, scr1, });
}
if (hparams.par_res == 0) {
struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpL);
struct ggml_v3_tensor * inpFF = ggml_v3_add(ctx0, cur, inpL);
cur = gpt_neox_ff(model.layers[il], ctx0, inpFF);
// input for next layer
inpL = ggml_add(ctx0, cur, inpFF);
inpL = ggml_v3_add(ctx0, cur, inpFF);
} else {
struct ggml_tensor * inpFF = cur;
struct ggml_v3_tensor * inpFF = cur;
// this is independent of the self-attention result, so it could be done in parallel to the self-attention
// note here we pass inpL instead of cur
cur = gpt_neox_ff(model.layers[il], ctx0, inpL);
// layer input + FF
cur = ggml_add(ctx0, cur, inpFF);
cur = ggml_v3_add(ctx0, cur, inpFF);
// input for next layer
inpL = ggml_add(ctx0, cur, inpL);
inpL = ggml_v3_add(ctx0, cur, inpL);
}
}
if(use_scratch){
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
ggml_v3_set_scratch(ctx0, { 0, scr0_size, scr0, });
}
// norm
{
inpL = ggml_norm(ctx0, inpL, default_norm_eps);
inpL = ggml_v3_norm(ctx0, inpL, default_norm_eps);
// inpL = ln_f_g*inpL + ln_f_b
inpL = ggml_add(ctx0,
ggml_mul(ctx0,
ggml_repeat(ctx0, model.ln_f_g, inpL),
inpL = ggml_v3_add(ctx0,
ggml_v3_mul(ctx0,
ggml_v3_repeat(ctx0, model.ln_f_g, inpL),
inpL),
ggml_repeat(ctx0, model.ln_f_b, inpL));
ggml_v3_repeat(ctx0, model.ln_f_b, inpL));
}
if(use_scratch){
ggml_set_scratch(ctx0, { 0, 0, nullptr, });
ggml_v3_set_scratch(ctx0, { 0, 0, nullptr, });
}
// lm_head
{
inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL);
inpL = ggml_v3_mul_mat(ctx0, model.lmh_g, inpL);
//inpL = ggml_add(ctx0,
// ggml_repeat(ctx0, model.lmh_b, inpL),
//inpL = ggml_v3_add(ctx0,
// ggml_v3_repeat(ctx0, model.lmh_b, inpL),
// inpL);
}
// logits -> probs
//inpL = ggml_soft_max_inplace(ctx0, inpL);
//inpL = ggml_v3_soft_max_inplace(ctx0, inpL);
// run the computation
ggml_build_forward_expand(gf, inpL);
ggml_v3_build_forward_expand(gf, inpL);
kcpp_graph_compute_helper(gf, n_threads);
//if (n_past%100 == 0) {
// ggml_graph_print (&gf);
// ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
// ggml_v3_graph_print (&gf);
// ggml_v3_graph_dump_dot(&gf, NULL, "gpt-2.dot");
//}
//embd_w.resize(n_vocab*N);
//memcpy(embd_w.data(), ggml_v3_get_data(inpL), sizeof(float)*n_vocab*N);
// return result for just the last token
embd_w.resize(n_vocab);
memcpy(embd_w.data(), (float *) ggml_v3_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
if (mem_per_token == 0) {
mem_per_token = ggml_v3_used_mem(ctx0)/N;
}
//printf("used_mem = %zu\n", ggml_used_mem(ctx0));
//printf("used_mem = %zu\n", ggml_v3_used_mem(ctx0));
ggml_v3_free(ctx0);
return true;
}
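The mem_per_token figure recorded above follows the usual ggml example pattern: the first evaluation measures real usage through ggml_v3_used_mem, and subsequent calls size their compute buffer from it. Below is a minimal sizing sketch under that assumption; the helper name, the fixed overhead and the 10% headroom are illustrative and not taken from this code.

#include <cstddef>

// Illustrative sketch only: size an eval buffer for a batch of n_tokens from the
// per-token figure measured on the first call (mem_per_token = ggml_v3_used_mem(ctx0)/N).
static size_t estimate_eval_buf_size(size_t mem_per_token, size_t n_tokens) {
    const size_t overhead = 16u * 1024 * 1024;               // assumed fixed graph/work-data overhead
    return overhead + (mem_per_token * n_tokens * 11) / 10;  // roughly 10% headroom
}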

View file

@@ -31,22 +31,22 @@ struct gptj_hparams {
struct gptj_layer {
// normalization
struct ggml_v3_tensor * ln_1_g;
struct ggml_v3_tensor * ln_1_b;
// attention
struct ggml_v3_tensor * c_attn_q_proj_w;
struct ggml_v3_tensor * c_attn_k_proj_w;
struct ggml_v3_tensor * c_attn_v_proj_w;
struct ggml_v3_tensor * c_attn_proj_w;
// ff
struct ggml_v3_tensor * c_mlp_fc_w;
struct ggml_v3_tensor * c_mlp_fc_b;
struct ggml_v3_tensor * c_mlp_proj_w;
struct ggml_v3_tensor * c_mlp_proj_b;
};
struct gptj_layer_v2 {
// normalization
@@ -139,23 +139,23 @@ struct gptj_model {
gptj_hparams hparams;
// normalization
struct ggml_v3_tensor * ln_f_g;
struct ggml_v3_tensor * ln_f_b;
struct ggml_v3_tensor * wte; // token embedding
struct ggml_v3_tensor * lmh_g; // language model head
struct ggml_v3_tensor * lmh_b; // language model bias
std::vector<gptj_layer> layers;
// key + value memory
struct ggml_v3_tensor * memory_k;
struct ggml_v3_tensor * memory_v;
//
struct ggml_v3_context * ctx;
std::map<std::string, struct ggml_v3_tensor *> tensors;
};
// default hparams (GPT-2 117M)
@@ -259,47 +259,47 @@ struct gpt2_v2_model {
struct gpt2_layer {
// normalization
struct ggml_v3_tensor * ln_1_g;
struct ggml_v3_tensor * ln_1_b;
struct ggml_v3_tensor * ln_2_g;
struct ggml_v3_tensor * ln_2_b;
// attention
struct ggml_v3_tensor * c_attn_attn_w;
struct ggml_v3_tensor * c_attn_attn_b;
struct ggml_v3_tensor * c_attn_proj_w;
struct ggml_v3_tensor * c_attn_proj_b;
// mlp
struct ggml_v3_tensor * c_mlp_fc_w;
struct ggml_v3_tensor * c_mlp_fc_b;
struct ggml_v3_tensor * c_mlp_proj_w;
struct ggml_v3_tensor * c_mlp_proj_b;
};
struct gpt2_model {
gpt2_hparams hparams;
// normalization
struct ggml_v3_tensor * ln_f_g;
struct ggml_v3_tensor * ln_f_b;
struct ggml_v3_tensor * wte;     // token embedding
struct ggml_v3_tensor * wpe;     // position embedding
struct ggml_v3_tensor * lm_head; // language model head
std::vector<gpt2_layer> layers;
// key + value memory
struct ggml_v3_tensor * memory_k;
struct ggml_v3_tensor * memory_v;
//
struct ggml_v3_context * ctx;
std::map<std::string, struct ggml_v3_tensor *> tensors;
};
// default hparams (StableLM 3B)
@@ -351,7 +351,7 @@ struct gpt_neox_v2_model {
struct ggml_v2_tensor * wte; // position embedding
struct ggml_v2_tensor * lmh_g; // language model head
//struct ggml_v3_tensor * lmh_b; // language model bias
std::vector<gpt_neox_layer_v2> layers;
@@ -366,49 +366,49 @@ struct gpt_neox_v2_model {
struct gpt_neox_layer {
// pre normalization
struct ggml_v3_tensor * ln_1_g;
struct ggml_v3_tensor * ln_1_b;
// attention
struct ggml_v3_tensor * c_attn_attn_w;
struct ggml_v3_tensor * c_attn_attn_b;
struct ggml_v3_tensor * c_attn_proj_w;
struct ggml_v3_tensor * c_attn_proj_b;
// post normalization
struct ggml_v3_tensor * ln_2_g;
struct ggml_v3_tensor * ln_2_b;
// ff
struct ggml_v3_tensor * c_mlp_fc_w;
struct ggml_v3_tensor * c_mlp_fc_b;
struct ggml_v3_tensor * c_mlp_proj_w;
struct ggml_v3_tensor * c_mlp_proj_b;
};
struct gpt_neox_model {
gpt_neox_hparams hparams;
// normalization
struct ggml_v3_tensor * ln_f_g;
struct ggml_v3_tensor * ln_f_b;
struct ggml_v3_tensor * wte; // token embedding
struct ggml_v3_tensor * lmh_g; // language model head
//struct ggml_v3_tensor * lmh_b; // language model bias
std::vector<gpt_neox_layer> layers;
// key + value memory
struct ggml_v3_tensor * memory_k;
struct ggml_v3_tensor * memory_v;
//
struct ggml_v3_context * ctx;
std::map<std::string, struct ggml_v3_tensor *> tensors;
};
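The memory_k / memory_v members above back the per-layer KV cache; the view strides used in the eval code (a per-layer offset of il*n_ctx*element_size*n_embd) imply that each tensor holds n_layer*n_ctx*n_embd elements. A minimal allocation sketch under that assumption follows; ggml_v3_new_tensor_1d is assumed to be a straight rename of the classic ggml_new_tensor_1d, and F32 storage is chosen purely for illustration.

#include <cstdint>

// Hypothetical sketch, not the actual loader: allocate KV tensors sized to match
// the per-layer offsets used by the eval functions above.
static void kv_cache_alloc_sketch(struct ggml_v3_context * ctx, gpt_neox_model & model,
                                  int n_layer, int n_ctx, int n_embd) {
    const int64_t n_elements = (int64_t)n_layer * n_ctx * n_embd;   // one cell per (layer, position, channel)
    model.memory_k = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_elements);
    model.memory_v = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, n_elements);
}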
@@ -428,35 +428,35 @@ struct mpt_hparams {
struct mpt_layer {
// pre normalization
struct ggml_v3_tensor * norm_1_weight;
// attention
struct ggml_v3_tensor * c_attn_wqkv_weight;
struct ggml_v3_tensor * c_attn_out_proj_weight;
// post normalization
struct ggml_v3_tensor * norm_2_weight;
// ff
struct ggml_v3_tensor * ffn_up_proj;
struct ggml_v3_tensor * ffn_down_proj;
};
struct mpt_model {
mpt_hparams hparams;
struct ggml_v3_tensor * wte_weight;    // token embedding
struct ggml_v3_tensor * norm_f_weight; // final normalization
std::vector<mpt_layer> layers;
// key + value memory
struct ggml_v3_tensor * memory_k;
struct ggml_v3_tensor * memory_v;
struct ggml_v3_context * ctx;
std::map<std::string, struct ggml_v3_tensor *> tensors;
};
const float default_norm_eps = 1e-5f;
const size_t GGML_V3_MAX_NODES = 8192;

File diff suppressed because it is too large

View file

@@ -9,7 +9,6 @@
#include <sstream>
void utreplace(std::string & str, const std::string & needle, const std::string & replacement) {
size_t pos = 0;
while ((pos = str.find(needle, pos)) != std::string::npos) {
@@ -224,13 +223,13 @@ bool should_transpose_layer(std::string name)
}
static std::vector<uint8_t> kcpp_compute_buf;
void kcpp_graph_compute_helper(struct ggml_v3_cgraph *graph, int n_threads)
{
struct ggml_v3_cplan plan = ggml_v3_graph_plan(graph, n_threads);
if (plan.work_size > 0)
{
kcpp_compute_buf.resize(plan.work_size);
plan.work_data = kcpp_compute_buf.data();
}
ggml_graph_compute(graph, &plan);
ggml_v3_graph_compute(graph, &plan);
}
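For orientation, a minimal, hedged sketch of how this helper is meant to be driven: build a graph over ggml_v3 tensors, expand it with ggml_v3_build_forward_expand (exactly as the model eval code above does), then pass it to kcpp_graph_compute_helper, which plans the work, grows the shared work buffer, and computes. ggml_v3_init, ggml_v3_new_tensor_1d, ggml_v3_set_f32 and ggml_v3_new_graph are assumed here to be straight renames of their classic ggml counterparts; this diff does not confirm them.

#include "ggml_v3.h"

// Declared in the KoboldCpp utility header shown further below.
void kcpp_graph_compute_helper(ggml_v3_cgraph * graph, int n_threads);

// Hedged usage sketch: a trivial ggml_v3 graph dispatched through the helper.
static void tiny_v3_graph_demo(int n_threads) {
    struct ggml_v3_init_params params = { 16*1024*1024, NULL, false };   // 16 MB arena (assumed field order)
    struct ggml_v3_context * ctx = ggml_v3_init(params);

    struct ggml_v3_tensor * a = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, 8);
    struct ggml_v3_tensor * b = ggml_v3_new_tensor_1d(ctx, GGML_V3_TYPE_F32, 8);
    ggml_v3_set_f32(a, 1.0f);                  // fill inputs with constants
    ggml_v3_set_f32(b, 2.0f);
    struct ggml_v3_tensor * c = ggml_v3_add(ctx, a, b);

    struct ggml_v3_cgraph * gf = ggml_v3_new_graph(ctx);
    ggml_v3_build_forward_expand(gf, c);       // same call the model eval code uses
    kcpp_graph_compute_helper(gf, n_threads);  // plan + resize work buffer + compute

    ggml_v3_free(ctx);
}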

View file

@@ -8,6 +8,7 @@
#include <random>
#include <thread>
#include "common.h"
#include "ggml_v3.h"
//
// CLI argument parsing
@@ -53,7 +54,5 @@ void gpt_split_words(std::string str, std::vector<std::string>& words);
std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::string & text);
bool should_transpose_layer(std::string name);
void kcpp_graph_compute_helper(ggml_v3_cgraph * graph, int n_threads);