Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-11 17:44:38 +00:00)

Commit 0a70cc1ba7: Merge branch 'master' into concedo_experimental

# Conflicts:
#	.devops/nix/package.nix
#	.github/workflows/docker.yml
#	CMakeLists.txt

11 changed files with 357 additions and 283 deletions
.devops/main-intel.Dockerfile (new file, 26 changes)
@@ -0,0 +1,26 @@
+ARG ONEAPI_VERSION=2024.0.1-devel-ubuntu22.04
+ARG UBUNTU_VERSION=22.04
+
+FROM intel/hpckit:$ONEAPI_VERSION as build
+
+RUN apt-get update && \
+    apt-get install -y git
+
+WORKDIR /app
+
+COPY . .
+
+# for some reasons, "-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DLLAMA_NATIVE=ON" give worse performance
+RUN mkdir build && \
+    cd build && \
+    cmake .. -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx && \
+    cmake --build . --config Release --target main server
+
+FROM ubuntu:$UBUNTU_VERSION as runtime
+
+COPY --from=build /app/build/bin/main /main
+COPY --from=build /app/build/bin/server /server
+
+ENV LC_ALL=C.utf8
+
+ENTRYPOINT [ "/main" ]
@@ -217,12 +217,10 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
            }
            // store the external file name in params
            params.prompt_file = argv[i];
-           file.seekg(0, std::ios::end);
-           size_t size = file.tellg();
-           file.seekg(0, std::ios::beg);
-           params.prompt.resize(size);
-           file.read((char *)params.prompt.data(), size);
-           fprintf(stderr, "Read %zu bytes from binary file %s\n", size, argv[i]);
+           std::ostringstream ss;
+           ss << file.rdbuf();
+           params.prompt = ss.str();
+           fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), argv[i]);
        } else if (arg == "-f" || arg == "--file") {
            if (++i >= argc) {
                invalid_param = true;
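The replacement lines above read the whole prompt file through a std::ostringstream instead of the earlier seek/tell/resize/read sequence. A minimal standalone sketch of that technique (illustrative only, not the project's code; the command-line handling here is hypothetical):

// slurp.cpp: read an entire file into a std::string via rdbuf(), as the new lines do.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char ** argv) {
    if (argc < 2) { std::cerr << "usage: slurp <file>\n"; return 1; }

    std::ifstream file(argv[1], std::ios::binary);
    if (!file) { std::cerr << "failed to open " << argv[1] << "\n"; return 1; }

    std::ostringstream ss;
    ss << file.rdbuf();              // copies the whole stream; no manual size bookkeeping
    std::string prompt = ss.str();

    std::cerr << "Read " << prompt.size() << " bytes from " << argv[1] << "\n";
    return 0;
}

Streaming through rdbuf() avoids keeping a separately tracked size that can fall out of sync with what was actually read.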
@@ -6,7 +6,7 @@
" Similarly, you could add an insert mode keybind with
" inoremap <C-B> <Cmd>call llama#doLlamaGen()<CR>
"
-" g:llama_api_url and g:llama_overrides can be configured in your .vimrc
+" g:llama_api_url, g:llama_api_key and g:llama_overrides can be configured in your .vimrc
" let g:llama_api_url = "192.168.1.10:8080"
" llama_overrides can also be set through buffer/window scopes. For instance
" autocmd filetype python let b:llama_overrides = {"temp": 0.2}
@@ -82,6 +82,9 @@ func llama#doLlamaGen()
  endif
  let l:querydata.prompt = join(l:buflines, "\n")
  let l:curlcommand = copy(s:curlcommand)
+  if exists("g:llama_api_key")
+    call extend(l:curlcommand, ['--header', 'Authorization: Bearer ' .. g:llama_api_key])
+  endif
  let l:curlcommand[2] = json_encode(l:querydata)
  let b:job = job_start(l:curlcommand, {"callback": function("s:callbackHandler", [l:cbuffer])})
endfunction
@@ -2,18 +2,6 @@
// so there might be still unnecessary artifacts hanging around
// I'll gradually clean and extend it

-#include <cassert>
-#include <cmath>
-#include <cstdlib>
-#include <cstring>
-#include <fstream>
-#include <iostream>
-#include <map>
-#include <regex>
-#include <stdexcept>
-#include <vector>
-#include <sstream>
-
#include "clip.h"
#include "ggml.h"
#include "ggml-alloc.h"
@@ -30,6 +18,19 @@
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"

+#include <cassert>
+#include <cmath>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <map>
+#include <regex>
+#include <stdexcept>
+#include <vector>
+#include <sstream>
+#include <cinttypes>
+
static std::string format(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
@@ -217,9 +218,9 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {

static void print_tensor_info(const ggml_tensor* tensor, const char* prefix = "") {
    size_t tensor_size = ggml_nbytes(tensor);
-   printf("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%d, %d, %d, %d], type: %d\n",
+   printf("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "], type = %s\n",
        prefix, ggml_n_dims(tensor), tensor->name, tensor_size,
-       tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->type);
+       tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], ggml_type_name(tensor->type));
}

static projector_type clip_projector_type_from_string(const std::string & name) {
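The format-string change above is needed because ggml tensor dimensions (tensor->ne[]) are 64-bit integers, which "%d" does not print portably. A small sketch of the <cinttypes> macros the hunk switches to (illustrative assumption, not taken from the diff; the shape values are made up):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
    // int64_t needs the PRId64 macro so the format specifier matches on every platform.
    const int64_t ne[4] = {576, 1024, 1, 1};
    std::printf("shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "]\n",
                ne[0], ne[1], ne[2], ne[3]);
    return 0;
}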
@@ -592,7 +593,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
            mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
            mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
            // stride = 1, padding = 1, bias is nullptr
-           block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, nullptr, 1, 1, 1, 1, 1, 1);
+           block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);

            // layer norm
            // // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
@@ -640,7 +641,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
            // block_2
            {
                // stride = 2
-               block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, nullptr, 2, 2, 1, 1, 1, 1);
+               block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);

                // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
                // layer norm
@@ -741,18 +742,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
    {
        std::map<enum ggml_type, uint32_t> n_type;

-       uint32_t n_type_max = 0;
-       enum ggml_type type_max = GGML_TYPE_F32;
-
        for (int i = 0; i < n_tensors; i++) {
            enum ggml_type type = gguf_get_tensor_type(ctx, i);

            n_type[type]++;
-
-           if (n_type_max < n_type[type]) {
-               n_type_max = n_type[type];
-               type_max = type;
-           }
        }

        printf("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
@@ -795,14 +788,12 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
            size_t tensor_size = ggml_nbytes(cur);
            buffer_size += tensor_size;
            if (verbosity >= 3) {
-               printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%d, %d, %d, %d], type: %d\n", __func__, i,
-                   ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], type);
+               printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
+                   __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
            }
        }
    }
-
-

    buffer_size += n_tensors * 128 /* CLIP PADDING */;

    clip_ctx * new_clip = new clip_ctx;
@@ -223,13 +223,18 @@ struct kl_divergence_result {
    double sum_kld2 = 0;
    double sum_nll_diff = 0;
    double sum_nll_diff2 = 0;
+   size_t n_same_top = 0;
    size_t count = 0;
};

-static void log_softmax(int n_vocab, const float * logits, const uint16_t * base_log_prob, int tok, kl_divergence_result & kld) {
+static double log_softmax(int n_vocab, const float * logits, const uint16_t * base_log_prob, int tok, kl_divergence_result & kld) {
    float max_logit = logits[0];
+   int imax = 0;
    for (int i = 1; i < n_vocab; ++i) {
-       max_logit = std::max(max_logit, logits[i]);
+       if (logits[i] > max_logit) {
+           max_logit = logits[i];
+           imax = i;
+       }
    }
    double sum_exp = 0.0;
    for (int i = 0; i < n_vocab; ++i) {
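The hunk above makes log_softmax also track the index of the largest logit, so the caller can later compare the top token of the evaluated model against the base model. A simplified standalone sketch of that computation (assumption: toy logits, not the file's exact code):

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> logits = {1.0f, 3.5f, 0.2f, 2.9f};

    // Track the maximum logit and its index in a single pass.
    float max_logit = logits[0];
    int imax = 0;
    for (size_t i = 1; i < logits.size(); ++i) {
        if (logits[i] > max_logit) { max_logit = logits[i]; imax = (int)i; }
    }

    // log_softmax(x_i) = x_i - max - log(sum_j exp(x_j - max))
    double sum_exp = 0.0;
    for (float x : logits) sum_exp += std::exp(x - max_logit);
    const double log_sum_exp = std::log(sum_exp);

    for (size_t i = 0; i < logits.size(); ++i) {
        std::printf("log_softmax[%zu] = %.4f%s\n", i, logits[i] - max_logit - log_sum_exp,
                    (int)i == imax ? "  (top token)" : "");
    }
    return 0;
}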
@@ -248,8 +253,14 @@ static void log_softmax(int n_vocab, const float * logits, const uint16_t * base
    kld.sum_nll_diff2 += nll*nll;
    max_logit += log_sum_exp;
    double sum = 0;
+   int imax_base = -1;
+   float p_log_base_max = 0;
    for (int i = 0; i < n_vocab; ++i) {
        const float p_log_base = scale*base_log_prob[i] + min_log_prob;
+       if (i == 0 || p_log_base > p_log_base_max) {
+           p_log_base_max = p_log_base;
+           imax_base = i;
+       }
        if (p_log_base > -16.f) {
            const float p_base = expf(p_log_base);
            sum += p_base * (p_log_base - logits[i] + max_logit);
@@ -258,14 +269,17 @@ static void log_softmax(int n_vocab, const float * logits, const uint16_t * base
    kld.sum_kld += sum;
    kld.sum_kld2 += sum*sum;
    ++kld.count;
+   if (imax == imax_base) ++kld.n_same_top;
+   return sum;
}

static void process_logits(int n_vocab, const float * logits, const int * tokens, int n_token,
-       std::vector<std::thread> & workers, const std::vector<uint16_t> & base_log_probs, kl_divergence_result & kld) {
+       std::vector<std::thread> & workers, const std::vector<uint16_t> & base_log_probs, kl_divergence_result & kld,
+       float * kld_values) {
    std::mutex mutex;
    const int nv = 2*((n_vocab + 1)/2) + 4;
    int counter = 0;
-   auto compute = [&mutex, &counter, &base_log_probs, &kld, n_vocab, logits, tokens, n_token, nv] () {
+   auto compute = [&mutex, &counter, &base_log_probs, &kld, n_vocab, logits, tokens, n_token, nv, kld_values] () {
        kl_divergence_result local_kld;
        while (true) {
            std::unique_lock<std::mutex> lock(mutex);
@@ -277,11 +291,13 @@ static void process_logits(int n_vocab, const float * logits, const int * tokens
                kld.sum_kld2 += local_kld.sum_kld2;
                kld.sum_nll_diff += local_kld.sum_nll_diff;
                kld.sum_nll_diff2 += local_kld.sum_nll_diff2;
+               kld.n_same_top += local_kld.n_same_top;
                kld.count += local_kld.count;
                break;
            }
            lock.unlock();
-           log_softmax(n_vocab, logits + i*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
+           double v = log_softmax(n_vocab, logits + i*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
+           kld_values[i] = (float)v;
        }
    };
    for (auto & w : workers) {
@@ -1203,11 +1219,11 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
    printf("Final Winogrande score(%d tasks): %.4lf +/- %.4lf\n", n_done, 100*p, sigma);
}

-static bool deserialize_string(std::istream& in, std::string& str) {
+static bool deserialize_string(std::istream & in, std::string & str) {
    uint32_t size;
    if (!in.read((char *)&size, sizeof(size)).fail()) {
        str.resize(size);
-       if (!in.read((char *)str.data(), size).fail()) return true;
+       if (!in.read((char *)&str[0], size).fail()) return true;
    }
    return false;
}
@@ -1616,7 +1632,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
    in.read((char *)&n_vocab, sizeof(n_vocab));
    in.read((char *)&n_chunk, sizeof(n_chunk));
    if (in.fail()) {
-       fprintf(stderr, "%s: failed rwading n_vocab, n_chunk from %s\n", __func__, params.logits_file.c_str());
+       fprintf(stderr, "%s: failed reading n_vocab, n_chunk from %s\n", __func__, params.logits_file.c_str());
        return;
    }
    if (n_vocab != llama_n_vocab(llama_get_model(ctx))) {
|
||||||
const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
|
const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
|
||||||
|
|
||||||
std::vector<uint16_t> log_probs_uint16(size_t(n_ctx - 1 - n_ctx/2) * nv);
|
std::vector<uint16_t> log_probs_uint16(size_t(n_ctx - 1 - n_ctx/2) * nv);
|
||||||
|
std::vector<float> kld_values(size_t(n_ctx - 1 - n_ctx/2)*n_chunk);
|
||||||
std::vector<float> logits;
|
std::vector<float> logits;
|
||||||
if (num_batches > 1) {
|
if (num_batches > 1) {
|
||||||
logits.reserve(n_ctx * n_vocab);
|
logits.reserve(n_ctx * n_vocab);
|
||||||
|
@@ -1653,6 +1670,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
    };

    kl_divergence_result kld;
+   auto kld_ptr = kld_values.data();

    for (int i = 0; i < n_chunk; ++i) {
        const int start = i * n_ctx;
|
||||||
}
|
}
|
||||||
fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
|
fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
|
||||||
|
|
||||||
printf("\nchunk PPL ln(PPL(Q)/PPL(base)) KL-Divergence\n");
|
printf("\nchunk PPL ln(PPL(Q)/PPL(base)) KL-Divergence Same top\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
const int first = n_ctx/2;
|
const int first = n_ctx/2;
|
||||||
const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
|
const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
|
||||||
process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
|
process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
|
||||||
workers, log_probs_uint16, kld);
|
workers, log_probs_uint16, kld, kld_ptr);
|
||||||
|
kld_ptr += n_ctx - 1 - first;
|
||||||
|
|
||||||
auto ppl = mean_and_uncertainty(kld.sum_nll, kld.sum_nll2, kld.count);
|
auto ppl = mean_and_uncertainty(kld.sum_nll, kld.sum_nll2, kld.count);
|
||||||
auto log_ppl_ratio = mean_and_uncertainty(kld.sum_nll_diff, kld.sum_nll_diff2, kld.count);
|
auto log_ppl_ratio = mean_and_uncertainty(kld.sum_nll_diff, kld.sum_nll_diff2, kld.count);
|
||||||
auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
|
auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
|
||||||
|
auto p_top = 1.*kld.n_same_top/kld.count;
|
||||||
|
auto d_p_top = sqrt(p_top*(1 - p_top)/(kld.count - 1));
|
||||||
|
|
||||||
printf("%4d %10.4lf %10.5lf ± %10.5f %10.5f ± %10.5lf\n", i+1, exp(ppl.first),
|
printf("%4d %10.4lf %10.5lf ± %10.5f %10.5f ± %10.5lf %.5f ± %.5f\n", i+1, exp(ppl.first),
|
||||||
log_ppl_ratio.first, log_ppl_ratio.second, kl_div.first, kl_div.second);
|
log_ppl_ratio.first, log_ppl_ratio.second, kl_div.first, kl_div.second,
|
||||||
|
p_top, d_p_top);
|
||||||
|
|
||||||
fflush(stdout);
|
fflush(stdout);
|
||||||
|
|
||||||
|
@@ -1727,6 +1749,35 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
    }
    printf("\n");

+   if (kld.count < 100) return; // we do not wish to do statistics on so few values
+
+   std::sort(kld_values.begin(), kld_values.end());
+
+   printf("===== KL-divergence statistics\n");
+   auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
+   printf("Average: %10.6f ±%10.6lf\n", kl_div.first, kl_div.second);
+   auto kld_median = kld_values.size()%2 == 0 ? 0.5f*(kld_values[kld_values.size()/2] + kld_values[kld_values.size()/2-1])
+                                              : kld_values[kld_values.size()/2];
+   printf("Median : %10.6f\n", kld_median);
+
+   auto percentile = [&kld_values] (float fraction) {
+       if (fraction <= 0) return kld_values.front();
+       if (fraction >= 1) return kld_values.back();
+       float p = fraction*(kld_values.size() - 1);
+       size_t ip = size_t(p); p -= ip;
+       return (1 - p)*kld_values[ip] + p*kld_values[std::min(ip+1, kld_values.size()-1)];
+   };
+
+   printf("Maximum: %10.6f\n", kld_values.back());
+   printf("KLD_99 : %10.6f\n", percentile(0.99f));
+   printf("KLD_95 : %10.6f\n", percentile(0.95f));
+   printf("KLD_90 : %10.6f\n", percentile(0.90f));
+
+   printf("Minimum: %10.6f\n", kld_values.front());
+   printf("KLD_01 : %10.6f\n", percentile(0.01f));
+   printf("KLD_05 : %10.6f\n", percentile(0.05f));
+   printf("KLD_10 : %10.6f\n", percentile(0.10f));
+
}

int main(int argc, char ** argv) {
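The percentile lambda introduced above linearly interpolates between neighbouring order statistics of the sorted per-token KL values. A standalone sketch with a tiny worked example (assumption: mirrors the lambda in simplified form, with made-up data):

#include <algorithm>
#include <cstdio>
#include <vector>

static float percentile(const std::vector<float> & sorted, float fraction) {
    if (fraction <= 0) return sorted.front();
    if (fraction >= 1) return sorted.back();
    float p = fraction * (sorted.size() - 1);   // fractional index into the sorted values
    size_t ip = (size_t)p; p -= ip;
    return (1 - p) * sorted[ip] + p * sorted[std::min(ip + 1, sorted.size() - 1)];
}

int main() {
    std::vector<float> kld = {0.5f, 0.1f, 0.9f, 0.3f, 0.7f};
    std::sort(kld.begin(), kld.end());
    // With 5 values, the 90th percentile lands at index 3.6: 0.4*kld[3] + 0.6*kld[4] = 0.82
    std::printf("KLD_90 = %.3f\n", percentile(kld, 0.90f));
    return 0;
}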
@@ -109,8 +109,8 @@ void ggml_tallocr_alloc(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
        if (block->size >= size) {
            best_fit_block = alloc->n_free_blocks - 1;
        } else {
-           fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
-                   __func__, size, max_avail);
+           fprintf(stderr, "%s: not enough space in the buffer to allocate %s (needed %zu, largest block available %zu)\n",
+                   __func__, tensor->name, size, max_avail);
            GGML_ASSERT(!"not enough space in the buffer");
            return;
        }
ggml-cuda.cu (89 changes)
@@ -13,6 +13,10 @@
#include <map>
#include <array>

+// stringize macro for converting __CUDA_ARCH_LIST__ (list of integers) to string
+#define STRINGIZE_IMPL(...) #__VA_ARGS__
+#define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__)
+
#if defined(GGML_USE_HIPBLAS)
#include <hip/hip_runtime.h>
#include <hipblas/hipblas.h>
@@ -585,13 +589,28 @@ static cuda_device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, 0,
static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};

[[noreturn]]
-static __device__ void bad_arch() {
-    printf("ERROR: ggml-cuda was compiled without support for the current GPU architecture.\n");
+static __device__ void no_device_code(
+    const char * file_name, const int line, const char * function_name, const int arch, const char * arch_list) {
+
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+    printf("%s:%d: ERROR: HIP kernel %s has no device code compatible with HIP arch %d.\n",
+           file_name, line, function_name, arch);
+    (void) arch_list;
+#else
+    printf("%s:%d: ERROR: CUDA kernel %s has no device code compatible with CUDA arch %d. ggml-cuda.cu was compiled for: %s\n",
+           file_name, line, function_name, arch, arch_list);
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
    __trap();

-   (void) bad_arch; // suppress unused function warning
+   (void) no_device_code; // suppress unused function warning
}

+#ifdef __CUDA_ARCH__
+#define NO_DEVICE_CODE no_device_code(__FILE__, __LINE__, __FUNCTION__, __CUDA_ARCH__, STRINGIZE(__CUDA_ARCH_LIST__))
+#else
+#define NO_DEVICE_CODE GGML_ASSERT(false && "NO_DEVICE_CODE not valid in host code.")
+#endif // __CUDA_ARCH__
+
static __device__ __forceinline__ float warp_reduce_sum(float x) {
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
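The NO_DEVICE_CODE macro above relies on the two-level STRINGIZE expansion so the architecture list macro is expanded to its value before being turned into a string literal. A minimal sketch of that preprocessor pattern (ARCH_LIST is a hypothetical stand-in for __CUDA_ARCH_LIST__, which only nvcc defines):

#include <cstdio>

#define STRINGIZE_IMPL(...) #__VA_ARGS__
#define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__)

#define ARCH_LIST 520, 610, 700, 800   // hypothetical stand-in for __CUDA_ARCH_LIST__

int main() {
    // Without the extra indirection, stringizing would produce the literal text "ARCH_LIST"
    // instead of the expanded list of integers.
    std::printf("%s\n", STRINGIZE(ARCH_LIST));  // prints: 520, 610, 700, 800
    return 0;
}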
@@ -618,7 +637,7 @@ static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) {
    return a;
#else
    (void) a;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
}

@@ -639,7 +658,7 @@ static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
    return x;
#else
    (void) x;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
}

@@ -2422,7 +2441,7 @@ static __global__ void dequantize_block_q8_0_f16(const void * __restrict__ vx, h
    }
#else
    (void) vx; (void) y; (void) k;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_PASCAL
}

@@ -2453,7 +2472,7 @@ template <int vdr> static __device__ __forceinline__ float vec_dot_q4_0_q8_1_imp
    // second part effectively subtracts 8 from each quant value
    return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y);
#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2490,7 +2509,7 @@ template <int vdr> static __device__ __forceinline__ float vec_dot_q4_1_q8_1_imp
    // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it
    return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1));
#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2525,7 +2544,7 @@ template <int vdr> static __device__ __forceinline__ float vec_dot_q5_0_q8_1_imp
    // second part effectively subtracts 16 from each quant value
    return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y);
#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2570,7 +2589,7 @@ template <int vdr> static __device__ __forceinline__ float vec_dot_q5_1_q8_1_imp
    return sumi*d5d8 + m5s8 / (QI5_1 / vdr);

#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2591,7 +2610,7 @@ template <int vdr> static __device__ __forceinline__ float vec_dot_q8_0_q8_1_imp

    return d8_0*d8_1 * sumi;
#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2621,7 +2640,7 @@ template <int vdr> static __device__ __forceinline__ float vec_dot_q8_1_q8_1_imp
    // scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it
    return sumi*d8d8 + m8s8 / (QI8_1 / vdr);
#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2656,7 +2675,7 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq(

    return dm2f.x*sumf_d - dm2f.y*sumf_m;
#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2693,7 +2712,7 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq(

    return d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m);
#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2733,7 +2752,7 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq(

    return d3 * sumf;
#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2758,7 +2777,7 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq(

    return d3*d8 * sumi;
#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2791,7 +2810,7 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq(
    return dm4f.x*sumf_d - dm4f.y*sumf_m;

#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2824,7 +2843,7 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq(
    return dm4f.x*sumf_d - dm4f.y*sumf_m;

#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2864,7 +2883,7 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq(
    return dm5f.x*sumf_d - dm5f.y*sumf_m;

#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2897,7 +2916,7 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq(
    return dm4f.x*sumf_d - dm4f.y*sumf_m;

#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2927,7 +2946,7 @@ static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq(

    return d*sumf;
#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -2958,7 +2977,7 @@ static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq(
    return d6 * sumf_d;

#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}

@@ -3824,7 +3843,7 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1(
    return dall * sumf_d - dmin * sumf_m;

#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A

#endif

@@ -4007,7 +4026,7 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1(
    return d * sumf_d;

#else
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A

#endif

@@ -4502,7 +4521,7 @@ template <bool need_check> static __global__ void
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q4_0_q8_1_mul_mat;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_VOLTA
}

@@ -4571,7 +4590,7 @@ template <bool need_check> static __global__ void
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q4_1_q8_1_mul_mat;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_VOLTA
}

@@ -4638,7 +4657,7 @@ template <bool need_check> static __global__ void
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q5_0_q8_1_mul_mat;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_VOLTA
}

@@ -4705,7 +4724,7 @@ mul_mat_q5_1(
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q5_1_q8_1_mul_mat;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_VOLTA
}

@@ -4772,7 +4791,7 @@ template <bool need_check> static __global__ void
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q8_0_q8_1_mul_mat;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_VOLTA
}

@@ -4839,7 +4858,7 @@ mul_mat_q2_K(
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q2_K_q8_1_mul_mat;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_VOLTA
}

@@ -4908,7 +4927,7 @@ template <bool need_check> static __global__ void
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q3_K_q8_1_mul_mat;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_VOLTA
}

@@ -4977,7 +4996,7 @@ template <bool need_check> static __global__ void
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q4_K_q8_1_mul_mat;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_VOLTA
}

@@ -5044,7 +5063,7 @@ mul_mat_q5_K(
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q5_K_q8_1_mul_mat;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_VOLTA
}

@@ -5113,7 +5132,7 @@ template <bool need_check> static __global__ void
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q6_K_q8_1_mul_mat;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_VOLTA
}

@@ -5836,7 +5855,7 @@ static __global__ void soft_max_f16(const float * x, const float * y, float * ds
    }
#else
    (void) x; (void) y; (void) dst; (void) ncols_par; (void) nrows_y; (void) scale;
-   bad_arch();
+   NO_DEVICE_CODE;
#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
}

@@ -668,7 +668,8 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const
            return true;
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
-           return ctx->support_simdgroup_reduction;
+           return ctx->support_simdgroup_reduction &&
+               (op->src[0]->type != GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F32);
        case GGML_OP_CPY:
        case GGML_OP_DUP:
        case GGML_OP_CONT:
ggml.c (9 changes)
@@ -5368,14 +5368,12 @@ struct ggml_tensor * ggml_conv_depthwise_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
-       struct ggml_tensor * c,
        int s0,
        int s1,
        int p0,
        int p1,
        int d0,
        int d1) {
-
    struct ggml_tensor * new_a = ggml_reshape_4d(ctx, a, a->ne[0], a->ne[1], 1, a->ne[2] * a->ne[3]);
    struct ggml_tensor * im2col = ggml_im2col(ctx, new_a,
                                        ggml_reshape_4d(ctx, b, b->ne[0], b->ne[1], 1, b->ne[2] * b->ne[3]),
@@ -9991,7 +9989,7 @@ static void ggml_compute_forward_mul_mat(
            return;
        }

-       const int64_t tgemm0 = ggml_perf_time_us();
+       //const int64_t tgemm0 = ggml_perf_time_us();
        for (int64_t i13 = 0; i13 < ne13; i13++) {
            for (int64_t i12 = 0; i12 < ne12; i12++) {
                const int64_t i03 = i13/r3;
@@ -16934,7 +16932,10 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
                if (ggml_compute_forward_mul_mat_use_blas(node)) {
                    if (node->src[0]->type != GGML_TYPE_F32) {
                        // here we need memory for fully dequantized matrix from src0
-                       cur = ggml_type_size(GGML_TYPE_F32)*ggml_nelements(node->src[0]);
+                       // take into account that src0 can be broadcasted into src1[2,3]
+                       cur = ggml_type_size(GGML_TYPE_F32)
+                           * node->src[0]->ne[0]*node->src[0]->ne[1]
+                           * node->src[1]->ne[2]*node->src[1]->ne[3];
                    }
                } else
#endif
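The new work-buffer formula above accounts for src0 being broadcast over the batch dimensions of src1, so the dequantized F32 copy is sized per broadcasted 2-D slice rather than by ggml_nelements(src0) alone. A worked sketch with hypothetical shapes (the sizes are illustrative, not taken from the diff):

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t src0_ne[4] = {4096, 4096, 1, 1}; // quantized weight matrix (hypothetical)
    const int64_t src1_ne[4] = {4096,  512, 8, 1}; // activations with a batch of 8 (hypothetical)
    const int64_t f32_size   = 4;                  // sizeof(float)

    // cur = f32 element size * src0 2-D slice * src1 broadcast dims
    const int64_t cur = f32_size * src0_ne[0] * src0_ne[1] * src1_ne[2] * src1_ne[3];
    std::printf("work buffer: %lld bytes (%.1f MiB)\n",
                (long long) cur, cur / (1024.0 * 1024.0));   // 536870912 bytes = 512.0 MiB
    return 0;
}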
ggml.h (1 change)
@@ -1506,7 +1506,6 @@ extern "C" {
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
-           struct ggml_tensor * c,
            int s0,
            int s1,
            int p0,
llama.cpp (371 changes)
@@ -1679,6 +1679,9 @@ struct llama_context {
        for (ggml_backend_t backend : backends) {
            ggml_backend_free(backend);
        }
+
+       ggml_backend_buffer_free(buf_input);
+       ggml_free(ctx_input);
    }

    llama_cparams cparams;
@@ -1725,8 +1728,14 @@ struct llama_context {
    // allocator for the input tensors
    ggml_tallocr * alloc = nullptr;

-   // temporary buffer for copying data to/from the backend
-   std::vector<no_init<uint8_t>> buf_copy;
+   // input tensors
+   ggml_backend_buffer_t buf_input = nullptr;
+   ggml_context * ctx_input = nullptr;
+   struct ggml_tensor * inp_tokens;  // I32 [n_batch]
+   struct ggml_tensor * inp_embd;    // F32 [n_embd, n_batch]
+   struct ggml_tensor * inp_pos;     // I32 [n_batch]
+   struct ggml_tensor * inp_KQ_mask; // F32 [n_ctx, n_batch]
+   struct ggml_tensor * inp_K_shift; // I32 [n_ctx]

#ifdef GGML_USE_MPI
    ggml_mpi_context * ctx_mpi = NULL;
@@ -2311,18 +2320,18 @@ struct llama_model_loader {
        }

        switch (type_max) {
            case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break;
            case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break;
            case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break;
            case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break;
            case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break;
            case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break;
            case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break;
            case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break;
            case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break;
            case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break;
            case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break;
            case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
            case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
            case GGML_TYPE_IQ2_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS; break;
            default:
@@ -4119,22 +4128,24 @@ static struct ggml_tensor * llm_build_inp_embd(
        const llama_hparams & hparams,
        const llama_batch & batch,
        struct ggml_tensor * tok_embd,
+       struct ggml_tensor * inp_tokens,
+       struct ggml_tensor * inp_embd,
        const llm_build_cb & cb) {
    const int64_t n_embd = hparams.n_embd;

    struct ggml_tensor * inpL;

    if (batch.token) {
-       struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
+       struct ggml_tensor * inp_tokens_v = ggml_view_1d(ctx, inp_tokens, batch.n_tokens, 0);
        cb(inp_tokens, "inp_tokens", -1);

-       inpL = ggml_get_rows(ctx, tok_embd, inp_tokens);
+       inpL = ggml_get_rows(ctx, tok_embd, inp_tokens_v);
    } else {
#ifdef GGML_USE_MPI
        GGML_ASSERT(false && "not implemented");
#endif

-       inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
+       inpL = ggml_view_2d(ctx, inp_embd, n_embd, batch.n_tokens, inp_embd->nb[1], 0);
    }

    return inpL;
@@ -4148,6 +4159,7 @@ static void llm_build_k_shift(
        const llama_cparams & cparams,
        const llama_kv_cache & kv,
        struct ggml_cgraph * graph,
+       struct ggml_tensor * K_shift,
        llm_rope_type type,
        int64_t n_ctx,
        float freq_base,
@@ -4164,9 +4176,6 @@ static void llm_build_k_shift(
    const float beta_fast = cparams.yarn_beta_fast;
    const float beta_slow = cparams.yarn_beta_slow;

-   struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_ctx);
-   cb(K_shift, "K_shift", -1);
-
    int rope_type = 0;

    switch (type) {
@@ -4470,9 +4479,9 @@ static struct ggml_tensor * llm_build_kv(

    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
+   ggml_build_forward_expand(graph, q_cur);
    ggml_build_forward_expand(graph, k_cur);
    ggml_build_forward_expand(graph, v_cur);
-   ggml_build_forward_expand(graph, q_cur);

    llm_build_kv_store(ctx, hparams, kv, graph, k_cur, v_cur, n_ctx, n_tokens, kv_head, cb, il);

@@ -4487,6 +4496,7 @@ static struct ggml_tensor * llm_build_kv(

struct llm_build_context {
    const llama_model & model;
+   const llama_context & lctx;
    const llama_hparams & hparams;
    const llama_cparams & cparams;
    const llama_batch & batch;
@@ -4533,6 +4543,7 @@ struct llm_build_context {
        const llm_build_cb & cb,
        bool worst_case) :
        model (lctx.model),
+       lctx (lctx),
        hparams (model.hparams),
        cparams (lctx.cparams),
        batch (batch),
@@ -4593,20 +4604,20 @@ struct llm_build_context {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

-       inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+       inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
-       struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+       struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-       struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+       struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
-           llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
+           llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
@@ -4777,20 +4788,20 @@ struct llm_build_context {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

-       inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+       inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
-       struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+       struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-       struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+       struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
-           llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
+           llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
@@ -4898,20 +4909,20 @@ struct llm_build_context {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

-       inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+       inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
-       struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+       struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-       struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+       struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
-           llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
+           llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
@@ -5020,15 +5031,15 @@ struct llm_build_context {
        struct ggml_tensor * pos;
        struct ggml_tensor * inpL;

-       inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+       inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
-       struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+       struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-       struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+       struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
@ -5117,19 +5128,19 @@ struct llm_build_context {
|
||||||
struct ggml_tensor * cur;
|
struct ggml_tensor * cur;
|
||||||
struct ggml_tensor * inpL;
|
struct ggml_tensor * inpL;
|
||||||
|
|
||||||
inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
|
inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
|
||||||
cb(inpL, "inp_embd", -1);
|
cb(inpL, "inp_embd", -1);
|
||||||
|
|
||||||
// inp_pos - contains the positions
|
// inp_pos - contains the positions
|
||||||
struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
|
struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
|
||||||
cb(inp_pos, "inp_pos", -1);
|
cb(inp_pos, "inp_pos", -1);
|
||||||
|
|
||||||
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
|
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
|
||||||
struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
|
struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
|
||||||
cb(KQ_mask, "KQ_mask", -1);
|
cb(KQ_mask, "KQ_mask", -1);
|
||||||
|
|
||||||
if (do_rope_shift) {
|
if (do_rope_shift) {
|
||||||
llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
|
llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int il = 0; il < n_layer; ++il) {
|
for (int il = 0; il < n_layer; ++il) {
|
||||||
@@ -5324,11 +5335,11 @@ struct llm_build_context {
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;

-        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
         cb(inpL, "inp_embd", -1);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
         cb(KQ_mask, "KQ_mask", -1);

         for (int il = 0; il < n_layer; ++il) {
@@ -5414,11 +5425,11 @@ struct llm_build_context {
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;

-        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
         cb(inpL, "inp_embd", -1);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
         cb(KQ_mask, "KQ_mask", -1);

         inpL = llm_build_norm(ctx0, inpL, hparams,
@@ -5507,11 +5518,11 @@ struct llm_build_context {
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;

-        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
         cb(inpL, "inp_embd", -1);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
         cb(KQ_mask, "KQ_mask", -1);

         for (int il = 0; il < n_layer; ++il) {
@@ -5603,20 +5614,20 @@ struct llm_build_context {
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;

-        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
         cb(inpL, "inp_embd", -1);

         // inp_pos - contains the positions
-        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
         cb(inp_pos, "inp_pos", -1);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
         cb(KQ_mask, "KQ_mask", -1);

         // shift the entire K-cache if needed
         if (do_rope_shift) {
-            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
+            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
         }

         for (int il = 0; il < n_layer; ++il) {
@@ -5726,20 +5737,20 @@ struct llm_build_context {
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;

-        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
         cb(inpL, "inp_embd", -1);

         // inp_pos - contains the positions
-        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
         cb(inp_pos, "inp_pos", -1);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
         cb(KQ_mask, "KQ_mask", -1);

         // shift the entire K-cache if needed
         if (do_rope_shift) {
-            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
+            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
         }

         for (int il = 0; il < n_layer; ++il) {
@@ -5840,20 +5851,20 @@ struct llm_build_context {
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;

-        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
         cb(inpL, "inp_embd", -1);

         // inp_pos - contains the positions
-        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
         cb(inp_pos, "inp_pos", -1);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
         cb(KQ_mask, "KQ_mask", -1);

         // shift the entire K-cache if needed
         if (do_rope_shift) {
-            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
+            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
         }

         for (int il = 0; il < n_layer; ++il) {
@@ -5961,20 +5972,20 @@ struct llm_build_context {
         struct ggml_tensor * ffn_output;
         struct ggml_tensor * inpL;

-        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
         cb(inpL, "inp_embd", -1);

         // inp_pos - contains the positions
-        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
         cb(inp_pos, "inp_pos", -1);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
         cb(KQ_mask, "KQ_mask", -1);

         // shift the entire K-cache if needed
         if (do_rope_shift) {
-            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
+            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
         }

         for (int il = 0; il < n_layer; ++il) {
@@ -6083,20 +6094,20 @@ struct llm_build_context {
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;

-        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
         cb(inpL, "inp_embd", -1);

         // inp_pos - contains the positions
-        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
         cb(inp_pos, "inp_pos", -1);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
         cb(KQ_mask, "KQ_mask", -1);

         // shift the entire K-cache if needed
         if (do_rope_shift) {
-            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
+            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
         }

         for (int il = 0; il < n_layer; ++il) {
@@ -6190,15 +6201,15 @@ struct llm_build_context {
         struct ggml_tensor * pos;
         struct ggml_tensor * inpL;

-        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
         cb(inpL, "inp_embd", -1);

         // inp_pos - contains the positions
-        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
         cb(inp_pos, "inp_pos", -1);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
         cb(KQ_mask, "KQ_mask", -1);

         pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
@@ -6288,20 +6299,20 @@ struct llm_build_context {
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;

-        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
         cb(inpL, "inp_embd", -1);

         // inp_pos - contains the positions
-        struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
         cb(inp_pos, "inp_pos", -1);

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
         cb(KQ_mask, "KQ_mask", -1);

         // shift the entire K-cache if needed
         if (do_rope_shift) {
-            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
+            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
         }

         for (int il = 0; il < n_layer; ++il) {
@@ -6395,15 +6406,7 @@ static struct ggml_cgraph * llama_build_graph(
     // check if we should build the worst-case graph (for memory measurement)
     const bool worst_case = ggml_tallocr_is_measure(lctx.alloc);

-    // keep track of the input that has already been allocated
-    bool alloc_inp_tokens  = false;
-    bool alloc_inp_embd    = false;
-    bool alloc_inp_pos     = false;
-    bool alloc_inp_KQ_mask = false;
-    bool alloc_inp_K_shift = false;
-
     // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
-    // TODO: improve handling of input and output tensors, then replace this with ggml_set_name
     llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) {
         if (il >= 0) {
             ggml_format_name(cur, "%s-%d", name, il);
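Note: with the lazy per-input allocation removed, the build callback is reduced to naming (plus the optional CPU pinning kept below). A minimal sketch of such a naming-only callback follows; the exact `llm_build_cb` signature is taken as an assumption from the surrounding diff, and the factory function is illustrative.

#include <functional>
#include "ggml.h"

// Assumed shape of the build callback type used in this codebase.
using llm_build_cb = std::function<void(struct ggml_tensor * cur, const char * name, int il)>;

// Naming-only callback: per-layer tensors get a "-<layer>" suffix,
// graph-wide tensors (il < 0) keep the plain name.
static llm_build_cb make_naming_cb() {
    return [](struct ggml_tensor * cur, const char * name, int il) {
        if (il >= 0) {
            ggml_format_name(cur, "%s-%d", name, il);  // e.g. "ffn_out-3"
        } else {
            ggml_set_name(cur, name);
        }
    };
}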
@@ -6411,127 +6414,79 @@ static struct ggml_cgraph * llama_build_graph(
             ggml_set_name(cur, name);
         }

         if (!lctx.cparams.offload_kqv) {
             if (strcmp(name, "kqv_merged_cont") == 0) {
                 // all nodes between the KV store and the attention output are run on the CPU
                 ggml_backend_sched_set_node_backend(lctx.sched, cur, lctx.backend_cpu);
             }
         }
-
-        //
-        // allocate input tensors and set input data
-        //
-
-        if (!alloc_inp_tokens && strcmp(name, "inp_tokens") == 0) {
-            ggml_tallocr_alloc(lctx.alloc, cur);
-
-            if (!ggml_tallocr_is_measure(lctx.alloc) && batch.token) {
-                const int64_t n_tokens = cur->ne[0];
-
-                ggml_backend_tensor_set(cur, batch.token, 0, n_tokens*ggml_element_size(cur));
-            }
-
-            alloc_inp_tokens = true;
-        }
-
-        if (!alloc_inp_embd && strcmp(name, "inp_embd") == 0 && batch.embd) {
-            ggml_tallocr_alloc(lctx.alloc, cur);
-
-            if (!ggml_tallocr_is_measure(lctx.alloc) && batch.embd) {
-                const int64_t n_embd   = cur->ne[0];
-                const int64_t n_tokens = cur->ne[1];
-
-                ggml_backend_tensor_set(cur, batch.embd, 0, n_tokens*n_embd*ggml_element_size(cur));
-            }
-
-            alloc_inp_embd = true;
-        }
-
-        if (!alloc_inp_pos && strcmp(name, "inp_pos") == 0) {
-            ggml_tallocr_alloc(lctx.alloc, cur);
-
-            if (!ggml_tallocr_is_measure(lctx.alloc) && batch.pos) {
-                const int64_t n_tokens = cur->ne[0];
-
-                static_assert(std::is_same<llama_pos, int32_t>::value, "llama_pos must be int32_t");
-                ggml_backend_tensor_set(cur, batch.pos, 0, n_tokens*ggml_element_size(cur));
-            }
-
-            alloc_inp_pos = true;
-        }
-
-        if (!alloc_inp_KQ_mask && strcmp(name, "KQ_mask") == 0) {
-            ggml_tallocr_alloc(lctx.alloc, cur);
-
-            if (!ggml_tallocr_is_measure(lctx.alloc)) {
-                const int64_t n_kv     = cur->ne[0];
-                const int64_t n_tokens = cur->ne[1];
-
-                float * data;
-                if (ggml_backend_buffer_is_host(cur->buffer)) {
-                    data = (float *) cur->data;
-                } else {
-                    lctx.buf_copy.resize(ggml_nbytes(cur));
-                    data = (float *) lctx.buf_copy.data();
-                }
-
-                for (int h = 0; h < 1; ++h) {
-                    for (int j = 0; j < n_tokens; ++j) {
-                        const llama_pos    pos    = batch.pos[j];
-                        const llama_seq_id seq_id = batch.seq_id[j][0];
-
-                        for (int i = 0; i < n_kv; ++i) {
-                            float f;
-                            if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
-                                f = -INFINITY;
-                            } else {
-                                f = 0;
-                            }
-                            data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
-                        }
-                    }
-                }
-
-                if (data != cur->data) {
-                    ggml_backend_tensor_set(cur, data, 0, ggml_nbytes(cur));
-                }
-            }
-
-            alloc_inp_KQ_mask = true;
-        }
-
-        if (!alloc_inp_K_shift && strcmp(name, "K_shift") == 0) {
-            ggml_tallocr_alloc(lctx.alloc, cur);
-
-            if (!ggml_tallocr_is_measure(lctx.alloc)) {
-                const int64_t n_ctx = cur->ne[0];
-
-                int32_t * data;
-                if (ggml_backend_buffer_is_host(cur->buffer)) {
-                    data = (int32_t *) cur->data;
-                } else {
-                    lctx.buf_copy.resize(ggml_nbytes(cur));
-                    data = (int32_t *) lctx.buf_copy.data();
-                }
-
-                for (int i = 0; i < n_ctx; ++i) {
-                    data[i] = lctx.kv_self.cells[i].delta;
-                }
-
-                if (data != cur->data) {
-                    ggml_backend_tensor_set(cur, data, 0, ggml_nbytes(cur));
-                }
-            }
-
-            alloc_inp_K_shift = true;
-        }
     };

     struct ggml_cgraph * result = NULL;

     struct llm_build_context llm(lctx, batch, cb, worst_case);

+    //
+    // set input data
+    //
+
+    if (!ggml_tallocr_is_measure(lctx.alloc)) {
+        if (batch.token) {
+            const int64_t n_tokens = batch.n_tokens;
+
+            ggml_backend_tensor_set(lctx.inp_tokens, batch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens));
+        }
+
+        if (batch.embd) {
+            const int64_t n_embd   = llm.n_embd;
+            const int64_t n_tokens = batch.n_tokens;
+
+            ggml_backend_tensor_set(lctx.inp_embd, batch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd));
+        }
+
+        if (batch.pos) {
+            const int64_t n_tokens = batch.n_tokens;
+
+            ggml_backend_tensor_set(lctx.inp_pos, batch.pos, 0, n_tokens*ggml_element_size(lctx.inp_pos));
+        }
+
+        {
+            const int64_t n_kv     = llm.n_kv;
+            const int64_t n_tokens = batch.n_tokens;
+
+            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
+            float * data = (float *) lctx.inp_KQ_mask->data;
+
+            for (int h = 0; h < 1; ++h) {
+                for (int j = 0; j < n_tokens; ++j) {
+                    const llama_pos    pos    = batch.pos[j];
+                    const llama_seq_id seq_id = batch.seq_id[j][0];
+
+                    for (int i = 0; i < n_kv; ++i) {
+                        float f;
+                        if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
+                            f = -INFINITY;
+                        } else {
+                            f = 0;
+                        }
+                        data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
+                    }
+                }
+            }
+        }
+
+        if (llm.do_rope_shift) {
+            const int64_t n_ctx = llm.n_ctx;
+
+            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer));
+            int32_t * data = (int32_t *) lctx.inp_K_shift->data;
+
+            for (int i = 0; i < n_ctx; ++i) {
+                data[i] = lctx.kv_self.cells[i].delta;
+            }
+        }
+    }
+
     llm.init();

     switch (model.arch) {
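Note: the net effect of this hunk is that input data is written once per decode into the pre-allocated host tensors, instead of being allocated and copied inside the graph callback. As a rough, self-contained illustration of the masking rule the new code applies (the helper name and the plain-array interface are mine, not the diff's — `cell_pos`/`cell_seq` stand in for the KV cache cells):

#include <cmath>
#include <cstdint>
#include <vector>

// Sketch of the mask written into the KQ-mask input: token j may attend to
// cache cell i only if the cell belongs to the same sequence and holds a
// position <= pos[j]. Disallowed entries get -INFINITY (additive mask).
std::vector<float> build_kq_mask(const std::vector<int32_t> & pos,
                                 const std::vector<int32_t> & seq,
                                 const std::vector<int32_t> & cell_pos,
                                 const std::vector<int32_t> & cell_seq) {
    const size_t n_tokens = pos.size();
    const size_t n_kv     = cell_pos.size();

    std::vector<float> mask(n_tokens * n_kv, -INFINITY);
    for (size_t j = 0; j < n_tokens; ++j) {
        for (size_t i = 0; i < n_kv; ++i) {
            if (cell_seq[i] == seq[j] && cell_pos[i] <= pos[j]) {
                mask[j*n_kv + i] = 0.0f;  // allowed: contributes 0 to the logits
            }
        }
    }
    return mask;  // -INFINITY entries are removed by the softmax
}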
@@ -10301,6 +10256,35 @@ struct llama_context * llama_new_context_with_model(
             ctx->embedding.resize(hparams.n_embd);
         }

+        // graph inputs
+        {
+            ggml_init_params init_params = {
+                /* .mem_size   */ ggml_tensor_overhead()*5,
+                /* .mem_buffer */ nullptr,
+                /* .no_alloc   */ true,
+            };
+            ctx->ctx_input = ggml_init(init_params);
+
+            ctx->inp_tokens  = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
+            ctx->inp_embd    = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, hparams.n_embd, cparams.n_batch);
+            ctx->inp_pos     = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
+            ctx->inp_KQ_mask = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, cparams.n_ctx, cparams.n_batch);
+            ctx->inp_K_shift = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_ctx);
+
+            ggml_set_name(ctx->inp_tokens,  "inp_tokens");
+            ggml_set_name(ctx->inp_embd,    "inp_embd");
+            ggml_set_name(ctx->inp_pos,     "inp_pos");
+            ggml_set_name(ctx->inp_KQ_mask, "inp_KQ_mask");
+            ggml_set_name(ctx->inp_K_shift, "inp_K_shift");
+
+            ctx->buf_input = ggml_backend_alloc_ctx_tensors_from_buft(ctx->ctx_input, llama_default_buffer_type_cpu(true));
+
+            LLAMA_LOG_INFO("%s: %10s input buffer size = %8.2f MiB\n", __func__,
+                    ggml_backend_buffer_name(ctx->buf_input),
+                    ggml_backend_buffer_get_size(ctx->buf_input) / 1024.0 / 1024.0);
+        }
+
+        // scheduler and compute buffers
         {
             // buffer types used for the compute buffer of each backend
             std::vector<ggml_backend_buffer_type_t> backend_buft;
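Note: the pattern introduced here — create a `ggml_context` with `no_alloc = true`, describe the input tensors, then allocate all of them at once in a single backend buffer — is reusable outside this function. A minimal sketch under the assumption that the ggml, ggml-alloc and ggml-backend headers are available; the function name and sizes are illustrative:

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

// Sketch only: pre-allocating a small set of input tensors in one host buffer.
static void alloc_inputs_example(int64_t n_batch, int64_t n_ctx) {
    ggml_init_params ip = {
        /* .mem_size   = */ ggml_tensor_overhead()*2,  // metadata only, 2 tensors
        /* .mem_buffer = */ nullptr,
        /* .no_alloc   = */ true,                      // data is allocated later
    };
    ggml_context * ctx = ggml_init(ip);

    ggml_tensor * inp_tokens  = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_batch);
    ggml_tensor * inp_K_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_ctx);
    ggml_set_name(inp_tokens,  "inp_tokens");
    ggml_set_name(inp_K_shift, "inp_K_shift");

    // one backend buffer holds the data of every tensor created in ctx
    ggml_backend_buffer_t buf =
        ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cpu_buffer_type());

    // ... fill the tensors with ggml_backend_tensor_set(), build graphs, etc. ...

    ggml_backend_buffer_free(buf);
    ggml_free(ctx);
}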
@@ -10327,9 +10311,6 @@ struct llama_context * llama_new_context_with_model(

            // initialize scheduler with the worst-case graph
            ggml_backend_sched_init_measure(ctx->sched, gf);
-           // note: the number of splits during measure is higher than during inference due to the kv shift
-           int n_splits = ggml_backend_sched_get_n_splits(ctx->sched);
-           LLAMA_LOG_INFO("%s: graph splits (measure): %d\n", __func__, n_splits);
            ctx->alloc = ggml_backend_sched_get_tallocr(ctx->sched, ctx->backend_cpu);

            for (ggml_backend_t backend : ctx->backends) {
@@ -10338,6 +10319,10 @@ struct llama_context * llama_new_context_with_model(
                        ggml_backend_buffer_name(buf),
                        ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
            }
+
+           // note: the number of splits during measure is higher than during inference due to the kv shift
+           int n_splits = ggml_backend_sched_get_n_splits(ctx->sched);
+           LLAMA_LOG_INFO("%s: graph splits (measure): %d\n", __func__, n_splits);
        }
    }
