Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	.github/workflows/docker.yml
#	CMakeLists.txt
#	CONTRIBUTING.md
#	docs/android.md
#	docs/docker.md
#	examples/embedding/embedding.cpp
#	examples/imatrix/imatrix.cpp
#	examples/infill/infill.cpp
#	examples/llama-bench/llama-bench.cpp
#	examples/main/README.md
#	examples/parallel/parallel.cpp
#	examples/perplexity/perplexity.cpp
#	examples/quantize-stats/quantize-stats.cpp
#	examples/save-load-state/save-load-state.cpp
#	examples/server/README.md
#	examples/simple/CMakeLists.txt
#	examples/speculative/speculative.cpp
#	flake.lock
#	ggml/src/CMakeLists.txt
#	ggml/src/ggml-blas.cpp
#	pocs/vdot/q8dot.cpp
#	pocs/vdot/vdot.cpp
#	scripts/debug-test.sh
#	scripts/sync-ggml.last
#	src/llama.cpp
#	tests/test-backend-ops.cpp
#	tests/test-chat-template.cpp
#	tests/test-quantize-fns.cpp
#	tests/test-quantize-perf.cpp
#	tests/test-tokenizer-0.cpp
#	tests/test-tokenizer-1-bpe.cpp
#	tests/test-tokenizer-1-spm.cpp
Concedo 2024-10-11 11:59:59 +08:00
commit e692a79aab
61 changed files with 2579 additions and 1949 deletions


@@ -126,10 +126,10 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {
     return true;
 }
-static bool run(llama_context * ctx, const gpt_params & params) {
+static bool run(llama_context * ctx, const common_params & params) {
     const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
-    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
+    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);
     if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
         LOG_ERR("%s : failed to eval\n", __func__);
@@ -142,13 +142,13 @@ static bool run(llama_context * ctx, const gpt_params & params) {
 int main(int argc, char ** argv) {
     callback_data cb_data;
-    gpt_params params;
+    common_params params;
-    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
+    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
         return 1;
     }
-    gpt_init();
+    common_init();
     llama_backend_init();
     llama_numa_init(params.numa);
@@ -160,7 +160,7 @@ int main(int argc, char ** argv) {
     params.warmup = false;
     // init
-    llama_init_result llama_init = llama_init_from_gpt_params(params);
+    common_init_result llama_init = common_init_from_params(params);
     llama_model * model = llama_init.model;
     llama_context * ctx = llama_init.context;
@@ -172,7 +172,7 @@ int main(int argc, char ** argv) {
     // print system information
     {
         LOG_INF("\n");
-        LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
+        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
         LOG_INF("\n");
     }
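
The hunks above all stem from the upstream rename of the gpt_* common helpers to common_*: gpt_params -> common_params, gpt_params_parse -> common_params_parse, gpt_init -> common_init, llama_init_from_gpt_params -> common_init_from_params, ::llama_tokenize -> common_tokenize, and gpt_params_get_system_info -> common_params_get_system_info. For orientation, below is a minimal sketch of the post-rename startup path, assembled only from the calls visible in these hunks; the include names, the trimmed error handling, and the omitted cleanup are assumptions, not part of this diff.

// Sketch of the renamed startup path. Headers are assumed ("arg.h", "common.h",
// "log.h", "llama.h"); their exact locations vary between trees.
#include "arg.h"
#include "common.h"
#include "log.h"
#include "llama.h"

#include <vector>

int main(int argc, char ** argv) {
    common_params params;                                                     // was: gpt_params
    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {     // was: gpt_params_parse
        return 1;
    }
    common_init();                                                            // was: gpt_init

    llama_backend_init();
    llama_numa_init(params.numa);

    // load model and create a context from the parsed parameters
    common_init_result llama_init = common_init_from_params(params);          // was: llama_init_from_gpt_params
    llama_model   * model = llama_init.model;   // kept for later use in the full example
    llama_context * ctx   = llama_init.context;

    // tokenize the prompt with the renamed helper and evaluate it
    const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);  // was: ::llama_tokenize

    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
        LOG_ERR("%s : failed to eval\n", __func__);
        return 1;
    }

    LOG_INF("%s\n", common_params_get_system_info(params).c_str());           // was: gpt_params_get_system_info
    return 0;   // resource cleanup omitted in this sketch
}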