give up and switch to c++17 (+1 squashed commit)

Squashed commits:

[96cfbc48] give up and switch to c++17 (+5 squashed commits)

Squashed commit:

[19ac7c26] Revert "fixed incorrect number of params"

This reverts commit 51388729bc4ffe51ab07ae02ce386219fb5e2876.

[45f730da] Revert "fix for c++17"

This reverts commit 050ba5f72b3358f958722addb9aaa77ff2e428ee.

[51388729] fixed incorrect number of params

[8f1ee54e] build latest vk shaders

[050ba5f7] fix for c++17
This commit is contained in:
Concedo 2024-12-13 17:55:16 +08:00
parent 46d76d913f
commit b7dfb55dea
6 changed files with 696655 additions and 173155 deletions

View file

@ -47,7 +47,7 @@ endif
# Compile flags # Compile flags
# #
# keep standard at C11 and C++11 # keep standard at C11 and C++17
CFLAGS = CFLAGS =
CXXFLAGS = CXXFLAGS =
ifdef KCPP_DEBUG ifdef KCPP_DEBUG
@ -55,7 +55,7 @@ ifdef KCPP_DEBUG
CXXFLAGS = -g -O0 CXXFLAGS = -g -O0
endif endif
CFLAGS += -I. -Iggml/include -Iggml/src -Iggml/src/ggml-cpu -Iinclude -Isrc -I./include -I./include/CL -I./otherarch -I./otherarch/tools -I./otherarch/sdcpp -I./otherarch/sdcpp/thirdparty -I./include/vulkan -O3 -fno-finite-math-only -std=c11 -fPIC -DLOG_DISABLE_LOGS -D_GNU_SOURCE -DGGML_USE_CPU -DGGML_USE_CPU_AARCH64 CFLAGS += -I. -Iggml/include -Iggml/src -Iggml/src/ggml-cpu -Iinclude -Isrc -I./include -I./include/CL -I./otherarch -I./otherarch/tools -I./otherarch/sdcpp -I./otherarch/sdcpp/thirdparty -I./include/vulkan -O3 -fno-finite-math-only -std=c11 -fPIC -DLOG_DISABLE_LOGS -D_GNU_SOURCE -DGGML_USE_CPU -DGGML_USE_CPU_AARCH64
CXXFLAGS += -I. -Iggml/include -Iggml/src -Iggml/src/ggml-cpu -Iinclude -Isrc -I./common -I./include -I./include/CL -I./otherarch -I./otherarch/tools -I./otherarch/sdcpp -I./otherarch/sdcpp/thirdparty -I./include/vulkan -O3 -fno-finite-math-only -std=c++11 -fPIC -DLOG_DISABLE_LOGS -D_GNU_SOURCE -DGGML_USE_CPU -DGGML_USE_CPU_AARCH64 CXXFLAGS += -I. -Iggml/include -Iggml/src -Iggml/src/ggml-cpu -Iinclude -Isrc -I./common -I./include -I./include/CL -I./otherarch -I./otherarch/tools -I./otherarch/sdcpp -I./otherarch/sdcpp/thirdparty -I./include/vulkan -O3 -fno-finite-math-only -std=c++17 -fPIC -DLOG_DISABLE_LOGS -D_GNU_SOURCE -DGGML_USE_CPU -DGGML_USE_CPU_AARCH64
ifndef KCPP_DEBUG ifndef KCPP_DEBUG
CFLAGS += -DNDEBUG -s CFLAGS += -DNDEBUG -s
CXXFLAGS += -DNDEBUG -s CXXFLAGS += -DNDEBUG -s

View file

@ -3838,9 +3838,7 @@ static int repack_iq4_nl_to_iq4_nl_4_bl(struct ggml_tensor * t, int interleave_b
GGML_UNUSED(data_size); GGML_UNUSED(data_size);
} }
namespace ggml { namespace ggml::cpu::aarch64 {
namespace cpu {
namespace aarch64 { //ggml::cpu::aarch64
// repack // repack
template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS> template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS>
int repack(struct ggml_tensor *, const void *, size_t); int repack(struct ggml_tensor *, const void *, size_t);
@ -4156,8 +4154,6 @@ static const tensor_traits<block_q4_0, 8, 8> q4_0_8x8_q8_0;
// instance for IQ4 // instance for IQ4
static const tensor_traits<block_iq4_nl, 4, 4> iq4_nl_4x4_q8_0; static const tensor_traits<block_iq4_nl, 4, 4> iq4_nl_4x4_q8_0;
}
}
} // namespace ggml::cpu::aarch64 } // namespace ggml::cpu::aarch64
static void flag_aarch_prepacked_quant(int type) static void flag_aarch_prepacked_quant(int type)
@ -4260,9 +4256,7 @@ static size_t ggml_backend_cpu_aarch64_buffer_type_get_alignment(ggml_backend_bu
GGML_UNUSED(buft); GGML_UNUSED(buft);
} }
namespace ggml { namespace ggml::cpu::aarch64 {
namespace cpu {
namespace aarch64 { //ggml::cpu::aarch64
class extra_buffer_type : ggml::cpu::extra_buffer_type { class extra_buffer_type : ggml::cpu::extra_buffer_type {
bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
if ( op->op == GGML_OP_MUL_MAT && if ( op->op == GGML_OP_MUL_MAT &&
@ -4309,9 +4303,6 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type {
return nullptr; return nullptr;
} }
}; };
}
}
} // namespace ggml::cpu::aarch64 } // namespace ggml::cpu::aarch64
ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void) { ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void) {

View file

@ -3,12 +3,10 @@
#include "ggml-backend-impl.h" #include "ggml-backend-impl.h"
#include "ggml-backend.h" #include "ggml-backend.h"
namespace ggml { namespace ggml::cpu {
namespace cpu {
tensor_traits::~tensor_traits() {} tensor_traits::~tensor_traits() {}
extra_buffer_type::~extra_buffer_type() {} extra_buffer_type::~extra_buffer_type() {}
}
} // namespace ggml::cpu } // namespace ggml::cpu
bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) { bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) {

View file

@ -15,8 +15,7 @@ bool ggml_cpu_extra_work_size(int n_threads, const struct ggml_tensor * op, size
#ifdef __cplusplus #ifdef __cplusplus
} }
namespace ggml { namespace ggml::cpu {
namespace cpu {
// register in tensor->extra // register in tensor->extra
class tensor_traits { class tensor_traits {
public: public:
@ -31,7 +30,6 @@ class extra_buffer_type {
virtual bool supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) = 0; virtual bool supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) = 0;
virtual tensor_traits * get_tensor_traits(const struct ggml_tensor * op) = 0; virtual tensor_traits * get_tensor_traits(const struct ggml_tensor * op) = 0;
}; };
}
} // namespace ggml::cpu } // namespace ggml::cpu
// implemented in ggml-cpu.cpp. // implemented in ggml-cpu.cpp.

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff