Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-13 18:39:48 +00:00)
merge new rope param nonsense
This commit is contained in: commit b84e210f0d
38 changed files with 2811 additions and 764 deletions
ggml.c (254 changed lines)
@@ -6407,6 +6407,54 @@ struct ggml_tensor * ggml_cont_inplace(
     return ggml_cont_impl(ctx, a, true);
 }
 
+// make contiguous, with new shape
+GGML_API struct ggml_tensor * ggml_cont_1d(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a,
+        int64_t               ne0) {
+    return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
+}
+
+GGML_API struct ggml_tensor * ggml_cont_2d(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a,
+        int64_t               ne0,
+        int64_t               ne1) {
+    return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
+}
+
+GGML_API struct ggml_tensor * ggml_cont_3d(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a,
+        int64_t               ne0,
+        int64_t               ne1,
+        int64_t               ne2) {
+    return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
+}
+
+struct ggml_tensor * ggml_cont_4d(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a,
+        int64_t               ne0,
+        int64_t               ne1,
+        int64_t               ne2,
+        int64_t               ne3) {
+    GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));
+
+    bool is_node = false;
+
+    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
+    ggml_format_name(result, "%s (cont)", a->name);
+
+    result->op   = GGML_OP_CONT;
+    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+    result->src[0] = a;
+
+    return result;
+}
+
+
 // ggml_reshape
 
 struct ggml_tensor * ggml_reshape(
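Note: the new ggml_cont_1d/2d/3d/4d helpers above all forward to ggml_cont_4d, which asserts that the requested shape holds exactly as many elements as the source tensor. A minimal usage sketch in C follows; the context, tensor names and sizes are hypothetical, not taken from this commit:

    // Flatten a permuted (non-contiguous) view into contiguous 2-D memory.
    // Assumes an initialized struct ggml_context * ctx with enough memory.
    struct ggml_tensor * x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 64, 32, 8);
    struct ggml_tensor * v = ggml_permute(ctx, x, 1, 0, 2, 3);  // a view; not contiguous
    struct ggml_tensor * y = ggml_cont_2d(ctx, v, 32, 64*8);    // copies into contiguous memory
    // ggml_cont_2d(ctx, v, 32, 64*8) expands to ggml_cont_4d(ctx, v, 32, 64*8, 1, 1);
    // the element-count assert holds because 64*32*8 == 32*(64*8)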
@@ -6969,7 +7017,7 @@ struct ggml_tensor * ggml_soft_max_back_inplace(
 static struct ggml_tensor * ggml_rope_impl(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
-        int                   n_past,
+        struct ggml_tensor  * b,
         int                   n_dims,
         int                   mode,
         int                   n_ctx,
@@ -6978,7 +7026,10 @@ static struct ggml_tensor * ggml_rope_impl(
         float                 xpos_base,
         bool                  xpos_down,
         bool                  inplace) {
-    GGML_ASSERT(n_past >= 0);
+    GGML_ASSERT(ggml_is_vector(b));
+    GGML_ASSERT(b->type == GGML_TYPE_I32);
+    GGML_ASSERT(a->ne[2] == b->ne[0]);
+
     bool is_node = false;
 
     if (a->grad) {
@@ -6987,7 +7038,7 @@ static struct ggml_tensor * ggml_rope_impl(
 
     struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
 
-    int32_t params[8] = { n_past, n_dims, mode, n_ctx };
+    int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx };
     memcpy(params + 4, &freq_base, sizeof(float));
     memcpy(params + 5, &freq_scale, sizeof(float));
     memcpy(params + 6, &xpos_base, sizeof(float));
@@ -6997,6 +7048,7 @@ static struct ggml_tensor * ggml_rope_impl(
     result->op   = GGML_OP_ROPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
     result->src[0] = a;
+    result->src[1] = b;
 
     return result;
 }
@@ -7004,55 +7056,55 @@ static struct ggml_tensor * ggml_rope_impl(
 struct ggml_tensor * ggml_rope(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
-        int                   n_past,
+        struct ggml_tensor  * b,
         int                   n_dims,
         int                   mode,
         int                   n_ctx) {
-    return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, false);
+    return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, false);
 }
 
 struct ggml_tensor * ggml_rope_inplace(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
-        int                   n_past,
+        struct ggml_tensor  * b,
         int                   n_dims,
         int                   mode,
         int                   n_ctx) {
-    return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, true);
+    return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, true);
 }
 
 struct ggml_tensor * ggml_rope_custom(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
-        int                   n_past,
+        struct ggml_tensor  * b,
         int                   n_dims,
         int                   mode,
         int                   n_ctx,
         float                 freq_base,
         float                 freq_scale) {
-    return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, false);
+    return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, false);
 }
 
 struct ggml_tensor * ggml_rope_custom_inplace(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
-        int                   n_past,
+        struct ggml_tensor  * b,
         int                   n_dims,
         int                   mode,
         int                   n_ctx,
         float                 freq_base,
         float                 freq_scale) {
-    return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, true);
+    return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, true);
 }
 
 struct ggml_tensor * ggml_rope_xpos_inplace(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
-        int                   n_past,
+        struct ggml_tensor  * b,
         int                   n_dims,
         float                 base,
         bool                  down) {
-    return ggml_rope_impl(ctx, a, n_past, n_dims, 0, 0, 10000.0f, 1.0f, base, down, true);
+    return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 10000.0f, 1.0f, base, down, true);
 }
 
 // ggml_rope_back
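Note: after this hunk, every public ggml_rope* entry point takes a position tensor b instead of the scalar n_past, and ggml_rope_impl asserts that b is an I32 vector with one entry per row along dim 2 of a (a->ne[2] == b->ne[0]). A hedged caller-side sketch; cur, KQ_pos, n_tokens and n_past are illustrative names, not part of this diff:

    // Build one int32 position per token in the batch, then pass the tensor
    // where the old n_past argument used to go.
    const int n_tokens = cur->ne[2];  // rope is applied along dim 2 of cur
    struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens);
    for (int i = 0; i < n_tokens; ++i) {
        ((int32_t *) KQ_pos->data)[i] = n_past + i;  // reproduces the old consecutive positions
    }
    // before: ggml_rope_custom_inplace(ctx, cur, n_past, n_rot, 0, 0, freq_base, freq_scale);
    // now:    ggml_rope_custom_inplace(ctx, cur, KQ_pos, n_rot, 0, 0, freq_base, freq_scale);

Writing KQ_pos->data directly assumes the tensor's data buffer is already allocated; a no-alloc graph would need the values copied in later.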
@@ -7060,7 +7112,7 @@ struct ggml_tensor * ggml_rope_xpos_inplace(
 struct ggml_tensor * ggml_rope_back(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
-        int                   n_past,
+        struct ggml_tensor  * b,
         int                   n_dims,
         int                   mode,
         int                   n_ctx,
@@ -7068,7 +7120,10 @@ struct ggml_tensor * ggml_rope_back(
         float                 freq_scale,
         float                 xpos_base,
         bool                  xpos_down) {
-    GGML_ASSERT(n_past >= 0);
+    GGML_ASSERT(ggml_is_vector(b));
+    GGML_ASSERT(b->type == GGML_TYPE_I32);
+    GGML_ASSERT(a->ne[2] == b->ne[0]);
+
     GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");
 
     bool is_node = false;
@@ -7079,7 +7134,7 @@ struct ggml_tensor * ggml_rope_back(
 
     struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
 
-    int32_t params[8] = { n_past, n_dims, mode, n_ctx };
+    int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx };
     memcpy(params + 4, &freq_base, sizeof(float));
     memcpy(params + 5, &freq_scale, sizeof(float));
     memcpy(params + 6, &xpos_base, sizeof(float));
@@ -7089,6 +7144,7 @@ struct ggml_tensor * ggml_rope_back(
     result->op   = GGML_OP_ROPE_BACK;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
     result->src[0] = a;
+    result->src[1] = b;
 
     return result;
 }
@@ -8799,8 +8855,6 @@ static void ggml_compute_forward_add_f32(
 #else
             ggml_vec_add_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
 #endif
-                // }
-            // }
         }
     } else {
         // src1 is not contiguous
@@ -12452,13 +12506,11 @@ static void ggml_compute_forward_alibi_f16(
         return;
     }
 
-    const int n_past = ((int32_t *) dst->op_params)[0];
+    //const int n_past = ((int32_t *) dst->op_params)[0];
     const int n_head = ((int32_t *) dst->op_params)[1];
     float max_bias;
     memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
 
-    assert(n_past >= 0);
-
     const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
     const int ne1 = src0->ne[1]; // seq_len_without_past
     const int ne2 = src0->ne[2]; // n_head -> this is k
@@ -12473,7 +12525,7 @@ static void ggml_compute_forward_alibi_f16(
     //const int nb3 = src0->nb[3];
 
     GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
-    GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
+    //GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
     GGML_ASSERT(n_head == ne2);
 
     // add alibi to src0 (KQ_scaled)
@@ -12619,8 +12671,8 @@ static void ggml_compute_forward_clamp(
 static void ggml_compute_forward_rope_f32(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
+        const struct ggml_tensor * src1,
         struct ggml_tensor * dst) {
-
     if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
         return;
     }
@@ -12630,9 +12682,9 @@ static void ggml_compute_forward_rope_f32(
 
     // these two only relevant for xPos RoPE:
     float xpos_base;
-    bool xpos_down;
+    bool  xpos_down;
 
-    const int n_past = ((int32_t *) dst->op_params)[0];
+    //const int n_past = ((int32_t *) dst->op_params)[0];
     const int n_dims = ((int32_t *) dst->op_params)[1];
     const int mode   = ((int32_t *) dst->op_params)[2];
     const int n_ctx  = ((int32_t *) dst->op_params)[3];
@@ -12641,8 +12693,6 @@ static void ggml_compute_forward_rope_f32(
     memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float));
     memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool));
 
-    assert(n_past >= 0);
-
     GGML_TENSOR_UNARY_OP_LOCALS;
 
     //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
@@ -12673,9 +12723,11 @@ static void ggml_compute_forward_rope_f32(
     const bool is_neox = mode & 2;
     const bool is_glm  = mode & 4;
 
+    const int32_t * pos = (const int32_t *) src1->data;
+
     for (int64_t i3 = 0; i3 < ne3; i3++) {
-        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
-            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+        for (int64_t i2 = 0; i2 < ne2; i2++) {
+            const int64_t p = pos[i2];
             for (int64_t i1 = 0; i1 < ne1; i1++) {
                 if (ir++ < ir0) continue;
                 if (ir > ir1) break;
@@ -12712,7 +12764,7 @@ static void ggml_compute_forward_rope_f32(
                 const float cos_theta = cosf(theta);
                 const float sin_theta = sinf(theta);
                 // zeta scaling for xPos only:
-                float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), (n_past + i2) / xpos_base) : 1.0f;
+                float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
                 if (xpos_down) zeta = 1.0f / zeta;
 
                 theta *= theta_scale;
@@ -12757,8 +12809,8 @@ static void ggml_compute_forward_rope_f32(
 static void ggml_compute_forward_rope_f16(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
+        const struct ggml_tensor * src1,
         struct ggml_tensor * dst) {
-
     if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
         return;
     }
@@ -12766,15 +12818,13 @@ static void ggml_compute_forward_rope_f16(
     float freq_base;
     float freq_scale;
 
-    const int n_past = ((int32_t *) dst->op_params)[0];
+    //const int n_past = ((int32_t *) dst->op_params)[0];
     const int n_dims = ((int32_t *) dst->op_params)[1];
     const int mode   = ((int32_t *) dst->op_params)[2];
     const int n_ctx  = ((int32_t *) dst->op_params)[3];
     memcpy(&freq_base,  (int32_t *) dst->op_params + 4, sizeof(float));
     memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
 
-    assert(n_past >= 0);
-
     GGML_TENSOR_UNARY_OP_LOCALS;
 
     //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
@@ -12805,9 +12855,11 @@ static void ggml_compute_forward_rope_f16(
     const bool is_neox = mode & 2;
     const bool is_glm  = mode & 4;
 
+    const int32_t * pos = (const int32_t *) src1->data;
+
     for (int64_t i3 = 0; i3 < ne3; i3++) {
-        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
-            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+        for (int64_t i2 = 0; i2 < ne2; i2++) {
+            const int64_t p = pos[i2];
             for (int64_t i1 = 0; i1 < ne1; i1++) {
                 if (ir++ < ir0) continue;
                 if (ir > ir1) break;
@@ -12886,15 +12938,16 @@ static void ggml_compute_forward_rope_f16(
 static void ggml_compute_forward_rope(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
+        const struct ggml_tensor * src1,
         struct ggml_tensor * dst) {
     switch (src0->type) {
         case GGML_TYPE_F16:
             {
-                ggml_compute_forward_rope_f16(params, src0, dst);
+                ggml_compute_forward_rope_f16(params, src0, src1, dst);
             } break;
         case GGML_TYPE_F32:
             {
-                ggml_compute_forward_rope_f32(params, src0, dst);
+                ggml_compute_forward_rope_f32(params, src0, src1, dst);
             } break;
         default:
             {
@@ -12908,6 +12961,7 @@ static void ggml_compute_forward_rope(
 static void ggml_compute_forward_rope_back_f32(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
+        const struct ggml_tensor * src1,
         struct ggml_tensor * dst) {
 
     if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
@@ -12925,7 +12979,7 @@ static void ggml_compute_forward_rope_back_f32(
     float xpos_base;
     bool xpos_down;
 
-    const int n_past = ((int32_t *) dst->op_params)[0];
+    //const int n_past = ((int32_t *) dst->op_params)[0];
     const int n_dims = ((int32_t *) dst->op_params)[1];
     const int mode   = ((int32_t *) dst->op_params)[2];
     const int n_ctx  = ((int32_t *) dst->op_params)[3]; UNUSED(n_ctx);
@@ -12934,8 +12988,6 @@ static void ggml_compute_forward_rope_back_f32(
     memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float));
     memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool));
 
-    assert(n_past >= 0);
-
     GGML_TENSOR_UNARY_OP_LOCALS;
 
     //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
@@ -12962,9 +13014,11 @@ static void ggml_compute_forward_rope_back_f32(
 
     const bool is_neox = mode & 2;
 
+    const int32_t * pos = (const int32_t *) src1->data;
+
     for (int64_t i3 = 0; i3 < ne3; i3++) {
-        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
-            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+        for (int64_t i2 = 0; i2 < ne2; i2++) {
+            const int64_t p = pos[i2];
             for (int64_t i1 = 0; i1 < ne1; i1++) {
                 if (ir++ < ir0) continue;
                 if (ir > ir1) break;
@@ -12976,7 +13030,7 @@ static void ggml_compute_forward_rope_back_f32(
                 const float cos_theta = cosf(theta);
                 const float sin_theta = sinf(theta);
                 // zeta scaling for xPos only:
-                float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), (n_past + i2) / xpos_base) : 1.0f;
+                float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
                 if (xpos_down) zeta = 1.0f / zeta;
 
                 theta *= theta_scale;
@@ -13019,6 +13073,7 @@ static void ggml_compute_forward_rope_back_f32(
 static void ggml_compute_forward_rope_back_f16(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
+        const struct ggml_tensor * src1,
         struct ggml_tensor * dst) {
 
     if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
@@ -13029,12 +13084,10 @@ static void ggml_compute_forward_rope_back_f16(
     // dx = rope_back(dy, src1)
    // src0 is dy, src1 contains options
 
-    const int n_past = ((int32_t *) dst->op_params)[0];
+    //const int n_past = ((int32_t *) dst->op_params)[0];
     const int n_dims = ((int32_t *) dst->op_params)[1];
     const int mode   = ((int32_t *) dst->op_params)[2];
 
-    assert(n_past >= 0);
-
     GGML_TENSOR_UNARY_OP_LOCALS;
 
     //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
@@ -13061,9 +13114,11 @@ static void ggml_compute_forward_rope_back_f16(
 
     const bool is_neox = mode & 2;
 
+    const int32_t * pos = (const int32_t *) src1->data;
+
     for (int64_t i3 = 0; i3 < ne3; i3++) {
-        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
-            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+        for (int64_t i2 = 0; i2 < ne2; i2++) {
+            const int64_t p = pos[i2];
             for (int64_t i1 = 0; i1 < ne1; i1++) {
                 if (ir++ < ir0) continue;
                 if (ir > ir1) break;
@@ -13115,15 +13170,16 @@ static void ggml_compute_forward_rope_back_f16(
 static void ggml_compute_forward_rope_back(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
+        const struct ggml_tensor * src1,
         struct ggml_tensor * dst) {
     switch (src0->type) {
         case GGML_TYPE_F16:
             {
-                ggml_compute_forward_rope_back_f16(params, src0, dst);
+                ggml_compute_forward_rope_back_f16(params, src0, src1, dst);
             } break;
         case GGML_TYPE_F32:
            {
-                ggml_compute_forward_rope_back_f32(params, src0, dst);
+                ggml_compute_forward_rope_back_f32(params, src0, src1, dst);
             } break;
         default:
             {
@@ -15860,11 +15916,11 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
             } break;
         case GGML_OP_ROPE:
             {
-                ggml_compute_forward_rope(params, tensor->src[0], tensor);
+                ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor);
             } break;
        case GGML_OP_ROPE_BACK:
             {
-                ggml_compute_forward_rope_back(params, tensor->src[0], tensor);
+                ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor);
             } break;
         case GGML_OP_ALIBI:
             {
@@ -16502,7 +16558,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
             {
                 // necessary for llama
                 if (src0->grad) {
-                    const int n_past = ((int32_t *) tensor->op_params)[0];
+                    //const int n_past = ((int32_t *) tensor->op_params)[0];
                     const int n_dims = ((int32_t *) tensor->op_params)[1];
                     const int mode   = ((int32_t *) tensor->op_params)[2];
                     const int n_ctx  = ((int32_t *) tensor->op_params)[3];
@@ -16519,7 +16575,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                         src0->grad,
                         ggml_rope_back(ctx,
                             tensor->grad,
-                            n_past,
+                            src1,
                             n_dims,
                             mode,
                             n_ctx,
@@ -16533,7 +16589,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
         case GGML_OP_ROPE_BACK:
             {
                 if (src0->grad) {
-                    const int n_past = ((int32_t *) tensor->op_params)[0];
+                    //const int n_past = ((int32_t *) tensor->op_params)[0];
                     const int n_dims = ((int32_t *) tensor->op_params)[1];
                     const int mode   = ((int32_t *) tensor->op_params)[2];
                     const int n_ctx  = ((int32_t *) tensor->op_params)[3];
@@ -16550,7 +16606,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                         src0->grad,
                         ggml_rope_impl(ctx,
                             tensor->grad,
-                            n_past,
+                            src1,
                             n_dims,
                             mode,
                             n_ctx,
@@ -20147,78 +20203,94 @@ int gguf_find_key(const struct gguf_context * ctx, const char * key) {
     return keyfound;
 }
 
-const char * gguf_get_key(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].key.data;
+const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
+    return ctx->kv[key_id].key.data;
 }
 
-enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].type;
+enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
+    return ctx->kv[key_id].type;
 }
 
-enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.arr.type;
+enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
+    return ctx->kv[key_id].value.arr.type;
 }
 
-const void * gguf_get_arr_data(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.arr.data;
+const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
+    return ctx->kv[key_id].value.arr.data;
 }
 
 const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
     struct gguf_kv * kv = &ctx->kv[key_id];
     struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
     return str->data;
 }
 
-int gguf_get_arr_n(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.arr.n;
+int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
+    return ctx->kv[key_id].value.arr.n;
 }
 
-uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.uint8;
+uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
+    return ctx->kv[key_id].value.uint8;
 }
 
-int8_t gguf_get_val_i8(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.int8;
+int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
+    return ctx->kv[key_id].value.int8;
 }
 
-uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.uint16;
+uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
+    return ctx->kv[key_id].value.uint16;
 }
 
-int16_t gguf_get_val_i16(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.int16;
+int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
+    return ctx->kv[key_id].value.int16;
 }
 
-uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.uint32;
+uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
+    return ctx->kv[key_id].value.uint32;
 }
 
-int32_t gguf_get_val_i32(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.int32;
+int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
+    return ctx->kv[key_id].value.int32;
 }
 
-float gguf_get_val_f32(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.float32;
+float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
+    return ctx->kv[key_id].value.float32;
 }
 
-uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.uint64;
+uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
+    return ctx->kv[key_id].value.uint64;
 }
 
-int64_t gguf_get_val_i64(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.int64;
+int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
+    return ctx->kv[key_id].value.int64;
 }
 
-double gguf_get_val_f64(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.float64;
+double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
+    return ctx->kv[key_id].value.float64;
 }
 
-bool gguf_get_val_bool(const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.bool_;
+bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
+    return ctx->kv[key_id].value.bool_;
 }
 
-const char * gguf_get_val_str (const struct gguf_context * ctx, int i) {
-    return ctx->kv[i].value.str.data;
+const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
+    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
+    return ctx->kv[key_id].value.str.data;
 }
 
 int gguf_get_n_tensors(const struct gguf_context * ctx) {
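Note: with the GGML_ASSERTs added above, each gguf getter now aborts when called on a key of the wrong type, so a caller should look the key up and check its type before picking a getter. A small sketch; gctx and the key name are illustrative:

    // Assumes an already-loaded struct gguf_context * gctx.
    const int key_id = gguf_find_key(gctx, "general.file_type");
    if (key_id >= 0 && gguf_get_kv_type(gctx, key_id) == GGUF_TYPE_UINT32) {
        const uint32_t ftype = gguf_get_val_u32(gctx, key_id);  // type matches, assert passes
        // ... use ftype ...
    }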