Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-10 17:14:36 +00:00)

commit a6149ad0fc: fixed g3 adapter back
parent 9f94f62768

4 changed files with 10 additions and 9 deletions

@@ -1211,7 +1211,7 @@ struct clip_model_loader {
     clip_ctx & ctx_clip;
     std::string fname;

-    size_t model_size; // in bytes
+    size_t model_size = 0; // in bytes

     // TODO @ngxson : we should not pass clip_ctx here, it should be clip_vision_model
     clip_model_loader(const char * fname, clip_ctx & ctx_clip) : ctx_clip(ctx_clip), fname(fname) {

@@ -1258,8 +1258,8 @@ struct clip_model_loader {
                 struct ggml_tensor * cur = ggml_get_tensor(meta, name);
                 size_t tensor_size = ggml_nbytes(cur);
                 model_size += tensor_size;
-                LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
-                    __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
+                // LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
+                //     __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
             }
         }
     }

@@ -38,8 +38,8 @@
     "search": ["System role not supported", "<start_of_turn>"],
     "name": "Google Gemma 2.",
     "adapter": {
-        "system_start": "\n",
-        "system_end": "\n",
+        "system_start": "<start_of_turn>user\n",
+        "system_end": "<end_of_turn>\n",
         "user_start": "<start_of_turn>user\n",
         "user_end": "<end_of_turn>\n",
         "assistant_start": "<start_of_turn>model\n",

@@ -49,8 +49,8 @@
     "search": ["<start_of_image>", "<start_of_turn>", "<end_of_turn>"],
     "name": "Google Gemma 3.",
     "adapter": {
-        "system_start": "\n",
-        "system_end": "\n",
+        "system_start": "<start_of_turn>user\n",
+        "system_end": "<end_of_turn>\n",
         "user_start": "<start_of_turn>user\n",
         "user_end": "<end_of_turn>\n",
         "assistant_start": "<start_of_turn>model\n",

@@ -1,6 +1,6 @@
 {
-    "system_start": "\n",
-    "system_end": "\n",
+    "system_start": "<start_of_turn>user\n",
+    "system_end": "<end_of_turn>\n",
     "user_start": "<start_of_turn>user\n",
     "user_end": "<end_of_turn>\n",
     "assistant_start": "<start_of_turn>model\n",

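For context (not part of the diff): the adapter keys above are plain prefix/suffix strings that koboldcpp wraps around each chat turn, so restoring the <start_of_turn>/<end_of_turn> values means a system message is emitted as a proper Gemma turn instead of being injected between bare newlines, which appears to be what this commit is fixing. Below is a minimal Python sketch of that wrapping, using only the keys shown in the hunks; render_prompt and the sample messages are illustrative names, not koboldcpp's actual implementation.

# Illustrative sketch only: how adapter prefix/suffix keys wrap a conversation.
adapter = {
    "system_start": "<start_of_turn>user\n",
    "system_end": "<end_of_turn>\n",
    "user_start": "<start_of_turn>user\n",
    "user_end": "<end_of_turn>\n",
    "assistant_start": "<start_of_turn>model\n",
}

def render_prompt(messages, adapter):
    # Wrap each turn in its role-specific start/end strings, then open an
    # assistant turn so the model continues from "<start_of_turn>model".
    out = ""
    for msg in messages:
        role = msg["role"]  # "system", "user" or "assistant"
        out += adapter.get(role + "_start", "") + msg["content"] + adapter.get(role + "_end", "")
    return out + adapter["assistant_start"]

print(render_prompt(
    [{"role": "system", "content": "Be concise."},
     {"role": "user", "content": "Hello!"}],
    adapter,
))
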
@@ -2083,6 +2083,7 @@ def transform_genparams(genparams, api_format):
                     elif item['type']=="image_url":
                         if 'image_url' in item and item['image_url'] and item['image_url']['url'] and item['image_url']['url'].startswith("data:image"):
                             images_added.append(item['image_url']['url'].split(",", 1)[1])
+                            messages_string += "\n(Attached Image)\n"
             # If last message, add any tools calls after message content and before message end token if any
             if message['role'] == "user" and message_index == len(messages_array):
                 # Check if user is passing a openai tools array, if so add to end of prompt before assistant prompt unless tool_choice has been set to None
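
For context (not part of the diff): the added messages_string line puts a textual placeholder into the flattened prompt, while the base64 payload itself is collected separately by the existing split(",", 1) call, which drops the "data:image/...;base64," prefix of an OpenAI-style data URL. A small self-contained sketch of that handling follows; the sample payload and surrounding variable setup are illustrative, not koboldcpp's code.

# Illustrative sketch of the data-URL handling above (standalone).
item = {
    "type": "image_url",
    "image_url": {"url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg=="},
}

images_added = []      # collected base64 payloads, consumed elsewhere
messages_string = ""   # flattened text prompt being built

if item["type"] == "image_url":
    url = (item.get("image_url") or {}).get("url", "")
    if url.startswith("data:image"):
        # Split once on the first comma: [0] is the "data:image/png;base64"
        # header, [1] is the raw base64 image data.
        images_added.append(url.split(",", 1)[1])
        messages_string += "\n(Attached Image)\n"

print(images_added)            # ['iVBORw0KGgoAAAANSUhEUg==']
print(repr(messages_string))   # '\n(Attached Image)\n'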