From df7c2b9923665390d294175b9954757a3da838ad Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Mon, 11 Nov 2024 19:40:47 +0800
Subject: [PATCH] renamed some vision mmproj labels; document clip_skip in image generation API docs
---
colab.ipynb | 8 ++++----
kcpp_docs.embd | 8 ++++++++
klite.embd | 35 ++++++++++++++++++++++++++++-------
koboldcpp.py | 6 +++---
4 files changed, 43 insertions(+), 14 deletions(-)
diff --git a/colab.ipynb b/colab.ipynb
index efb844490..61ead6a00 100644
--- a/colab.ipynb
+++ b/colab.ipynb
@@ -54,8 +54,8 @@
"FlashAttention = True #@param {type:\"boolean\"}\r\n",
"FACommand = \"\"\r\n",
"#@markdown <hr>\r\n",
- "LoadLLaVAmmproj = False #@param {type:\"boolean\"}\r\n",
- "LLaVAmmproj = \"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\" #@param [\"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/mistral-7b-mmproj-v1.5-Q4_1.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-7b-mmproj-v1.5-Q4_0.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/LLaMA3-8B_mmproj-Q4_1.gguf\"]{allow-input: true}\r\n",
+ "LoadVisionMMProjector = False #@param {type:\"boolean\"}\r\n",
+ "Mmproj = \"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\" #@param [\"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/mistral-7b-mmproj-v1.5-Q4_1.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-7b-mmproj-v1.5-Q4_0.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/LLaMA3-8B_mmproj-Q4_1.gguf\"]{allow-input: true}\r\n",
"VCommand = \"\"\r\n",
"#@markdown <hr>\r\n",
"LoadImgModel = False #@param {type:\"boolean\"}\r\n",
@@ -71,7 +71,7 @@
" raise RuntimeError(\"⚠️Colab did not give you a GPU due to usage limits, this can take a few hours before they let you back in. Check out https://lite.koboldai.net for a free alternative (that does not provide an API link but can load KoboldAI saves and chat cards) or subscribe to Colab Pro for immediate access.⚠️\")\r\n",
"\r\n",
"%cd /content\r\n",
- "if LLaVAmmproj and LoadLLaVAmmproj:\r\n",
+ "if Mmproj and LoadVisionMMProjector:\r\n",
" VCommand = \"--mmproj vmodel.gguf\"\r\n",
"else:\r\n",
" SCommand = \"\"\r\n",
@@ -99,7 +99,7 @@
" Model = Model.replace(\"/blob/main/\", \"/resolve/main/\")\r\n",
"!aria2c -x 10 -o model.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Model\r\n",
"if VCommand:\r\n",
- " !aria2c -x 10 -o vmodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $LLaVAmmproj\r\n",
+ " !aria2c -x 10 -o vmodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Mmproj\r\n",
"if SCommand:\r\n",
" !aria2c -x 10 -o imodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $ImgModel\r\n",
"if WCommand:\r\n",
diff --git a/kcpp_docs.embd b/kcpp_docs.embd
index 647d2fad7..a32ed0433 100644
--- a/kcpp_docs.embd
+++ b/kcpp_docs.embd
@@ -1272,6 +1272,7 @@
"width": 512,
"height": 512,
"seed": -1,
+ "clip_skip": -1,
"sampler_name": "Euler a"
},
"schema": {
@@ -1297,6 +1298,9 @@
"seed": {
"type": "number"
},
+ "clip_skip": {
+ "type": "number"
+ },
"sampler_name": {
"type": "string"
},
@@ -1356,6 +1360,7 @@
"width": 512,
"height": 512,
"seed": -1,
+ "clip_skip": -1,
"sampler_name": "Euler a",
"denoising_strength": 0.6,
"init_images":["base64_image_data"],
@@ -1383,6 +1388,9 @@
"seed": {
"type": "number"
},
+ "clip_skip": {
+ "type": "number"
+ },
"sampler_name": {
"type": "string"
},
diff --git a/klite.embd b/klite.embd
index 8ff5ebce0..e12982e9f 100644
--- a/klite.embd
+++ b/klite.embd
@@ -12,7 +12,7 @@ Current version indicated by LITEVER below.
-->