diff --git a/colab.ipynb b/colab.ipynb
index 61ead6a00..09eac333b 100644
--- a/colab.ipynb
+++ b/colab.ipynb
@@ -1,130 +1,142 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "view-in-github"
- },
- "source": [
- " "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "2FCn5tmpn3UV"
- },
- "source": [
- "## Welcome to the Official KoboldCpp Colab Notebook\n",
- "It's really easy to get started. Just press the two **Play** buttons below, and then connect to the **Cloudflare URL** shown at the end.\n",
- "You can select a model from the dropdown, or enter a **custom URL** to a GGUF model (Example: `https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_M.gguf`)\n",
- "\n",
- "**Keep this page open and occationally check for captcha's so that your AI is not shut down**"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "QNaj3u0jn3UW"
- },
- "outputs": [],
- "source": [
- "#@title <-- Tap this if you play on Mobile { display-mode: \"form\" }\n",
- "%%html\n",
- "Press play on the music player to keep the tab alive, then start KoboldCpp below \n",
- ""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "cellView": "form",
- "id": "uJS9i_Dltv8Y"
- },
- "outputs": [],
- "source": [
- "#@title v-- Enter your model below and then click this to start Koboldcpp \r\n",
- "\r\n",
- "Model = \"https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf\" #@param [\"https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf\",\"https://huggingface.co/KoboldAI/LLaMA2-13B-Estopia-GGUF/resolve/main/LLaMA2-13B-Estopia.Q4_K_S.gguf\",\"https://huggingface.co/mradermacher/Fimbulvetr-11B-v2-GGUF/resolve/main/Fimbulvetr-11B-v2.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF/resolve/main/mythomax-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/ReMM-SLERP-L2-13B-GGUF/resolve/main/remm-slerp-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/Xwin-LM-13B-v0.2-GGUF/resolve/main/xwin-lm-13b-v0.2.Q4_K_M.gguf\",\"https://huggingface.co/mradermacher/mini-magnum-12b-v1.1-GGUF/resolve/main/mini-magnum-12b-v1.1.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/Stheno-L2-13B-GGUF/resolve/main/stheno-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/MythoMax-L2-Kimiko-v2-13B-GGUF/resolve/main/mythomax-l2-kimiko-v2-13b.Q4_K_M.gguf\",\"https://huggingface.co/bartowski/Rocinante-12B-v1.1-GGUF/resolve/main/Rocinante-12B-v1.1-Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/MistRP-Airoboros-7B-GGUF/resolve/main/mistrp-airoboros-7b.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/airoboros-mistral2.2-7B-GGUF/resolve/main/airoboros-mistral2.2-7b.Q4_K_S.gguf\",\"https://huggingface.co/concedo/KobbleTinyV2-1.1B-GGUF/resolve/main/KobbleTiny-Q4_K.gguf\",\"https://huggingface.co/grimjim/kukulemon-7B-GGUF/resolve/main/kukulemon-7B.Q8_0.gguf\",\"https://huggingface.co/mradermacher/LemonKunoichiWizardV3-GGUF/resolve/main/LemonKunoichiWizardV3.Q4_K_M.gguf\",\"https://huggingface.co/Lewdiculous/Kunoichi-DPO-v2-7B-GGUF-Imatrix/resolve/main/Kunoichi-DPO-v2-7B-Q4_K_M-imatrix.gguf\",\"https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-i1-GGUF/resolve/main/L3-8B-Stheno-v3.2.i1-Q4_K_M.gguf\",\"https://huggingface.co/Lewdiculous/Llama-3-Lumimaid-8B-v0.1-OAS-GGUF-IQ-Imatrix/resolve/main/v2-Llama-3-Lumimaid-8B-v0.1-OAS-Q4_K_M-imat.gguf\",\"https://huggingface.co/bartowski/NeuralDaredevil-8B-abliterated-GGUF/resolve/main/NeuralDaredevil-8B-abliterated-Q4_K_M.gguf\",\"https://huggingface.co/bartowski/L3-8B-Lunaris-v1-GGUF/resolve/main/L3-8B-Lunaris-v1-Q4_K_M.gguf\",\"https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2.0-8B-GGUF/resolve/main/L3-Umbral-Mind-RP-v2.0-8B.Q4_K_M.gguf\"]{allow-input: true}\r\n",
- "Layers = 99 #@param [99]{allow-input: true}\r\n",
- "ContextSize = 4096 #@param [4096,8192] {allow-input: true}\r\n",
- "FlashAttention = True #@param {type:\"boolean\"}\r\n",
- "FACommand = \"\"\r\n",
- "#@markdown \r\n",
- "LoadVisionMMProjector = False #@param {type:\"boolean\"}\r\n",
- "Mmproj = \"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\" #@param [\"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/mistral-7b-mmproj-v1.5-Q4_1.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-7b-mmproj-v1.5-Q4_0.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/LLaMA3-8B_mmproj-Q4_1.gguf\"]{allow-input: true}\r\n",
- "VCommand = \"\"\r\n",
- "#@markdown \r\n",
- "LoadImgModel = False #@param {type:\"boolean\"}\r\n",
- "ImgModel = \"https://huggingface.co/koboldcpp/imgmodel/resolve/main/imgmodel_ftuned_q4_0.gguf\" #@param [\"https://huggingface.co/koboldcpp/imgmodel/resolve/main/imgmodel_ftuned_q4_0.gguf\"]{allow-input: true}\r\n",
- "SCommand = \"\"\r\n",
- "#@markdown \r\n",
- "LoadSpeechModel = False #@param {type:\"boolean\"}\r\n",
- "SpeechModel = \"https://huggingface.co/koboldcpp/whisper/resolve/main/whisper-base.en-q5_1.bin\" #@param [\"https://huggingface.co/koboldcpp/whisper/resolve/main/whisper-base.en-q5_1.bin\"]{allow-input: true}\r\n",
- "WCommand = \"\"\r\n",
- "\r\n",
- "import os\r\n",
- "if not os.path.isfile(\"/opt/bin/nvidia-smi\"):\r\n",
- " raise RuntimeError(\"⚠️Colab did not give you a GPU due to usage limits, this can take a few hours before they let you back in. Check out https://lite.koboldai.net for a free alternative (that does not provide an API link but can load KoboldAI saves and chat cards) or subscribe to Colab Pro for immediate access.⚠️\")\r\n",
- "\r\n",
- "%cd /content\r\n",
- "if Mmproj and LoadVisionMMProjector:\r\n",
- " VCommand = \"--mmproj vmodel.gguf\"\r\n",
- "else:\r\n",
- " SCommand = \"\"\r\n",
- "if ImgModel and LoadImgModel:\r\n",
- " SCommand = \"--sdmodel imodel.gguf --sdthreads 4 --sdquant --sdclamped\"\r\n",
- "else:\r\n",
- " SCommand = \"\"\r\n",
- "if SpeechModel and LoadSpeechModel:\r\n",
- " WCommand = \"--whispermodel wmodel.bin\"\r\n",
- "else:\r\n",
- " WCommand = \"\"\r\n",
- "if FlashAttention:\r\n",
- " FACommand = \"--flashattention\"\r\n",
- "else:\r\n",
- " FACommand = \"\"\r\n",
- "\r\n",
- "!echo Downloading KoboldCpp, please wait...\r\n",
- "!wget -O dlfile.tmp https://kcpplinux.concedo.workers.dev && mv dlfile.tmp koboldcpp_linux\r\n",
- "!test -f koboldcpp_linux && echo Download Successful || echo Download Failed\r\n",
- "!chmod +x ./koboldcpp_linux\r\n",
- "!apt update\r\n",
- "!apt install aria2 -y\r\n",
- "# simple fix for a common URL mistake\r\n",
- "if \"https://huggingface.co/\" in Model and \"/blob/main/\" in Model: \r\n",
- " Model = Model.replace(\"/blob/main/\", \"/resolve/main/\")\r\n",
- "!aria2c -x 10 -o model.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Model\r\n",
- "if VCommand:\r\n",
- " !aria2c -x 10 -o vmodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Mmproj\r\n",
- "if SCommand:\r\n",
- " !aria2c -x 10 -o imodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $ImgModel\r\n",
- "if WCommand:\r\n",
- " !aria2c -x 10 -o wmodel.bin --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $SpeechModel\r\n",
- "!./koboldcpp_linux model.gguf --usecublas 0 mmq --multiuser --gpulayers $Layers --contextsize $ContextSize --quiet --remotetunnel $FACommand $VCommand $SCommand $WCommand\r\n"
- ]
- }
- ],
- "metadata": {
- "accelerator": "GPU",
- "colab": {
- "cell_execution_strategy": "setup",
- "gpuType": "T4",
- "include_colab_link": true,
- "private_outputs": true,
- "provenance": []
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- },
- "language_info": {
- "name": "python"
- }
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "view-in-github"
+ },
+ "source": [
+ " "
+ ]
},
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "2FCn5tmpn3UV"
+ },
+ "source": [
+ "## Welcome to the Official KoboldCpp Colab Notebook\n",
+ "It's really easy to get started. Just press the two **Play** buttons below, and then connect to the **Cloudflare URL** shown at the end.\n",
+ "You can select a model from the dropdown, or enter a **custom URL** to a GGUF model (Example: `https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_M.gguf`)\n",
+ "\n",
+ "**Keep this page open and occationally check for captcha's so that your AI is not shut down**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "QNaj3u0jn3UW"
+ },
+ "outputs": [],
+ "source": [
+ "#@title <-- Tap this if you play on Mobile { display-mode: \"form\" }\n",
+ "%%html\n",
+ "Press play on the music player to keep the tab alive, then start KoboldCpp below \n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "cellView": "form",
+ "id": "uJS9i_Dltv8Y"
+ },
+ "outputs": [],
+ "source": [
+ "#@title v-- Enter your model below and then click this to start Koboldcpp \n",
+ "\n",
+ "Model = \"https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf\" #@param [\"https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf\",\"https://huggingface.co/KoboldAI/LLaMA2-13B-Estopia-GGUF/resolve/main/LLaMA2-13B-Estopia.Q4_K_S.gguf\",\"https://huggingface.co/mradermacher/Fimbulvetr-11B-v2-GGUF/resolve/main/Fimbulvetr-11B-v2.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF/resolve/main/mythomax-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/ReMM-SLERP-L2-13B-GGUF/resolve/main/remm-slerp-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/Xwin-LM-13B-v0.2-GGUF/resolve/main/xwin-lm-13b-v0.2.Q4_K_M.gguf\",\"https://huggingface.co/mradermacher/mini-magnum-12b-v1.1-GGUF/resolve/main/mini-magnum-12b-v1.1.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/Stheno-L2-13B-GGUF/resolve/main/stheno-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/MythoMax-L2-Kimiko-v2-13B-GGUF/resolve/main/mythomax-l2-kimiko-v2-13b.Q4_K_M.gguf\",\"https://huggingface.co/bartowski/Rocinante-12B-v1.1-GGUF/resolve/main/Rocinante-12B-v1.1-Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/MistRP-Airoboros-7B-GGUF/resolve/main/mistrp-airoboros-7b.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/airoboros-mistral2.2-7B-GGUF/resolve/main/airoboros-mistral2.2-7b.Q4_K_S.gguf\",\"https://huggingface.co/concedo/KobbleTinyV2-1.1B-GGUF/resolve/main/KobbleTiny-Q4_K.gguf\",\"https://huggingface.co/grimjim/kukulemon-7B-GGUF/resolve/main/kukulemon-7B.Q8_0.gguf\",\"https://huggingface.co/mradermacher/LemonKunoichiWizardV3-GGUF/resolve/main/LemonKunoichiWizardV3.Q4_K_M.gguf\",\"https://huggingface.co/Lewdiculous/Kunoichi-DPO-v2-7B-GGUF-Imatrix/resolve/main/Kunoichi-DPO-v2-7B-Q4_K_M-imatrix.gguf\",\"https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-i1-GGUF/resolve/main/L3-8B-Stheno-v3.2.i1-Q4_K_M.gguf\",\"https://huggingface.co/Lewdiculous/Llama-3-Lumimaid-8B-v0.1-OAS-GGUF-IQ-Imatrix/resolve/main/v2-Llama-3-Lumimaid-8B-v0.1-OAS-Q4_K_M-imat.gguf\",\"https://huggingface.co/bartowski/NeuralDaredevil-8B-abliterated-GGUF/resolve/main/NeuralDaredevil-8B-abliterated-Q4_K_M.gguf\",\"https://huggingface.co/bartowski/L3-8B-Lunaris-v1-GGUF/resolve/main/L3-8B-Lunaris-v1-Q4_K_M.gguf\",\"https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2.0-8B-GGUF/resolve/main/L3-Umbral-Mind-RP-v2.0-8B.Q4_K_M.gguf\"]{allow-input: true}\n",
+ "Layers = 99 #@param [99]{allow-input: true}\n",
+ "ContextSize = 4096 #@param [4096,8192] {allow-input: true}\n",
+ "FlashAttention = True #@param {type:\"boolean\"}\n",
+ "Multiplayer = False #@param {type:\"boolean\"}\n",
+ "FACommand = \"\"\n",
+ "MPCommand = \"\"\n",
+ "#@markdown \n",
+ "LoadVisionMMProjector = False #@param {type:\"boolean\"}\n",
+ "Mmproj = \"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\" #@param [\"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/mistral-7b-mmproj-v1.5-Q4_1.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-7b-mmproj-v1.5-Q4_0.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/LLaMA3-8B_mmproj-Q4_1.gguf\"]{allow-input: true}\n",
+ "VCommand = \"\"\n",
+ "#@markdown \n",
+ "LoadImgModel = False #@param {type:\"boolean\"}\n",
+ "ImgModel = \"https://huggingface.co/koboldcpp/imgmodel/resolve/main/imgmodel_ftuned_q4_0.gguf\" #@param [\"https://huggingface.co/koboldcpp/imgmodel/resolve/main/imgmodel_ftuned_q4_0.gguf\"]{allow-input: true}\n",
+ "SCommand = \"\"\n",
+ "#@markdown \n",
+ "LoadSpeechModel = False #@param {type:\"boolean\"}\n",
+ "SpeechModel = \"https://huggingface.co/koboldcpp/whisper/resolve/main/whisper-base.en-q5_1.bin\" #@param [\"https://huggingface.co/koboldcpp/whisper/resolve/main/whisper-base.en-q5_1.bin\"]{allow-input: true}\n",
+ "WCommand = \"\"\n",
+ "\n",
+ "import os\n",
+ "if not os.path.isfile(\"/opt/bin/nvidia-smi\"):\n",
+ " raise RuntimeError(\"⚠️Colab did not give you a GPU due to usage limits, this can take a few hours before they let you back in. Check out https://lite.koboldai.net for a free alternative (that does not provide an API link but can load KoboldAI saves and chat cards) or subscribe to Colab Pro for immediate access.⚠️\")\n",
+ "\n",
+ "%cd /content\n",
+ "if Mmproj and LoadVisionMMProjector:\n",
+ " VCommand = \"--mmproj vmodel.gguf\"\n",
+ "else:\n",
+ " SCommand = \"\"\n",
+ "if ImgModel and LoadImgModel:\n",
+ " SCommand = \"--sdmodel imodel.gguf --sdthreads 4 --sdquant --sdclamped\"\n",
+ "else:\n",
+ " SCommand = \"\"\n",
+ "if SpeechModel and LoadSpeechModel:\n",
+ " WCommand = \"--whispermodel wmodel.bin\"\n",
+ "else:\n",
+ " WCommand = \"\"\n",
+ "if FlashAttention:\n",
+ " FACommand = \"--flashattention\"\n",
+ "else:\n",
+ " FACommand = \"\"\n",
+ "if Multiplayer:\n",
+ " MPCommand = \"--multiplayer\"\n",
+ "else:\n",
+ " MPCommand = \"\"\n",
+ "\n",
+ "!echo Downloading KoboldCpp, please wait...\n",
+ "!wget -O dlfile.tmp https://kcpplinux.concedo.workers.dev && mv dlfile.tmp koboldcpp_linux\n",
+ "!test -f koboldcpp_linux && echo Download Successful || echo Download Failed\n",
+ "!chmod +x ./koboldcpp_linux\n",
+ "!apt update\n",
+ "!apt install aria2 -y\n",
+ "# simple fix for a common URL mistake\n",
+ "if \"https://huggingface.co/\" in Model and \"/blob/main/\" in Model:\n",
+ " Model = Model.replace(\"/blob/main/\", \"/resolve/main/\")\n",
+ "!aria2c -x 10 -o model.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Model\n",
+ "if VCommand:\n",
+ " !aria2c -x 10 -o vmodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Mmproj\n",
+ "if SCommand:\n",
+ " !aria2c -x 10 -o imodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $ImgModel\n",
+ "if WCommand:\n",
+ " !aria2c -x 10 -o wmodel.bin --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $SpeechModel\n",
+ "!./koboldcpp_linux model.gguf --usecublas 0 mmq --multiuser --gpulayers $Layers --contextsize $ContextSize --websearch --quiet --remotetunnel $FACommand $MPCommand $VCommand $SCommand $WCommand\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "cell_execution_strategy": "setup",
+ "gpuType": "T4",
+ "include_colab_link": true,
+ "private_outputs": true,
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}