{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "view-in-github"
},
"source": [
"<a href=\"https://colab.research.google.com/github/LostRuins/koboldcpp/blob/concedo/colab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "2FCn5tmpn3UV"
},
"source": [
"## Welcome to the Official KoboldCpp Colab Notebook\n",
"It's really easy to get started. Just press the two **Play** buttons below, and then connect to the **Cloudflare URL** shown at the end.\n",
"You can select a model from the dropdown, or enter a **custom URL** to a GGUF model (Example: `https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_M.gguf`)\n",
"\n",
"**Keep this page open and occationally check for captcha's so that your AI is not shut down**"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "QNaj3u0jn3UW"
},
"outputs": [],
"source": [
"#@title <-- Tap this if you play on Mobile { display-mode: \"form\" }\n",
"%%html\n",
"<b>Press play on the music player to keep the tab alive, then start KoboldCpp below</b><br/>\n",
"<audio autoplay=\"\" src=\"https://raw.githubusercontent.com/KoboldAI/KoboldAI-Client/main/colab/silence.m4a\" loop controls>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "uJS9i_Dltv8Y"
},
"outputs": [],
"source": [
"#@title <b>v-- Enter your model below and then click this to start Koboldcpp</b>\n",
"\n",
"Model = \"https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf\" #@param [\"https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf\",\"https://huggingface.co/KoboldAI/LLaMA2-13B-Estopia-GGUF/resolve/main/LLaMA2-13B-Estopia.Q4_K_S.gguf\",\"https://huggingface.co/mradermacher/Fimbulvetr-11B-v2-GGUF/resolve/main/Fimbulvetr-11B-v2.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF/resolve/main/mythomax-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/ReMM-SLERP-L2-13B-GGUF/resolve/main/remm-slerp-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/Xwin-LM-13B-v0.2-GGUF/resolve/main/xwin-lm-13b-v0.2.Q4_K_M.gguf\",\"https://huggingface.co/mradermacher/mini-magnum-12b-v1.1-GGUF/resolve/main/mini-magnum-12b-v1.1.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/Stheno-L2-13B-GGUF/resolve/main/stheno-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/MythoMax-L2-Kimiko-v2-13B-GGUF/resolve/main/mythomax-l2-kimiko-v2-13b.Q4_K_M.gguf\",\"https://huggingface.co/bartowski/Rocinante-12B-v1.1-GGUF/resolve/main/Rocinante-12B-v1.1-Q4_K_S.gguf\",\"https://huggingface.co/KoboldAI/Llama-3.1-8B-BookAdventures-GGUF/resolve/main/Llama-3.1-8B-BookAdventures.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/MistRP-Airoboros-7B-GGUF/resolve/main/mistrp-airoboros-7b.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/airoboros-mistral2.2-7B-GGUF/resolve/main/airoboros-mistral2.2-7b.Q4_K_S.gguf\",\"https://huggingface.co/concedo/KobbleTinyV2-1.1B-GGUF/resolve/main/KobbleTiny-Q4_K.gguf\",\"https://huggingface.co/grimjim/kukulemon-7B-GGUF/resolve/main/kukulemon-7B.Q8_0.gguf\",\"https://huggingface.co/mradermacher/LemonKunoichiWizardV3-GGUF/resolve/main/LemonKunoichiWizardV3.Q4_K_M.gguf\",\"https://huggingface.co/Lewdiculous/Kunoichi-DPO-v2-7B-GGUF-Imatrix/resolve/main/Kunoichi-DPO-v2-7B-Q4_K_M-imatrix.gguf\",\"https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-i1-GGUF/resolve/main/L3-8B-Stheno-v3.2.i1-Q4_K_M.gguf\",\"https://huggingface.co/Lewdiculous/Llama-3-Lumimaid-8B-v0.1-OAS-GGUF-IQ-Imatrix/resolve/main/v2-Llama-3-Lumimaid-8B-v0.1-OAS-Q4_K_M-imat.gguf\",\"https://huggingface.co/bartowski/NeuralDaredevil-8B-abliterated-GGUF/resolve/main/NeuralDaredevil-8B-abliterated-Q4_K_M.gguf\",\"https://huggingface.co/bartowski/L3-8B-Lunaris-v1-GGUF/resolve/main/L3-8B-Lunaris-v1-Q4_K_M.gguf\",\"https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2.0-8B-GGUF/resolve/main/L3-Umbral-Mind-RP-v2.0-8B.Q4_K_M.gguf\",\"https://huggingface.co/bartowski/TheDrummer_Cydonia-24B-v2-GGUF/resolve/main/TheDrummer_Cydonia-24B-v2-Q4_K_S.gguf\",\"https://huggingface.co/bartowski/PocketDoc_Dans-PersonalityEngine-V1.2.0-24b-GGUF/resolve/main/PocketDoc_Dans-PersonalityEngine-V1.2.0-24b-IQ4_XS.gguf\"]{allow-input: true}\n",
"Layers = 99 #@param [99]{allow-input: true}\n",
"ContextSize = 4096 #@param [4096,8192] {allow-input: true}\n",
"FlashAttention = True #@param {type:\"boolean\"}\n",
"Multiplayer = False #@param {type:\"boolean\"}\n",
"DeleteExistingModels = True #@param {type:\"boolean\"}\n",
"FACommand = \"\"\n",
"MPCommand = \"\"\n",
"#@markdown <hr>\n",
"LoadVisionMMProjector = False #@param {type:\"boolean\"}\n",
"Mmproj = \"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\" #@param [\"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/mistral-7b-mmproj-v1.5-Q4_1.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-7b-mmproj-v1.5-Q4_0.gguf\",\"https://huggingface.co/koboldcpp/mmproj/resolve/main/LLaMA3-8B_mmproj-Q4_1.gguf\"]{allow-input: true}\n",
"VCommand = \"\"\n",
"#@markdown <hr>\n",
"LoadImgModel = False #@param {type:\"boolean\"}\n",
"ImgModel = \"https://huggingface.co/koboldcpp/imgmodel/resolve/main/imgmodel_ftuned_q4_0.gguf\" #@param [\"https://huggingface.co/koboldcpp/imgmodel/resolve/main/imgmodel_ftuned_q4_0.gguf\"]{allow-input: true}\n",
"SCommand = \"\"\n",
"#@markdown <hr>\n",
"LoadSpeechModel = False #@param {type:\"boolean\"}\n",
"SpeechModel = \"https://huggingface.co/koboldcpp/whisper/resolve/main/whisper-base.en-q5_1.bin\" #@param [\"https://huggingface.co/koboldcpp/whisper/resolve/main/whisper-base.en-q5_1.bin\"]{allow-input: true}\n",
"WCommand = \"\"\n",
"#@markdown <hr>\n",
"LoadTTSModel = False #@param {type:\"boolean\"}\n",
"TTSModel = \"https://huggingface.co/koboldcpp/tts/resolve/main/OuteTTS-0.2-500M-Q4_0.gguf\" #@param [\"https://huggingface.co/koboldcpp/tts/resolve/main/OuteTTS-0.2-500M-Q4_0.gguf\",\"https://huggingface.co/koboldcpp/tts/resolve/main/Kokoro_no_espeak_Q4.gguf\"]{allow-input: true}\n",
"WavTokModel = \"https://huggingface.co/koboldcpp/tts/resolve/main/WavTokenizer-Large-75-Q4_0.gguf\" #@param [\"https://huggingface.co/koboldcpp/tts/resolve/main/WavTokenizer-Large-75-Q4_0.gguf\"]{allow-input: true}\n",
"TTSCommand = \"\"\n",
"#@markdown <hr>\n",
"LoadEmbeddingsModel = False #@param {type:\"boolean\"}\n",
"EmbeddingsModel = \"https://huggingface.co/yixuan-chia/snowflake-arctic-embed-s-GGUF/resolve/main/snowflake-arctic-embed-s-Q4_0.gguf\" #@param [\"https://huggingface.co/yixuan-chia/snowflake-arctic-embed-s-GGUF/resolve/main/snowflake-arctic-embed-s-Q4_0.gguf\"]{allow-input: true}\n",
"ECommand = \"\"\n",
"#@markdown <hr>\n",
"#@markdown This enables saving stories directly to your google drive. You will have to grant permissions, and then you can access the saves from the \"KoboldCpp Server Storage\" option.\n",
"AllowSaveToGoogleDrive = False #@param {type:\"boolean\"}\n",
"SavGdriveCommand = \"\"\n",
"#@markdown <hr>\n",
"#@markdown Only select the following box if regular cloudflare tunnel fails to work. It will generate an inferior localtunnel tunnel, which you can use after entering a password.\n",
"MakeLocalTunnelFallback = False #@param {type:\"boolean\"}\n",
"\n",
"import os, glob\n",
"if not os.path.isfile(\"/opt/bin/nvidia-smi\"):\n",
" raise RuntimeError(\"⚠Colab did not give you a GPU due to usage limits, this can take a few hours before they let you back in. Check out https://lite.koboldai.net for a free alternative (that does not provide an API link but can load KoboldAI saves and chat cards) or subscribe to Colab Pro for immediate access.⚠️\")\n",
"\n",
"if AllowSaveToGoogleDrive:\n",
" print(\"Attempting to request access to save to your google drive...\")\n",
" try:\n",
" from google.colab import drive\n",
" import os, json\n",
" drive.mount('/content/drive', force_remount=True)\n",
" if not os.path.exists(\"/content/drive/MyDrive\"):\n",
" raise RuntimeError(\"Google Drive mount failed. Please grant permissions and try again.\")\n",
" kcppdir = '/content/drive/MyDrive/koboldcpp_data'\n",
" os.makedirs(kcppdir, exist_ok=True)\n",
" savedatapath = os.path.join(kcppdir, \"koboldcpp_save_db.jsondb\")\n",
" if not os.path.exists(savedatapath):\n",
" settings_data = {}\n",
" with open(savedatapath, \"w\") as json_file:\n",
" json.dump(settings_data, json_file, indent=4)\n",
" print(f\"Created new koboldcpp_save_db.jsondb at {savedatapath}\")\n",
" else:\n",
" print(f\"Loading saved data at {savedatapath}\")\n",
" SavGdriveCommand = f\" --savedatafile {savedatapath}\"\n",
" except Exception as e:\n",
" print(f\"⚠️ Error: {e}\")\n",
" print(\"Please ensure you grant Google Drive permissions and try again.\")\n",
"\n",
"%cd /content\n",
"if Mmproj and LoadVisionMMProjector:\n",
" VCommand = f\"--mmproj {Mmproj}\"\n",
"else:\n",
" SCommand = \"\"\n",
"if ImgModel and LoadImgModel:\n",
" SCommand = f\"--sdmodel {ImgModel} --sdthreads 4 --sdquant --sdclamped\"\n",
"else:\n",
" SCommand = \"\"\n",
"if SpeechModel and LoadSpeechModel:\n",
" WCommand = f\"--whispermodel {SpeechModel}\"\n",
"else:\n",
" WCommand = \"\"\n",
"if TTSModel and WavTokModel and LoadTTSModel:\n",
" TTSCommand = f\"--ttsmodel {TTSModel} --ttswavtokenizer {WavTokModel} --ttsgpu\"\n",
"elif TTSModel and LoadTTSModel:\n",
" TTSCommand = f\"--ttsmodel {TTSModel} --ttsgpu\"\n",
"else:\n",
" TTSCommand = \"\"\n",
"if EmbeddingsModel and LoadEmbeddingsModel:\n",
" ECommand = f\"--embeddingsmodel {EmbeddingsModel}\"\n",
"else:\n",
" ECommand = \"\"\n",
"if FlashAttention:\n",
" FACommand = \"--flashattention\"\n",
"else:\n",
" FACommand = \"\"\n",
"if Multiplayer:\n",
" MPCommand = \"--multiplayer\"\n",
"else:\n",
" MPCommand = \"\"\n",
"\n",
"ModelCommand = f\"--model {Model}\"\n",
"\n",
"if DeleteExistingModels:\n",
" print(\"Deleting all cached models to redownload...\")\n",
" patterns = ['*.gguf', '*.bin', '*.ggml']\n",
" for pattern in patterns:\n",
" for file_path in glob.glob(pattern):\n",
" try:\n",
" os.remove(file_path)\n",
" print(f\"Deleted: {file_path}\")\n",
" except Exception as e:\n",
" print(f\"Failed to delete {file_path}: {e}\")\n",
"\n",
"\n",
"!echo Downloading KoboldCpp, please wait...\n",
"!wget -O dlfile.tmp https://kcpplinux.concedo.workers.dev && mv dlfile.tmp koboldcpp_linux\n",
"!test -f koboldcpp_linux && echo Download Successful || echo Download Failed\n",
"!chmod +x ./koboldcpp_linux\n",
"!apt update\n",
"!apt install aria2 -y\n",
"\n",
"if MakeLocalTunnelFallback:\n",
" import urllib\n",
" print(\"Trying to use LocalTunnel as a fallback tunnel (not so good)...\")\n",
" ltpw = urllib.request.urlopen('https://loca.lt/mytunnelpassword').read().decode('utf8').strip(\"\\n\")\n",
" !nohup npx --yes localtunnel --port 5001 > lt.log 2>&1 &\n",
" !sleep 8\n",
" print(\"=================\")\n",
" print(\"(LocalTunnel Results)\")\n",
" !cat lt.log\n",
" print(f\"Please open the above link, and input the password '{ltpw}'\\nYour KoboldCpp will start shortly...\")\n",
" print(\"=================\")\n",
" !sleep 10\n",
"!./koboldcpp_linux $ModelCommand --usecuda 0 mmq --chatcompletionsadapter AutoGuess --multiuser --gpulayers $Layers --contextsize $ContextSize --websearch --quiet --remotetunnel $FACommand $MPCommand $VCommand $SCommand $WCommand $TTSCommand $ECommand $SavGdriveCommand\n"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"cell_execution_strategy": "setup",
"gpuType": "T4",
"include_colab_link": true,
"private_outputs": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}