Mirror of https://github.com/LostRuins/koboldcpp.git
Merge commit 'c945aaaef2' into concedo_experimental
# Conflicts:
#	.devops/cann.Dockerfile
#	.github/workflows/build.yml
#	.github/workflows/release.yml
#	README.md
#	common/CMakeLists.txt
#	common/chat.cpp
#	docs/function-calling.md
#	ggml/src/ggml-cann/aclnn_ops.cpp
#	ggml/src/ggml-cann/aclnn_ops.h
#	ggml/src/ggml-cann/common.h
#	ggml/src/ggml-cann/ggml-cann.cpp
#	models/templates/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16.jinja
#	scripts/sync_vendor.py
#	tests/CMakeLists.txt
#	tests/peg-parser/tests.h
#	tests/test-chat-peg-parser.cpp
#	tests/test-chat-template.cpp
#	tests/test-chat.cpp
#	tests/testing.h
#	tools/llama-bench/llama-bench.cpp
commit 8855a7f52b
23 changed files with 6659 additions and 3690 deletions
@@ -8512,6 +8512,10 @@ def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
         except Exception:
             print("Could not find Embedded llama.cpp UI.")
 
+    if args.mcpfile and isinstance(args.mcpfile, str):
+        threading.Thread(target=load_mcp_async, args=(args,), daemon=True).start()
+        time.sleep(0.2) # short delay to allow get_capabilities to work
+
     # print enabled modules
     caps = get_capabilities()
     enabledmlist = []
@@ -8561,9 +8565,6 @@ def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
         else:
             endpoint_url = f"{httpsaffix}://{args.host}:{args.port}"
 
-    if args.mcpfile and isinstance(args.mcpfile, str):
-        threading.Thread(target=load_mcp_async, args=(args,), daemon=True).start()
-
     if start_server:
         if not args.remotetunnel:
             print(f"Starting Kobold API on port {args.port} at {endpoint_url}/api/")
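Taken together, the two hunks hoist the MCP loader: it used to run just before the server started, and now runs before the "enabled modules" report, so get_capabilities() can already reflect MCP state. Below is a minimal runnable sketch of the pattern (a fire-and-forget daemon thread plus a short grace period). The startup() wrapper and the stub bodies of load_mcp_async and get_capabilities are hypothetical stand-ins, not koboldcpp's actual implementations; only the names and the thread/sleep structure come from the diff.

import threading
import time
from types import SimpleNamespace

def load_mcp_async(args):
    # Hypothetical stub: the real function would parse args.mcpfile
    # and register the MCP tools it finds.
    time.sleep(0.1)

def get_capabilities():
    # Hypothetical stub: the real function reports enabled modules.
    return {"mcp": True}

def startup(args):
    if args.mcpfile and isinstance(args.mcpfile, str):
        # Daemon thread: loading never blocks startup and never
        # prevents interpreter exit if it hangs.
        threading.Thread(target=load_mcp_async, args=(args,), daemon=True).start()
        # Short grace period so the capability report printed next
        # has a chance to reflect the MCP state.
        time.sleep(0.2)
    print(get_capabilities())

startup(SimpleNamespace(mcpfile="mcp.json"))

The daemon flag is the key design choice here: MCP loading is best-effort, so a slow or broken config file should delay the capability printout by at most the 0.2 s sleep rather than stall the whole launch.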