Mirror of https://github.com/LostRuins/koboldcpp.git, synced 2025-09-11 17:44:38 +00:00
Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	.devops/nix/package.nix
#	.github/workflows/build.yml
#	.github/workflows/server.yml
#	CMakeLists.txt
#	Makefile
#	README.md
#	requirements.txt
#	scripts/LlamaConfig.cmake.in
commit 52f9911240
31 changed files with 10838 additions and 5366 deletions
@@ -48,7 +48,7 @@ The project is under active development, and we are [looking for feedback and co

- `--api-key`: Set an API key for request authorization. By default, the server responds to every request. With an API key set, requests must include an `Authorization` header carrying the key as a Bearer token. May be used multiple times to enable multiple valid keys.
- `--api-key-file`: Path to a file containing API keys delimited by new lines. If set, requests must include one of the keys for access. May be used in conjunction with `--api-key`.
- `--embeddings`: Enable embedding vector output and the OAI-compatible endpoint /v1/embeddings. Physical batch size (`--ubatch-size`) must be carefully defined. Default: disabled
- `-np N`, `--parallel N`: Set the number of slots for processing requests. Default: `1`
- `-np N`, `--parallel N`: Set the number of slots for processing requests. Default: `1`. Values > 1 allow higher throughput with multiple parallel requests, but the results will **not** be deterministic due to differences in rounding error.
- `-cb`, `--cont-batching`: Enable continuous batching (a.k.a. dynamic batching). Default: disabled
- `-spf FNAME`, `--system-prompt-file FNAME`: Set a file to load a system prompt (the initial prompt of all slots). This is useful for chat applications. [See more](#change-system-prompt-on-runtime)
- `--mmproj MMPROJ_FILE`: Path to a multimodal projector file for LLaVA.
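As a quick illustration of the `--api-key` behaviour described above, here is a minimal sketch of a client request passing the key as a Bearer token. It assumes a server already running on `localhost:8080` and a placeholder key `sk-test`; neither value comes from this diff.

```python
# Minimal sketch: call /completion with an API key (placeholder values).
import json
import urllib.request

def complete(prompt: str, api_key: str = "sk-test") -> dict:
    payload = json.dumps({"prompt": prompt, "n_predict": 16}).encode("utf-8")
    req = urllib.request.Request(
        "http://localhost:8080/completion",  # assumed host/port
        data=payload,
        headers={
            "Content-Type": "application/json",
            # Required once the server is started with --api-key:
            "Authorization": f"Bearer {api_key}",
        },
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())

if __name__ == "__main__":
    print(complete("I believe the meaning of life is")["content"])
```

Without a valid key the server is expected to reject the request with an authorization error instead of answering it.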
@@ -103,7 +103,6 @@ struct slot_params {
    bool stream = true;
    bool cache_prompt = false; // remember the prompt to avoid reprocessing all prompt

    uint32_t seed = -1; // RNG seed
    int32_t n_keep = 0; // number of tokens to keep from initial prompt
    int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
    int32_t n_predict = -1; // new tokens to predict
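The fields above are per-request settings and map onto keys of the JSON body accepted by the completion endpoint. Below is a hypothetical request body exercising them; the field names are taken from the `request_completion` test helper later in this diff, and the values are purely illustrative.

```python
# Illustrative /completion request body mirroring the slot_params fields above.
request_body = {
    "prompt": "The meaning of life is",
    "stream": False,       # slot_params.stream
    "cache_prompt": True,  # reuse the cached prompt instead of reprocessing it
    "seed": 42,            # RNG seed
    "n_keep": 0,           # tokens kept from the initial prompt when shifting context
    "n_predict": 32,       # new tokens to predict
}
```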
@@ -1265,7 +1264,7 @@ struct server_context {
        {"n_ctx", slot.n_ctx},
        {"n_predict", slot.n_predict},
        {"model", params.model_alias},
        {"seed", slot.params.seed},
        {"seed", slot.sparams.seed},
        {"temperature", slot.sparams.temp},
        {"dynatemp_range", slot.sparams.dynatemp_range},
        {"dynatemp_exponent", slot.sparams.dynatemp_exponent},
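For reference, the initializer above builds the per-slot generation settings that the server reports back; after this change the seed is read from the sampling parameters (`slot.sparams.seed`) rather than `slot.params.seed`. Serialized, the object would look roughly like the sketch below, with key names taken from the diff and invented placeholder values.

```python
# Rough shape of the reported generation settings (values are made up).
generation_settings = {
    "n_ctx": 2048,
    "n_predict": 32,
    "model": "llama-2",
    "seed": 42,                 # now taken from slot.sparams.seed
    "temperature": 0.8,
    "dynatemp_range": 0.0,
    "dynatemp_exponent": 1.0,
}
```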
@@ -1983,8 +1982,7 @@ struct server_context {
                    slot.state = SLOT_STATE_PROCESSING;
                    slot.command = SLOT_COMMAND_NONE;
                    slot.release();
                    slot.print_timings();
                    send_final_response(slot);
                    send_error(slot, "input is too large to process. increase the physical batch size", ERROR_TYPE_SERVER);
                    continue;
                }
            } else {
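With the change above, a prompt larger than the physical batch size (`--ubatch-size`) is answered with a server error instead of an empty final response. A client can surface that message to the caller; the sketch below assumes the same placeholder host and port as the earlier example.

```python
# Sketch: surface the "input is too large" server error to the caller.
import json
import urllib.error
import urllib.request

def complete_or_raise(body: dict) -> dict:
    req = urllib.request.Request(
        "http://localhost:8080/completion",  # assumed host/port
        data=json.dumps(body).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    try:
        with urllib.request.urlopen(req) as resp:
            return json.loads(resp.read())
    except urllib.error.HTTPError as err:
        # e.g. "input is too large to process. increase the physical batch size"
        raise RuntimeError(err.read().decode("utf-8", errors="replace")) from err
```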
@@ -13,6 +13,7 @@ Feature: Results

  Scenario Outline: consistent results with same seed
    Given <n_slots> slots
    And 1.0 temperature
    Then the server is starting
    Then the server is healthy

@@ -26,10 +27,12 @@ Feature: Results
    Examples:
      | n_slots |
      | 1 |
      | 2 |
      # FIXME: unified KV cache nondeterminism
      # | 2 |

  Scenario Outline: different results with different seed
    Given <n_slots> slots
    And 1.0 temperature
    Then the server is starting
    Then the server is healthy

@@ -70,12 +73,46 @@ Feature: Results
    Then all predictions are equal
    Examples:
      | n_parallel | temp |
      | 1 | 0.0 |
      | 2 | 0.0 |
      | 4 | 0.0 |
      | 1 | 1.0 |
      # FIXME: These tests fail on master. The problem seems to be the unified KV cache.
      | 1 | 0.0 |
      | 1 | 1.0 |
      # FIXME: unified KV cache nondeterminism
      # See https://github.com/ggerganov/whisper.cpp/issues/1941#issuecomment-1986923227
      # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574 .
      # | 2 | 1.0 |
      # | 4 | 1.0 |
      # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574
      # and https://github.com/ggerganov/llama.cpp/pull/7347 .
      # | 2 | 0.0 |
      # | 4 | 0.0 |
      # | 2 | 1.0 |
      # | 4 | 1.0 |

  Scenario Outline: consistent token probs with same seed and prompt
    Given <n_slots> slots
    And <n_kv> KV cache size
    And 1.0 temperature
    And <n_predict> max tokens to predict
    Then the server is starting
    Then the server is healthy

    Given 1 prompts "The meaning of life is" with seed 42
    And concurrent completion requests
    # Then the server is busy # Not all slots will be utilized.
    Then the server is idle
    And all slots are idle

    Given <n_parallel> prompts "The meaning of life is" with seed 42
    And concurrent completion requests
    # Then the server is busy # Not all slots will be utilized.
    Then the server is idle
    And all slots are idle

    Then all token probabilities are equal
    Examples:
      | n_slots | n_kv | n_predict | n_parallel |
      | 4 | 1024 | 1 | 1 |
      # FIXME: unified KV cache nondeterminism
      # See https://github.com/ggerganov/whisper.cpp/issues/1941#issuecomment-1986923227
      # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574
      # and https://github.com/ggerganov/llama.cpp/pull/7347 .
      # | 4 | 1024 | 1 | 4 |
      # | 4 | 1024 | 100 | 1 |
      # This test still fails even with the above patches; the first token probabilities are already different.
      # | 4 | 1024 | 100 | 4 |

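The scenarios above boil down to one check: with the same seed, prompt, and temperature, repeated completions should be identical, and their per-position token probabilities should match. A rough standalone equivalent of that check is sketched below; it assumes a local server on `localhost:8080` and uses the same `/completion` fields (`seed`, `n_probs`, `completion_probabilities`) as the test suite, so it is a sketch rather than a drop-in replacement for the feature tests.

```python
# Standalone sketch of the "consistent results with same seed" check.
import json
import urllib.request

def completion(prompt: str, seed: int, n_predict: int = 16) -> dict:
    payload = json.dumps({
        "prompt": prompt,
        "seed": seed,
        "temperature": 1.0,
        "n_predict": n_predict,
        "n_probs": 2,  # ask for token probabilities, as the tests do
    }).encode("utf-8")
    req = urllib.request.Request(
        "http://localhost:8080/completion",  # assumed host/port
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())

if __name__ == "__main__":
    first = completion("The meaning of life is", seed=42)
    second = completion("The meaning of life is", seed=42)
    assert first["content"] == second["content"], "contents differ despite same seed"
    assert first["completion_probabilities"] == second["completion_probabilities"], \
        "token probabilities differ despite same seed"
```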
@@ -37,8 +37,8 @@ Feature: llama.cpp server

    Examples: Prompts
      | prompt | n_predict | re_content | n_prompt | n_predicted | truncated |
      | I believe the meaning of life is | 8 | (read\|going)+ | 18 | 8 | not |
      | Write a joke about AI from a very long prompt which will not be truncated | 256 | (princesses\|everyone\|kids\|Anna\|forest)+ | 46 | 64 | not |
      | I believe the meaning of life is | 8 | (read\|going\|pretty)+ | 18 | 8 | not |
      | Write a joke about AI from a very long prompt which will not be truncated | 256 | (princesses\|everyone\|kids\|Anna\|forest)+ | 45 | 64 | not |

  Scenario: Completion prompt truncated
    Given a prompt:

@@ -67,8 +67,8 @@ Feature: llama.cpp server

    Examples: Prompts
      | model | system_prompt | user_prompt | max_tokens | re_content | n_prompt | n_predicted | enable_streaming | truncated |
      | llama-2 | Book | What is the best book | 8 | (Here\|what)+ | 77 | 8 | disabled | not |
      | codellama70b | You are a coding assistant. | Write the fibonacci function in c++. | 128 | (thanks\|happy\|bird\|Annabyear)+ | -1 | 64 | enabled | |
      | llama-2 | Book | What is the best book | 8 | (Here\|what)+ | 76 | 8 | disabled | not |
      | codellama70b | You are a coding assistant. | Write the fibonacci function in c++. | 128 | (thanks\|happy\|bird\|fireplace)+ | -1 | 64 | enabled | |

  Scenario Outline: OAI Compatibility w/ response format

@@ -84,7 +84,7 @@ Feature: llama.cpp server
      | response_format | n_predicted | re_content |
      | {"type": "json_object", "schema": {"const": "42"}} | 5 | "42" |
      | {"type": "json_object", "schema": {"items": [{"type": "integer"}]}} | 10 | \[ -300 \] |
      | {"type": "json_object"} | 10 | \{ " Jacky. |
      | {"type": "json_object"} | 10 | \{ " Saragine. |

  Scenario: Tokenize / Detokenize

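The `response_format` examples above constrain the OAI-compatible chat endpoint to JSON output, optionally under a schema. A hedged sketch of such a request body follows; the model alias, message text, and schema are illustrative placeholders rather than values taken from this diff.

```python
# Sketch of an OAI-compatible chat request body using response_format
# (model name and schema are illustrative placeholders).
chat_request = {
    "model": "llama-2",
    "messages": [{"role": "user", "content": "Answer with the number 42."}],
    "max_tokens": 10,
    "response_format": {"type": "json_object", "schema": {"const": "42"}},
}
```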
@@ -26,7 +26,7 @@ Feature: llama.cpp server slot management
    # Since we have cache, this should only process the last tokens
    Given a user prompt "What is the capital of Germany?"
    And a completion request with no api error
    Then 24 tokens are predicted matching (Thank|special)
    Then 24 tokens are predicted matching (Thank|special|Lily)
    And 7 prompt tokens are processed
    # Loading the original cache into slot 0,
    # we should only be processing 1 prompt token and get the same output

@@ -41,7 +41,7 @@ Feature: llama.cpp server slot management
    Given a user prompt "What is the capital of Germany?"
    And using slot id 1
    And a completion request with no api error
    Then 24 tokens are predicted matching (Thank|special)
    Then 24 tokens are predicted matching (Thank|special|Lily)
    And 1 prompt tokens are processed

  Scenario: Erase Slot

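These scenarios rely on prompt caching tied to a specific slot: re-sending a prompt whose prefix is already cached in that slot should only process the remaining tokens. The sketch below shows the corresponding request fields (`id_slot`, `cache_prompt`), which also appear in the test helper further down in this diff; the host-side behaviour and the values are illustrative.

```python
# Request pinned to slot 1 with prompt caching enabled (illustrative values).
# Re-sending the same prompt to the same slot should process far fewer prompt
# tokens, as the scenarios above assert (7 and then 1 processed tokens).
request_body = {
    "prompt": "What is the capital of Germany?",
    "id_slot": 1,          # pin the request to a specific slot
    "cache_prompt": True,  # keep the prompt in the slot's cache
    "n_predict": 24,
}
```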
@@ -23,6 +23,7 @@ from prometheus_client import parser

def step_server_config(context, server_fqdn, server_port):
    context.server_fqdn = server_fqdn
    context.server_port = int(server_port)
    context.n_threads = None
    context.n_gpu_layer = None
    if 'PORT' in os.environ:
        context.server_port = int(os.environ['PORT'])

@@ -109,6 +110,11 @@ def step_n_gpu_layer(context, ngl):
    context.n_gpu_layer = ngl


@step('{n_threads:d} threads')
def step_n_threads(context, n_threads):
    context.n_threads = n_threads


@step('{draft:d} as draft')
def step_draft(context, draft):
    context.draft = draft

@@ -193,7 +199,7 @@ async def step_wait_for_the_server_to_be_started(context, expecting_status):

        case 'ready' | 'idle':
            await wait_for_health_status(context, context.base_url, 200, 'ok',
                                         timeout=10,
                                         timeout=30,
                                         params={'fail_on_no_slot': 0, 'include_slots': 0},
                                         slots_idle=context.n_slots,
                                         slots_processing=0,
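The step above polls the server's `/health` endpoint until it reports `ok`, with the timeout raised from 10 to 30 seconds. A stripped-down synchronous version of that wait loop might look like the sketch below; the host, port, and the exact `{"status": "ok"}` response shape are assumptions, not taken from this diff.

```python
# Simplified sketch of waiting for /health to report "ok" (assumed fields/host).
import json
import time
import urllib.error
import urllib.request

def wait_for_health(base_url: str = "http://localhost:8080", timeout: float = 30.0) -> None:
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with urllib.request.urlopen(f"{base_url}/health", timeout=2) as resp:
                if json.loads(resp.read()).get("status") == "ok":
                    return
        except (urllib.error.URLError, ConnectionError):
            pass  # server not up yet
        time.sleep(0.5)
    raise TimeoutError("server did not become healthy in time")
```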
@@ -274,13 +280,22 @@ async def step_predictions_equal(context):

@step('all predictions are different')
@async_run_until_complete
async def step_predictions_equal(context):
async def step_predictions_different(context):
    n_completions = await gather_tasks_results(context)
    assert n_completions >= 2, "need at least 2 completions"
    assert_all_predictions_different(context.tasks_result)
    context.tasks_result = []


@step('all token probabilities are equal')
@async_run_until_complete
async def step_token_probabilities_equal(context):
    n_completions = await gather_tasks_results(context)
    assert n_completions >= 2, "need at least 2 completions"
    assert_all_token_probabilities_equal(context.tasks_result)
    context.tasks_result = []


@step('the completion is truncated')
def step_assert_completion_truncated(context):
    step_assert_completion_truncated(context, '')

@@ -868,7 +883,8 @@ async def request_completion(prompt,
                                  "cache_prompt": cache_prompt,
                                  "id_slot": id_slot,
                                  "seed": seed if seed is not None else 42,
                                  "temperature": temperature if temperature is not None else "0.8f",
                                  "temperature": temperature if temperature is not None else 0.8,
                                  "n_probs": 2,
                              },
                              headers=headers,
                              timeout=3600) as response:
@@ -1123,6 +1139,23 @@ def assert_all_predictions_different(completion_responses):
            assert content_i != content_j, "contents not different"


def assert_all_token_probabilities_equal(completion_responses):
    n_predict = len(completion_responses[0]['completion_probabilities'])
    if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
        for pos in range(n_predict):
            for i, response_i in enumerate(completion_responses):
                probs_i = response_i['completion_probabilities'][pos]['probs']
                print(f"pos {pos}, probs {i}: {probs_i}")
    for pos in range(n_predict):
        for i, response_i in enumerate(completion_responses):
            probs_i = response_i['completion_probabilities'][pos]['probs']
            for j, response_j in enumerate(completion_responses):
                if i == j:
                    continue
                probs_j = response_j['completion_probabilities'][pos]['probs']
                assert probs_i == probs_j, "contents not equal"


async def gather_tasks_results(context):
    n_tasks = len(context.concurrent_tasks)
    if context.debug:

@@ -1261,6 +1294,8 @@ def start_server_background(context):
        server_args.extend(['--batch-size', context.n_batch])
    if context.n_ubatch:
        server_args.extend(['--ubatch-size', context.n_ubatch])
    if context.n_threads:
        server_args.extend(['--threads', context.n_threads])
    if context.n_gpu_layer:
        server_args.extend(['--n-gpu-layers', context.n_gpu_layer])
    if context.draft is not None:
