load embedding at current maxctx instead of max trained ctx by default

Concedo 2025-08-13 18:42:14 +08:00
parent 06a3ee4c3b
commit 955cf66bbc
2 changed files with 4 additions and 2 deletions

@@ -1901,7 +1901,7 @@ def embeddings_load_model(model_filename):
 inputs.flash_attention = False
 inputs.threads = args.threads
 inputs.use_mmap = args.usemmap
-inputs.embeddingsmaxctx = args.embeddingsmaxctx
+inputs.embeddingsmaxctx = (args.embeddingsmaxctx if args.embeddingsmaxctx else args.contextsize) # for us to clamp to contextsize if embeddingsmaxctx unspecified
 inputs = set_backend_props(inputs)
 ret = handle.embeddings_load_model(inputs)
 return ret
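
For reference, a minimal sketch of the fallback this commit introduces. The flags --embeddingsmaxctx and --contextsize mirror the args fields read in the diff, but the argparse setup and default values below are assumptions for illustration, not koboldcpp's actual parser.

import argparse

# Hypothetical stand-in for the launcher's argument parser; only the two
# flags touched by this commit are modeled, with assumed defaults.
parser = argparse.ArgumentParser()
parser.add_argument("--contextsize", type=int, default=4096)
parser.add_argument("--embeddingsmaxctx", type=int, default=0)
args = parser.parse_args([])  # no flags passed: embeddingsmaxctx stays 0

# Before this commit, the falsy 0 was passed straight through and the
# backend fell back to the embedding model's trained max context. Now an
# unspecified value is clamped to the current context size instead.
embeddingsmaxctx = args.embeddingsmaxctx if args.embeddingsmaxctx else args.contextsize
print(embeddingsmaxctx)  # -> 4096 (current maxctx), not the trained ctx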