fix: use /infill for llama.cpp code-completions (#513)

This commit is contained in:
Phil 2024-04-25 15:47:56 +02:00 committed by GitHub
parent 7d05d17797
commit 8de72b3301
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 8 additions and 9 deletions

View file

@@ -122,7 +122,7 @@ public final class CompletionRequestService {
CodeCompletionRequestFactory.buildCustomRequest(requestDetails),
new OpenAITextCompletionEventSourceListener(eventListener));
case LLAMA_CPP -> CompletionClientProvider.getLlamaClient()
-.getChatCompletionAsync(
+.getInfillAsync(
CodeCompletionRequestFactory.buildLlamaRequest(requestDetails),
eventListener);
default ->