fix: use /infill for llama.cpp code-completions (#513)

This commit is contained in:
Phil 2024-04-25 15:47:56 +02:00 committed by GitHub
parent 7d05d17797
commit 8de72b3301
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 8 additions and 9 deletions

View file

@@ -122,7 +122,7 @@ public final class CompletionRequestService {
CodeCompletionRequestFactory.buildCustomRequest(requestDetails),
new OpenAITextCompletionEventSourceListener(eventListener));
case LLAMA_CPP -> CompletionClientProvider.getLlamaClient()
.getChatCompletionAsync(
.getInfillAsync(
CodeCompletionRequestFactory.buildLlamaRequest(requestDetails),
eventListener);
default ->

View file

@@ -12,6 +12,7 @@ import ee.carlrobert.codegpt.settings.service.llama.LlamaSettings
import ee.carlrobert.codegpt.settings.service.llama.LlamaSettingsState
import ee.carlrobert.codegpt.settings.service.openai.OpenAISettings
import ee.carlrobert.llm.client.llama.completion.LlamaCompletionRequest
import ee.carlrobert.llm.client.llama.completion.LlamaInfillRequest
import ee.carlrobert.llm.client.openai.completion.request.OpenAITextCompletionRequest
import okhttp3.MediaType.Companion.toMediaType
import okhttp3.Request
@@ -59,16 +60,14 @@ object CodeCompletionRequestFactory {
}
@JvmStatic
fun buildLlamaRequest(details: InfillRequestDetails): LlamaCompletionRequest {
fun buildLlamaRequest(details: InfillRequestDetails): LlamaInfillRequest {
val settings = LlamaSettings.getCurrentState()
val promptTemplate = getLlamaInfillPromptTemplate(settings)
val prompt = promptTemplate.buildPrompt(details.prefix, details.suffix)
return LlamaCompletionRequest.Builder(prompt)
return LlamaInfillRequest(LlamaCompletionRequest.Builder(null)
.setN_predict(settings.codeCompletionMaxTokens)
.setStream(true)
.setTemperature(0.4)
.setStop(promptTemplate.stopTokens)
.build()
.setStop(promptTemplate.stopTokens), details.prefix, details.suffix)
}
private fun getLlamaInfillPromptTemplate(settings: LlamaSettingsState): InfillPromptTemplate {

View file

@@ -35,11 +35,11 @@ class CodeCompletionServiceTest : IntegrationTest() {
${"z".repeat(247)}
""".trimIndent() // 128 tokens
expectLlama(StreamHttpExchange { request: RequestEntity ->
assertThat(request.uri.path).isEqualTo("/completion")
assertThat(request.uri.path).isEqualTo("/infill")
assertThat(request.method).isEqualTo("POST")
assertThat(request.body)
.extracting("prompt")
.isEqualTo(InfillPromptTemplate.LLAMA.buildPrompt(prefix, suffix))
.extracting("input_prefix", "input_suffix")
.containsExactly(prefix, suffix)
listOf(jsonMapResponse(e("content", expectedCompletion), e("stop", true)))
})