From 2ad1e4e91718ef8f6b5d094653f2c56cef6d5e71 Mon Sep 17 00:00:00 2001
From: Carl-Robert Linnupuu
Date: Thu, 28 Mar 2024 11:29:45 +0200
Subject: [PATCH] refactor: code completion request creation to use
 CodeCompletionRequestFactory for better abstraction

---
 .../codegpt/completions/CompletionRequestService.java | 11 +++++++----
 ...estProvider.kt => CodeCompletionRequestFactory.kt} | 10 ++++------
 2 files changed, 11 insertions(+), 10 deletions(-)
 rename src/main/kotlin/ee/carlrobert/codegpt/codecompletions/{CodeCompletionRequestProvider.kt => CodeCompletionRequestFactory.kt} (83%)

diff --git a/src/main/java/ee/carlrobert/codegpt/completions/CompletionRequestService.java b/src/main/java/ee/carlrobert/codegpt/completions/CompletionRequestService.java
index 229c9ee8..5805921b 100644
--- a/src/main/java/ee/carlrobert/codegpt/completions/CompletionRequestService.java
+++ b/src/main/java/ee/carlrobert/codegpt/completions/CompletionRequestService.java
@@ -10,7 +10,7 @@ import static ee.carlrobert.codegpt.settings.service.ServiceType.YOU;
 import com.intellij.openapi.application.ApplicationManager;
 import com.intellij.openapi.components.Service;
 import com.intellij.openapi.diagnostic.Logger;
-import ee.carlrobert.codegpt.codecompletions.CodeCompletionRequestProvider;
+import ee.carlrobert.codegpt.codecompletions.CodeCompletionRequestFactory;
 import ee.carlrobert.codegpt.codecompletions.InfillRequestDetails;
 import ee.carlrobert.codegpt.completions.llama.LlamaModel;
 import ee.carlrobert.codegpt.completions.llama.PromptTemplate;
@@ -97,12 +97,15 @@ public final class CompletionRequestService {
   public EventSource getCodeCompletionAsync(
       InfillRequestDetails requestDetails,
       CompletionEventListener eventListener) {
-    var requestProvider = new CodeCompletionRequestProvider(requestDetails);
     return switch (GeneralSettings.getCurrentState().getSelectedService()) {
       case OPENAI -> CompletionClientProvider.getOpenAIClient()
-          .getCompletionAsync(requestProvider.buildOpenAIRequest(), eventListener);
+          .getCompletionAsync(
+              CodeCompletionRequestFactory.INSTANCE.buildOpenAIRequest(requestDetails),
+              eventListener);
       case LLAMA_CPP -> CompletionClientProvider.getLlamaClient()
-          .getChatCompletionAsync(requestProvider.buildLlamaRequest(), eventListener);
+          .getChatCompletionAsync(
+              CodeCompletionRequestFactory.INSTANCE.buildLlamaRequest(requestDetails),
+              eventListener);
       default ->
           throw new IllegalArgumentException("Code completion not supported for selected service");
     };
diff --git a/src/main/kotlin/ee/carlrobert/codegpt/codecompletions/CodeCompletionRequestProvider.kt b/src/main/kotlin/ee/carlrobert/codegpt/codecompletions/CodeCompletionRequestFactory.kt
similarity index 83%
rename from src/main/kotlin/ee/carlrobert/codegpt/codecompletions/CodeCompletionRequestProvider.kt
rename to src/main/kotlin/ee/carlrobert/codegpt/codecompletions/CodeCompletionRequestFactory.kt
index 01a6d51d..0738062c 100644
--- a/src/main/kotlin/ee/carlrobert/codegpt/codecompletions/CodeCompletionRequestProvider.kt
+++ b/src/main/kotlin/ee/carlrobert/codegpt/codecompletions/CodeCompletionRequestFactory.kt
@@ -5,12 +5,10 @@ import ee.carlrobert.codegpt.settings.service.llama.LlamaSettings
 import ee.carlrobert.llm.client.llama.completion.LlamaCompletionRequest
 import ee.carlrobert.llm.client.openai.completion.request.OpenAITextCompletionRequest
 
-class CodeCompletionRequestProvider(private val details: InfillRequestDetails) {
-    companion object {
-        private const val MAX_TOKENS = 24
-    }
+object CodeCompletionRequestFactory {
+    private const val MAX_TOKENS = 128
 
-    fun buildOpenAIRequest(): OpenAITextCompletionRequest {
+    fun buildOpenAIRequest(details: InfillRequestDetails): OpenAITextCompletionRequest {
         return OpenAITextCompletionRequest.Builder(details.prefix)
             .setSuffix(details.suffix)
             .setStream(true)
@@ -19,7 +17,7 @@ class CodeCompletionRequestProvider(private val details: InfillRequestDetails) {
             .build()
     }
 
-    fun buildLlamaRequest(): LlamaCompletionRequest {
+    fun buildLlamaRequest(details: InfillRequestDetails): LlamaCompletionRequest {
         val promptTemplate = getLlamaInfillPromptTemplate()
         val prompt = promptTemplate.buildPrompt(details.prefix, details.suffix)
         return LlamaCompletionRequest.Builder(prompt)