refactor: code completion request creation to use CodeCompletionRequestFactory for better abstraction

This commit is contained in:
Carl-Robert Linnupuu 2024-03-28 11:29:45 +02:00
parent 6255bf9eb6
commit 2ad1e4e917
2 changed files with 11 additions and 10 deletions

View file

@@ -10,7 +10,7 @@ import static ee.carlrobert.codegpt.settings.service.ServiceType.YOU;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.components.Service;
import com.intellij.openapi.diagnostic.Logger;
import ee.carlrobert.codegpt.codecompletions.CodeCompletionRequestProvider;
import ee.carlrobert.codegpt.codecompletions.CodeCompletionRequestFactory;
import ee.carlrobert.codegpt.codecompletions.InfillRequestDetails;
import ee.carlrobert.codegpt.completions.llama.LlamaModel;
import ee.carlrobert.codegpt.completions.llama.PromptTemplate;
@@ -97,12 +97,15 @@ public final class CompletionRequestService {
public EventSource getCodeCompletionAsync(
InfillRequestDetails requestDetails,
CompletionEventListener<String> eventListener) {
var requestProvider = new CodeCompletionRequestProvider(requestDetails);
return switch (GeneralSettings.getCurrentState().getSelectedService()) {
case OPENAI -> CompletionClientProvider.getOpenAIClient()
.getCompletionAsync(requestProvider.buildOpenAIRequest(), eventListener);
.getCompletionAsync(
CodeCompletionRequestFactory.INSTANCE.buildOpenAIRequest(requestDetails),
eventListener);
case LLAMA_CPP -> CompletionClientProvider.getLlamaClient()
.getChatCompletionAsync(requestProvider.buildLlamaRequest(), eventListener);
.getChatCompletionAsync(
CodeCompletionRequestFactory.INSTANCE.buildLlamaRequest(requestDetails),
eventListener);
default ->
throw new IllegalArgumentException("Code completion not supported for selected service");
};

View file

@@ -5,12 +5,10 @@ import ee.carlrobert.codegpt.settings.service.llama.LlamaSettings
import ee.carlrobert.llm.client.llama.completion.LlamaCompletionRequest
import ee.carlrobert.llm.client.openai.completion.request.OpenAITextCompletionRequest
class CodeCompletionRequestProvider(private val details: InfillRequestDetails) {
companion object {
private const val MAX_TOKENS = 24
}
object CodeCompletionRequestFactory {
private const val MAX_TOKENS = 128
fun buildOpenAIRequest(): OpenAITextCompletionRequest {
fun buildOpenAIRequest(details: InfillRequestDetails): OpenAITextCompletionRequest {
return OpenAITextCompletionRequest.Builder(details.prefix)
.setSuffix(details.suffix)
.setStream(true)
@@ -19,7 +17,7 @@ class CodeCompletionRequestProvider(private val details: InfillRequestDetails) {
.build()
}
fun buildLlamaRequest(): LlamaCompletionRequest {
fun buildLlamaRequest(details: InfillRequestDetails): LlamaCompletionRequest {
val promptTemplate = getLlamaInfillPromptTemplate()
val prompt = promptTemplate.buildPrompt(details.prefix, details.suffix)
return LlamaCompletionRequest.Builder(prompt)