mirror of
https://github.com/carlrobertoh/ProxyAI.git
synced 2026-05-12 22:31:24 +00:00
feat: code completion improvements
- truncate context when working with larger files
- fix notification error messages
- other minor fixes
This commit is contained in:
parent
f3357ff0a6
commit
abc8dc8d07
8 changed files with 109 additions and 177 deletions
|
|
@ -9,6 +9,7 @@ import com.knuddels.jtokkit.api.EncodingRegistry;
|
|||
import com.knuddels.jtokkit.api.EncodingType;
|
||||
import ee.carlrobert.codegpt.conversations.Conversation;
|
||||
import ee.carlrobert.llm.client.openai.completion.request.OpenAIChatCompletionMessage;
|
||||
import java.util.List;
|
||||
|
||||
@Service
|
||||
public final class EncodingManager {
|
||||
|
|
@ -52,4 +53,19 @@ public final class EncodingManager {
|
|||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Truncates the given text to the given number of tokens.
|
||||
*
|
||||
* @param text The text to truncate.
|
||||
* @param maxTokens The maximum number of tokens to keep.
|
||||
* @param fromStart Whether to truncate from the start or the end of the text.
|
||||
* @return The truncated text.
|
||||
*/
|
||||
public String truncateText(String text, int maxTokens, boolean fromStart) {
|
||||
List<Integer> tokens = encoding.encode(text);
|
||||
int tokensToRetrieve = Math.min(maxTokens, tokens.size());
|
||||
int startIndex = fromStart ? 0 : tokens.size() - tokensToRetrieve;
|
||||
return encoding.decode(tokens.subList(startIndex, startIndex + tokensToRetrieve));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue