mirror of
https://github.com/carlrobertoh/ProxyAI.git
synced 2026-05-11 04:50:31 +00:00
feat: add Qwen 2.5 Coder model
This commit is contained in:
parent
37adb90587
commit
eb36e11c5f
6 changed files with 50 additions and 3 deletions
|
|
@ -120,6 +120,18 @@ public enum HuggingFaceModel {
|
|||
CODE_QWEN_1_5_7B_Q6_K(7, 6, "Qwen_-_CodeQwen1.5-7B-Chat-gguf",
|
||||
"CodeQwen1.5-7B-Chat.Q6_K.gguf", "RichardErkhov", 6.38),
|
||||
|
||||
CODE_QWEN_2_5_1_5B_Q6_K(1, 6, "Qwen2.5-Coder-1.5B-Instruct-GGUF",
|
||||
"qwen2.5-coder-1.5b-instruct-q6_k.gguf", "Qwen", 1.46),
|
||||
CODE_QWEN_2_5_1_5B_Q8_0(1, 8, "Qwen2.5-Coder-1.5B-Instruct-GGUF",
|
||||
"qwen2.5-coder-1.5b-instruct-q8_0.gguf", "Qwen", 1.89),
|
||||
|
||||
CODE_QWEN_2_5_7B_Q4_K_M(7, 4, "Qwen2.5-Coder-7B-Instruct-GGUF",
|
||||
"Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf", "bartowski", 4.68),
|
||||
CODE_QWEN_2_5_7B_Q6_K(7, 6, "Qwen2.5-Coder-7B-Instruct-GGUF",
|
||||
"Qwen2.5-Coder-7B-Instruct-Q6_K.gguf", "bartowski", 6.25),
|
||||
CODE_QWEN_2_5_7B_Q8_0(7, 8, "Qwen2.5-Coder-7B-Instruct-GGUF",
|
||||
"Qwen2.5-Coder-7B-Instruct-Q8_0.gguf", "bartowski", 8.1),
|
||||
|
||||
STABLE_CODE_3B_Q3_K_M(SC3, 3, "stable-code-instruct-3b-Q3_K_M.gguf", 1.39),
|
||||
STABLE_CODE_3B_Q4_K_M(SC3, 4, "stable-code-instruct-3b-Q4_K_M.gguf", 1.71),
|
||||
STABLE_CODE_3B_Q5_K_M(SC3, 5, "stable-code-instruct-3b-Q5_K_M.gguf", 1.99),
|
||||
|
|
|
|||
|
|
@ -170,6 +170,23 @@ public enum LlamaModel {
|
|||
HuggingFaceModel.CODE_QWEN_1_5_7B_Q4_K_M,
|
||||
HuggingFaceModel.CODE_QWEN_1_5_7B_Q5_K_M,
|
||||
HuggingFaceModel.CODE_QWEN_1_5_7B_Q6_K)),
|
||||
CODE_QWEN2_5_CODER(
|
||||
"CodeQwen2.5 Coder", """
|
||||
Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen).\
|
||||
It brings the following improvements upon CodeQwen1.5:
|
||||
|
||||
- Significant improvements in code generation, code reasoning and code fixing. Based on the strong Qwen2.5, we scale up the training tokens to 5.5 trillion, including source code, text-code grounding, synthetic data, etc.
|
||||
- A more comprehensive foundation for real-world applications such as Code Agents. Not only enhancing coding capabilities but also maintaining its strengths in mathematics and general competencies.
|
||||
- Long-context support up to 128K tokens.
|
||||
""",
|
||||
PromptTemplate.CODE_QWEN,
|
||||
InfillPromptTemplate.CODE_QWEN_2_5,
|
||||
List.of(
|
||||
HuggingFaceModel.CODE_QWEN_2_5_1_5B_Q6_K,
|
||||
HuggingFaceModel.CODE_QWEN_2_5_1_5B_Q8_0,
|
||||
HuggingFaceModel.CODE_QWEN_2_5_7B_Q4_K_M,
|
||||
HuggingFaceModel.CODE_QWEN_2_5_7B_Q6_K,
|
||||
HuggingFaceModel.CODE_QWEN_2_5_7B_Q8_0)),
|
||||
STABLE_CODE(
|
||||
"Stable Code Instruct", """
|
||||
stable-code-instruct-3b is a 2.7 billion parameter decoder-only language model tuned from \
|
||||
|
|
|
|||
|
|
@ -162,7 +162,7 @@ public enum PromptTemplate {
|
|||
.toString();
|
||||
}
|
||||
},
|
||||
CODE_QWEN("CodeQwen1.5", List.of("<|endoftext|>")) {
|
||||
CODE_QWEN("CodeQwen", List.of("<|endoftext|>")) {
|
||||
@Override
|
||||
public String buildPrompt(String systemPrompt, String userPrompt, List<Message> history) {
|
||||
StringBuilder prompt = new StringBuilder();
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ public class LlamaSettingsState {
|
|||
private boolean runLocalServer = SystemInfoRt.isUnix;
|
||||
private boolean useCustomModel;
|
||||
private String customLlamaModelPath = "";
|
||||
private HuggingFaceModel huggingFaceModel = HuggingFaceModel.CODE_LLAMA_7B_Q4;
|
||||
private HuggingFaceModel huggingFaceModel = HuggingFaceModel.CODE_QWEN_2_5_1_5B_Q8_0;
|
||||
private PromptTemplate localModelPromptTemplate = PromptTemplate.LLAMA;
|
||||
private PromptTemplate remoteModelPromptTemplate = PromptTemplate.LLAMA;
|
||||
private InfillPromptTemplate localModelInfillPromptTemplate = InfillPromptTemplate.CODE_LLAMA;
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import ee.carlrobert.codegpt.codecompletions.CodeCompletionRequestFactory.buildL
|
|||
import ee.carlrobert.codegpt.codecompletions.CodeCompletionRequestFactory.buildOllamaRequest
|
||||
import ee.carlrobert.codegpt.codecompletions.CodeCompletionRequestFactory.buildOpenAIRequest
|
||||
import ee.carlrobert.codegpt.completions.CompletionClientProvider
|
||||
import ee.carlrobert.codegpt.completions.llama.LlamaModel
|
||||
import ee.carlrobert.codegpt.settings.GeneralSettings
|
||||
import ee.carlrobert.codegpt.settings.service.ServiceType
|
||||
import ee.carlrobert.codegpt.settings.service.ServiceType.*
|
||||
|
|
@ -34,7 +35,7 @@ class CodeCompletionService {
|
|||
.body
|
||||
.getOrDefault("model", null) as String
|
||||
|
||||
LLAMA_CPP -> "LlamaSettings.getCurrentState()."
|
||||
LLAMA_CPP -> LlamaModel.findByHuggingFaceModel(LlamaSettings.getCurrentState().huggingFaceModel).label
|
||||
OLLAMA -> service<OllamaSettings>().state.model
|
||||
else -> null
|
||||
}
|
||||
|
|
|
|||
|
|
@ -50,6 +50,23 @@ enum class InfillPromptTemplate(val label: String, val stopTokens: List<String>?
|
|||
}
|
||||
}
|
||||
},
|
||||
CODE_QWEN_2_5("CodeQwen2.5", listOf()) {
|
||||
override fun buildPrompt(infillDetails: InfillRequest): String {
|
||||
val infillPrompt =
|
||||
"<|fim_prefix|> ${infillDetails.prefix} <|fim_suffix|>${infillDetails.suffix} <|fim_middle|>"
|
||||
return if (infillDetails.context == null || infillDetails.context.contextElements.isEmpty()) {
|
||||
infillPrompt
|
||||
} else {
|
||||
"<|repo_name|>${infillDetails.context.getRepoName()}\n" +
|
||||
infillDetails.context.contextElements.map {
|
||||
"<|file_sep|>${it.filePath()} \n" +
|
||||
it.text()
|
||||
}.joinToString("") { it + "\n" } +
|
||||
"<|file_sep|>${infillDetails.context.enclosingElement.filePath()} \n" +
|
||||
infillPrompt
|
||||
}
|
||||
}
|
||||
},
|
||||
STABILITY("Stability AI", listOf("<|endoftext|>")) {
|
||||
override fun buildPrompt(infillDetails: InfillRequest): String {
|
||||
val infillPrompt =
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue