Mirror of https://github.com/carlrobertoh/ProxyAI.git (synced 2026-05-12 14:10:29 +00:00)
feat: Support CodeQwen1.5-Chat model (#527)
* feat: Support CodeQwen1.5-Chat model
* Declare model directories explicitly
This commit is contained in: parent e40630d796, commit ee16bfee10
5 changed files with 233 additions and 76 deletions
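Context for the second bullet: the getDirectory() heuristic removed further down in this diff derived the Hugging Face repository name from the model file name. That works for lmstudio-community file names, but it can never produce a repository named Qwen_-_CodeQwen1.5-7B-Chat-gguf, so the repository directory is now an explicit constructor argument. A minimal standalone sketch (not part of the commit) that reproduces the removed regex for illustration:

public class DirectoryHeuristicDemo {

  // The exact regex from the removed HuggingFaceModel.getDirectory().
  static String deriveDirectory(String fileName) {
    return fileName.replaceFirst("-[^.-]+\\.gguf$", "-GGUF");
  }

  public static void main(String[] args) {
    // Matches lmstudio-community style names:
    System.out.println(deriveDirectory("Meta-Llama-3-8B-Instruct-Q4_K_M.gguf"));
    // -> Meta-Llama-3-8B-Instruct-GGUF

    // Does not match the CodeQwen file name at all, and no rewrite of it could
    // yield the actual repository "Qwen_-_CodeQwen1.5-7B-Chat-gguf":
    System.out.println(deriveDirectory("CodeQwen1.5-7B-Chat.Q4_K_M.gguf"));
    // -> CodeQwen1.5-7B-Chat.Q4_K_M.gguf (unchanged)
  }
}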
HuggingFaceModel.java
@@ -4,89 +4,139 @@ import static java.lang.String.format;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.regex.Pattern;
 
 public enum HuggingFaceModel {
 
-  CODE_LLAMA_7B_Q3(7, 3, "codellama-7b-instruct.Q3_K_M.gguf"),
-  CODE_LLAMA_7B_Q4(7, 4, "codellama-7b-instruct.Q4_K_M.gguf"),
-  CODE_LLAMA_7B_Q5(7, 5, "codellama-7b-instruct.Q5_K_M.gguf"),
-  CODE_LLAMA_13B_Q3(13, 3, "codellama-13b-instruct.Q3_K_M.gguf"),
-  CODE_LLAMA_13B_Q4(13, 4, "codellama-13b-instruct.Q4_K_M.gguf"),
-  CODE_LLAMA_13B_Q5(13, 5, "codellama-13b-instruct.Q5_K_M.gguf"),
-  CODE_LLAMA_34B_Q3(34, 3, "codellama-34b-instruct.Q3_K_M.gguf"),
-  CODE_LLAMA_34B_Q4(34, 4, "codellama-34b-instruct.Q4_K_M.gguf"),
-  CODE_LLAMA_34B_Q5(34, 5, "codellama-34b-instruct.Q5_K_M.gguf"),
+  CODE_LLAMA_7B_Q3(7, 3, "CodeLlama-7B-Instruct-GGUF", "codellama-7b-instruct.Q3_K_M.gguf"),
+  CODE_LLAMA_7B_Q4(7, 4, "CodeLlama-7B-Instruct-GGUF", "codellama-7b-instruct.Q4_K_M.gguf"),
+  CODE_LLAMA_7B_Q5(7, 5, "CodeLlama-7B-Instruct-GGUF", "codellama-7b-instruct.Q5_K_M.gguf"),
+  CODE_LLAMA_13B_Q3(13, 3, "CodeLlama-13B-Instruct-GGUF", "codellama-13b-instruct.Q3_K_M.gguf"),
+  CODE_LLAMA_13B_Q4(13, 4, "CodeLlama-13B-Instruct-GGUF", "codellama-13b-instruct.Q4_K_M.gguf"),
+  CODE_LLAMA_13B_Q5(13, 5, "CodeLlama-13B-Instruct-GGUF", "codellama-13b-instruct.Q5_K_M.gguf"),
+  CODE_LLAMA_34B_Q3(34, 3, "CodeLlama-34B-Instruct-GGUF", "codellama-34b-instruct.Q3_K_M.gguf"),
+  CODE_LLAMA_34B_Q4(34, 4, "CodeLlama-34B-Instruct-GGUF", "codellama-34b-instruct.Q4_K_M.gguf"),
+  CODE_LLAMA_34B_Q5(34, 5, "CodeLlama-34B-Instruct-GGUF", "codellama-34b-instruct.Q5_K_M.gguf"),
 
-  CODE_BOOGA_34B_Q3(34, 3, "codebooga-34b-v0.1.Q3_K_M.gguf"),
-  CODE_BOOGA_34B_Q4(34, 4, "codebooga-34b-v0.1.Q4_K_M.gguf"),
-  CODE_BOOGA_34B_Q5(34, 5, "codebooga-34b-v0.1.Q5_K_M.gguf"),
+  CODE_BOOGA_34B_Q3(34, 3, "CodeBooga-34B-v0.1-GGUF", "codebooga-34b-v0.1.Q3_K_M.gguf"),
+  CODE_BOOGA_34B_Q4(34, 4, "CodeBooga-34B-v0.1-GGUF", "codebooga-34b-v0.1.Q4_K_M.gguf"),
+  CODE_BOOGA_34B_Q5(34, 5, "CodeBooga-34B-v0.1-GGUF", "codebooga-34b-v0.1.Q5_K_M.gguf"),
 
-  DEEPSEEK_CODER_1_3B_Q3(1, 3, "deepseek-coder-1.3b-instruct.Q3_K_M.gguf", 0.705),
-  DEEPSEEK_CODER_1_3B_Q4(1, 4, "deepseek-coder-1.3b-instruct.Q4_K_M.gguf", 0.874),
-  DEEPSEEK_CODER_1_3B_Q5(1, 5, "deepseek-coder-1.3b-instruct.Q5_K_M.gguf", 1.0),
-  DEEPSEEK_CODER_6_7B_Q3(7, 3, "deepseek-coder-6.7b-instruct.Q3_K_M.gguf"),
-  DEEPSEEK_CODER_6_7B_Q4(7, 4, "deepseek-coder-6.7b-instruct.Q4_K_M.gguf"),
-  DEEPSEEK_CODER_6_7B_Q5(7, 5, "deepseek-coder-6.7b-instruct.Q5_K_M.gguf"),
-  DEEPSEEK_CODER_33B_Q3(33, 3, "deepseek-coder-33b-instruct.Q3_K_M.gguf", 16.1),
-  DEEPSEEK_CODER_33B_Q4(33, 4, "deepseek-coder-33b-instruct.Q4_K_M.gguf", 19.9),
-  DEEPSEEK_CODER_33B_Q5(33, 5, "deepseek-coder-33b-instruct.Q5_K_M.gguf", 23.5),
+  DEEPSEEK_CODER_1_3B_Q3(1, 3, "deepseek-coder-1.3b-instruct-GGUF",
+      "deepseek-coder-1.3b-instruct.Q3_K_M.gguf", 0.705),
+  DEEPSEEK_CODER_1_3B_Q4(1, 4, "deepseek-coder-1.3b-instruct-GGUF",
+      "deepseek-coder-1.3b-instruct.Q4_K_M.gguf", 0.874),
+  DEEPSEEK_CODER_1_3B_Q5(1, 5, "deepseek-coder-1.3b-instruct-GGUF",
+      "deepseek-coder-1.3b-instruct.Q5_K_M.gguf", 1.0),
+  DEEPSEEK_CODER_6_7B_Q3(7, 3, "deepseek-coder-6.7b-instruct-GGUF",
+      "deepseek-coder-6.7b-instruct.Q3_K_M.gguf"),
+  DEEPSEEK_CODER_6_7B_Q4(7, 4, "deepseek-coder-6.7b-instruct-GGUF",
+      "deepseek-coder-6.7b-instruct.Q4_K_M.gguf"),
+  DEEPSEEK_CODER_6_7B_Q5(7, 5, "deepseek-coder-6.7b-instruct-GGUF",
+      "deepseek-coder-6.7b-instruct.Q5_K_M.gguf"),
+  DEEPSEEK_CODER_33B_Q3(33, 3, "deepseek-coder-33b-instruct-GGUF",
+      "deepseek-coder-33b-instruct.Q3_K_M.gguf", 16.1),
+  DEEPSEEK_CODER_33B_Q4(33, 4, "deepseek-coder-33b-instruct-GGUF",
+      "deepseek-coder-33b-instruct.Q4_K_M.gguf", 19.9),
+  DEEPSEEK_CODER_33B_Q5(33, 5, "deepseek-coder-33b-instruct-GGUF",
+      "deepseek-coder-33b-instruct.Q5_K_M.gguf", 23.5),
 
-  PHIND_CODE_LLAMA_34B_Q3(34, 3, "phind-codellama-34b-v2.Q3_K_M.gguf"),
-  PHIND_CODE_LLAMA_34B_Q4(34, 4, "phind-codellama-34b-v2.Q4_K_M.gguf"),
-  PHIND_CODE_LLAMA_34B_Q5(34, 5, "phind-codellama-34b-v2.Q5_K_M.gguf"),
+  PHIND_CODE_LLAMA_34B_Q3(34, 3, "Phind-CodeLlama-34B-v2-GGUF",
+      "phind-codellama-34b-v2.Q3_K_M.gguf"),
+  PHIND_CODE_LLAMA_34B_Q4(34, 4, "Phind-CodeLlama-34B-v2-GGUF",
+      "phind-codellama-34b-v2.Q4_K_M.gguf"),
+  PHIND_CODE_LLAMA_34B_Q5(34, 5, "Phind-CodeLlama-34B-v2-GGUF",
+      "phind-codellama-34b-v2.Q5_K_M.gguf"),
 
-  WIZARD_CODER_PYTHON_7B_Q3(7, 3, "wizardcoder-python-7b-v1.0.Q3_K_M.gguf"),
-  WIZARD_CODER_PYTHON_7B_Q4(7, 4, "wizardcoder-python-7b-v1.0.Q4_K_M.gguf"),
-  WIZARD_CODER_PYTHON_7B_Q5(7, 5, "wizardcoder-python-7b-v1.0.Q5_K_M.gguf"),
-  WIZARD_CODER_PYTHON_13B_Q3(13, 3, "wizardcoder-python-13b-v1.0.Q3_K_M.gguf"),
-  WIZARD_CODER_PYTHON_13B_Q4(13, 4, "wizardcoder-python-13b-v1.0.Q4_K_M.gguf"),
-  WIZARD_CODER_PYTHON_13B_Q5(13, 5, "wizardcoder-python-13b-v1.0.Q5_K_M.gguf"),
-  WIZARD_CODER_PYTHON_34B_Q3(34, 3, "wizardcoder-python-34b-v1.0.Q3_K_M.gguf"),
-  WIZARD_CODER_PYTHON_34B_Q4(34, 4, "wizardcoder-python-34b-v1.0.Q4_K_M.gguf"),
-  WIZARD_CODER_PYTHON_34B_Q5(34, 5, "wizardcoder-python-34b-v1.0.Q5_K_M.gguf"),
+  WIZARD_CODER_PYTHON_7B_Q3(7, 3, "WizardCoder-Python-7B-V1.0-GGUF",
+      "wizardcoder-python-7b-v1.0.Q3_K_M.gguf"),
+  WIZARD_CODER_PYTHON_7B_Q4(7, 4, "WizardCoder-Python-7B-V1.0-GGUF",
+      "wizardcoder-python-7b-v1.0.Q4_K_M.gguf"),
+  WIZARD_CODER_PYTHON_7B_Q5(7, 5, "WizardCoder-Python-7B-V1.0-GGUF",
+      "wizardcoder-python-7b-v1.0.Q5_K_M.gguf"),
+  WIZARD_CODER_PYTHON_13B_Q3(13, 3, "WizardCoder-Python-13B-V1.0-GGUF",
+      "wizardcoder-python-13b-v1.0.Q3_K_M.gguf"),
+  WIZARD_CODER_PYTHON_13B_Q4(13, 4, "WizardCoder-Python-13B-V1.0-GGUF",
+      "wizardcoder-python-13b-v1.0.Q4_K_M.gguf"),
+  WIZARD_CODER_PYTHON_13B_Q5(13, 5, "WizardCoder-Python-13B-V1.0-GGUF",
+      "wizardcoder-python-13b-v1.0.Q5_K_M.gguf"),
+  WIZARD_CODER_PYTHON_34B_Q3(34, 3, "WizardCoder-Python-34B-V1.0-GGUF",
+      "wizardcoder-python-34b-v1.0.Q3_K_M.gguf"),
+  WIZARD_CODER_PYTHON_34B_Q4(34, 4, "WizardCoder-Python-34B-V1.0-GGUF",
+      "wizardcoder-python-34b-v1.0.Q4_K_M.gguf"),
+  WIZARD_CODER_PYTHON_34B_Q5(34, 5, "WizardCoder-Python-34B-V1.0-GGUF",
+      "wizardcoder-python-34b-v1.0.Q5_K_M.gguf"),
 
-  LLAMA_3_8B_IQ3_M(8, 3, "Meta-Llama-3-8B-Instruct-IQ3_M.gguf", "lmstudio-community", 3.78),
-  LLAMA_3_8B_Q4_K_M(8, 4, "Meta-Llama-3-8B-Instruct-Q4_K_M.gguf", "lmstudio-community", 4.92),
-  LLAMA_3_8B_Q5_K_M(8, 5, "Meta-Llama-3-8B-Instruct-Q5_K_M.gguf", "lmstudio-community", 5.73),
-  LLAMA_3_8B_Q6_K(8, 6, "Meta-Llama-3-8B-Instruct-Q6_K.gguf", "lmstudio-community", 6.6),
-  LLAMA_3_8B_Q8_0(8, 8, "Meta-Llama-3-8B-Instruct-Q8_0.gguf", "lmstudio-community", 8.54),
-  LLAMA_3_70B_IQ1(70, 1, "Meta-Llama-3-70B-Instruct-IQ1_M.gguf", "lmstudio-community", 16.8),
-  LLAMA_3_70B_IQ2_XS(70, 2, "Meta-Llama-3-70B-Instruct-IQ2_XS.gguf", "lmstudio-community", 21.1),
-  LLAMA_3_70B_Q4_K_M(70, 4, "Meta-Llama-3-70B-Instruct-Q4_K_M.gguf", "lmstudio-community", 42.5),
+  LLAMA_3_8B_IQ3_M(8, 3, "Meta-Llama-3-8B-Instruct-GGUF", "Meta-Llama-3-8B-Instruct-IQ3_M.gguf",
+      "lmstudio-community", 3.78),
+  LLAMA_3_8B_Q4_K_M(8, 4, "Meta-Llama-3-8B-Instruct-GGUF", "Meta-Llama-3-8B-Instruct-Q4_K_M.gguf",
+      "lmstudio-community", 4.92),
+  LLAMA_3_8B_Q5_K_M(8, 5, "Meta-Llama-3-8B-Instruct-GGUF", "Meta-Llama-3-8B-Instruct-Q5_K_M.gguf",
+      "lmstudio-community", 5.73),
+  LLAMA_3_8B_Q6_K(8, 6, "Meta-Llama-3-8B-Instruct-GGUF", "Meta-Llama-3-8B-Instruct-Q6_K.gguf",
+      "lmstudio-community", 6.6),
+  LLAMA_3_8B_Q8_0(8, 8, "Meta-Llama-3-8B-Instruct-GGUF", "Meta-Llama-3-8B-Instruct-Q8_0.gguf",
+      "lmstudio-community", 8.54),
+  LLAMA_3_70B_IQ1(70, 1, "Meta-Llama-3-70B-Instruct-GGUF", "Meta-Llama-3-70B-Instruct-IQ1_M.gguf",
+      "lmstudio-community", 16.8),
+  LLAMA_3_70B_IQ2_XS(70, 2, "Meta-Llama-3-70B-Instruct-GGUF",
+      "Meta-Llama-3-70B-Instruct-IQ2_XS.gguf", "lmstudio-community", 21.1),
+  LLAMA_3_70B_Q4_K_M(70, 4, "Meta-Llama-3-70B-Instruct-GGUF",
+      "Meta-Llama-3-70B-Instruct-Q4_K_M.gguf", "lmstudio-community", 42.5),
 
-  PHI_3_3_8B_4K_IQ4_NL(4, 4, "Phi-3-mini-4k-instruct-IQ4_NL.gguf", "lmstudio-community", 2.18),
-  PHI_3_3_8B_4K_Q5_K_M(4, 5, "Phi-3-mini-4k-instruct-Q5_K_M.gguf", "lmstudio-community", 2.64),
-  PHI_3_3_8B_4K_Q6_K(4, 6, "Phi-3-mini-4k-instruct-Q6_K.gguf", "lmstudio-community", 3.14),
-  PHI_3_3_8B_4K_Q8_0(4, 8, "Phi-3-mini-4k-instruct-Q8_0.gguf", "lmstudio-community", 4.06),
-  PHI_3_3_8B_4K_FP16(4, 16, "Phi-3-mini-4k-instruct-fp16.gguf", "lmstudio-community", 7.64),
+  PHI_3_3_8B_4K_IQ4_NL(4, 4, "Phi-3-mini-4k-instruct-GGUF", "Phi-3-mini-4k-instruct-IQ4_NL.gguf",
+      "lmstudio-community", 2.18),
+  PHI_3_3_8B_4K_Q5_K_M(4, 5, "Phi-3-mini-4k-instruct-GGUF", "Phi-3-mini-4k-instruct-Q5_K_M.gguf",
+      "lmstudio-community", 2.64),
+  PHI_3_3_8B_4K_Q6_K(4, 6, "Phi-3-mini-4k-instruct-GGUF", "Phi-3-mini-4k-instruct-Q6_K.gguf",
+      "lmstudio-community", 3.14),
+  PHI_3_3_8B_4K_Q8_0(4, 8, "Phi-3-mini-4k-instruct-GGUF", "Phi-3-mini-4k-instruct-Q8_0.gguf",
+      "lmstudio-community", 4.06),
+  PHI_3_3_8B_4K_FP16(4, 16, "Phi-3-mini-4k-instruct-GGUF", "Phi-3-mini-4k-instruct-fp16.gguf",
+      "lmstudio-community", 7.64),
 
-  CODE_GEMMA_7B_Q3_K_M(7, 3, "codegemma-7b-it-Q3_K_M.gguf", "lmstudio-community", 4.37),
-  CODE_GEMMA_7B_Q4_K_M(7, 4, "codegemma-7b-it-Q4_K_M.gguf", "lmstudio-community", 5.33),
-  CODE_GEMMA_7B_Q5_K_M(7, 5, "codegemma-7b-it-Q5_K_M.gguf", "lmstudio-community", 6.14),
-  CODE_GEMMA_7B_Q6_K(7, 6, "codegemma-7b-it-Q6_K.gguf", "lmstudio-community", 7.01),
-  CODE_GEMMA_7B_Q8_0(7, 8, "codegemma-7b-it-Q8_0.gguf", "lmstudio-community", 9.08),
+  CODE_GEMMA_7B_Q3_K_M(7, 3, "codegemma-7b-it-GGUF", "codegemma-7b-it-Q3_K_M.gguf",
+      "lmstudio-community", 4.37),
+  CODE_GEMMA_7B_Q4_K_M(7, 4, "codegemma-7b-it-GGUF", "codegemma-7b-it-Q4_K_M.gguf",
+      "lmstudio-community", 5.33),
+  CODE_GEMMA_7B_Q5_K_M(7, 5, "codegemma-7b-it-GGUF", "codegemma-7b-it-Q5_K_M.gguf",
+      "lmstudio-community", 6.14),
+  CODE_GEMMA_7B_Q6_K(7, 6, "codegemma-7b-it-GGUF", "codegemma-7b-it-Q6_K.gguf",
+      "lmstudio-community", 7.01),
+  CODE_GEMMA_7B_Q8_0(7, 8, "codegemma-7b-it-GGUF", "codegemma-7b-it-Q8_0.gguf",
+      "lmstudio-community", 9.08),
 
+  CODE_QWEN_1_5_7B_Q3_K_M(7, 3, "Qwen_-_CodeQwen1.5-7B-Chat-gguf",
+      "CodeQwen1.5-7B-Chat.Q3_K_M.gguf", "RichardErkhov", 3.81),
+  CODE_QWEN_1_5_7B_Q4_K_M(7, 4, "Qwen_-_CodeQwen1.5-7B-Chat-gguf",
+      "CodeQwen1.5-7B-Chat.Q4_K_M.gguf", "RichardErkhov", 4.74),
+  CODE_QWEN_1_5_7B_Q5_K_M(7, 5, "Qwen_-_CodeQwen1.5-7B-Chat-gguf",
+      "CodeQwen1.5-7B-Chat.Q5_K_M.gguf", "RichardErkhov", 5.43),
+  CODE_QWEN_1_5_7B_Q6_K(7, 6, "Qwen_-_CodeQwen1.5-7B-Chat-gguf",
+      "CodeQwen1.5-7B-Chat.Q6_K.gguf", "RichardErkhov", 6.38),
   ;
 
   private final int parameterSize;
   private final int quantization;
-  private final String modelName;
+  private final String directory;
+  private final String fileName;
   private final String user;
   private final Double downloadSize; // in GB
 
-  HuggingFaceModel(int parameterSize, int quantization, String modelName) {
-    this(parameterSize, quantization, modelName, "TheBloke", null);
+  HuggingFaceModel(int parameterSize, int quantization, String directory, String fileName) {
+    this(parameterSize, quantization, directory, fileName, "TheBloke", null);
   }
 
-  HuggingFaceModel(int parameterSize, int quantization, String modelName, Double downloadSize) {
-    this(parameterSize, quantization, modelName, "TheBloke", downloadSize);
+  HuggingFaceModel(int parameterSize, int quantization, String directory, String fileName,
+      Double downloadSize) {
+    this(parameterSize, quantization, directory, fileName, "TheBloke", downloadSize);
  }
 
-  HuggingFaceModel(int parameterSize, int quantization, String modelName, String user,
-      Double downloadSize) {
+  HuggingFaceModel(int parameterSize, int quantization, String directory, String fileName,
+      String user, Double downloadSize) {
    this.parameterSize = parameterSize;
    this.quantization = quantization;
-    this.modelName = modelName;
+    this.directory = directory;
+    this.fileName = fileName;
    this.user = user;
    this.downloadSize = downloadSize;
  }
@@ -108,16 +158,13 @@
   }
 
   public String getFileName() {
-    if ("TheBloke".equals(user)) {
-      return modelName.toLowerCase().replace("-gguf", format(".Q%d_K_M.gguf", quantization));
-    }
-    return modelName;
+    return fileName;
   }
 
   public URL getFileURL() {
     try {
       return new URL(
-          "https://huggingface.co/%s/%s/resolve/main/%s".formatted(user, getDirectory(), getFileName()));
+          "https://huggingface.co/%s/%s/resolve/main/%s".formatted(user, directory, fileName));
     } catch (MalformedURLException ex) {
       throw new RuntimeException(ex);
     }
@@ -125,22 +172,13 @@
 
   public URL getHuggingFaceURL() {
     try {
-      return new URL("https://huggingface.co/%s/%s".formatted(user, getDirectory()));
+      return new URL("https://huggingface.co/%s/%s".formatted(user, directory));
     } catch (MalformedURLException ex) {
       throw new RuntimeException(ex);
     }
   }
 
-  private String getDirectory() {
-    return modelName.replaceFirst("-[^.-]+\\.gguf$", "-GGUF");
-  }
-
   public String getQuantizationLabel() {
     return format("%d-bit precision", quantization);
   }
-
-  @Override
-  public String toString() {
-    return modelName;
-  }
 }
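With directory and fileName stored as separate fields, the accessors above reduce to plain lookups and string formatting. A short sketch (not part of the diff) of what they return for one of the new constants; the expected values follow directly from the enum arguments above, and the import path assumes HuggingFaceModel sits alongside PromptTemplate in ee.carlrobert.codegpt.completions.llama:

// Assumed: import ee.carlrobert.codegpt.completions.llama.HuggingFaceModel;
public class HuggingFaceModelDemo {
  public static void main(String[] args) {
    HuggingFaceModel model = HuggingFaceModel.CODE_QWEN_1_5_7B_Q4_K_M;
    System.out.println(model.getFileName());
    // CodeQwen1.5-7B-Chat.Q4_K_M.gguf
    System.out.println(model.getFileURL());
    // https://huggingface.co/RichardErkhov/Qwen_-_CodeQwen1.5-7B-Chat-gguf/resolve/main/CodeQwen1.5-7B-Chat.Q4_K_M.gguf
    System.out.println(model.getHuggingFaceURL());
    // https://huggingface.co/RichardErkhov/Qwen_-_CodeQwen1.5-7B-Chat-gguf
    System.out.println(model.getQuantizationLabel());
    // 4-bit precision
  }
}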
LlamaModel.java
@@ -127,6 +127,23 @@ public enum LlamaModel {
           HuggingFaceModel.CODE_GEMMA_7B_Q5_K_M,
           HuggingFaceModel.CODE_GEMMA_7B_Q6_K,
           HuggingFaceModel.CODE_GEMMA_7B_Q8_0)),
+  CODE_QWEN(
+      "CodeQwen1.5", """
+          A specialized codeLLM built upon the Qwen1.5 language model. \
+          CodeQwen1.5-7B has been pretrained with around 3 trillion tokens of code-related data. \
+          It supports an extensive repertoire of 92 programming languages, and it exhibits \
+          exceptional capacity in long-context understanding and generation with the ability to \
+          process information of 64K tokens. In terms of performance, CodeQwen1.5 demonstrates \
+          impressive capabilities in basic code generation, long-context modelling, code editing \
+          and SQL. We believe this model can significantly enhance developer productivity and \
+          streamline software development workflows within diverse technological environments.""",
+      PromptTemplate.CODE_QWEN,
+      InfillPromptTemplate.CODE_QWEN,
+      List.of(
+          HuggingFaceModel.CODE_QWEN_1_5_7B_Q3_K_M,
+          HuggingFaceModel.CODE_QWEN_1_5_7B_Q4_K_M,
+          HuggingFaceModel.CODE_QWEN_1_5_7B_Q5_K_M,
+          HuggingFaceModel.CODE_QWEN_1_5_7B_Q6_K)),
   ;
 
   private final String label;
PromptTemplate.java
@@ -162,6 +162,30 @@ public enum PromptTemplate {
           .toString();
     }
   },
+  CODE_QWEN("CodeQwen1.5", List.of("<|endoftext|>")) {
+    @Override
+    public String buildPrompt(String systemPrompt, String userPrompt, List<Message> history) {
+      StringBuilder prompt = new StringBuilder();
+
+      if (systemPrompt != null && !systemPrompt.isBlank()) {
+        prompt.append("<|im_start|>system\n")
+            .append(systemPrompt)
+            .append("<|im_end|>\n");
+      }
+
+      for (Message message : history) {
+        prompt.append("<|im_start|>user\n")
+            .append(message.getPrompt())
+            .append("<|im_end|>\n<|im_start|>assistant\n")
+            .append(message.getResponse()).append("<|im_end|>\n");
+      }
+
+      return prompt.append("<|im_start|>user\n")
+          .append(userPrompt)
+          .append("<|im_end|>\n<|im_start|>assistant\n")
+          .toString();
+    }
+  },
   ALPACA("Alpaca/Vicuna") {
     @Override
     public String buildPrompt(String systemPrompt, String userPrompt, List<Message> history) {
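The new CODE_QWEN template emits ChatML markup. A single-turn sketch with illustrative prompt strings; the tests added at the bottom of this diff pin down this exact shape:

// Sketch: single-turn output of PromptTemplate.CODE_QWEN (prompt strings are made up).
String prompt = PromptTemplate.CODE_QWEN.buildPrompt(
    "You are a coding assistant.",  // systemPrompt (skipped entirely when null or blank)
    "Reverse a string in Java.",    // userPrompt
    List.of());                     // empty history

// prompt now equals:
// <|im_start|>system
// You are a coding assistant.<|im_end|>
// <|im_start|>user
// Reverse a string in Java.<|im_end|>
// <|im_start|>assistant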
InfillPromptTemplate.kt
@@ -20,6 +20,11 @@ enum class InfillPromptTemplate(val label: String, val stopTokens: List<String>?
             return "<|fim_prefix|>$prefix<|fim_suffix|>$suffix<|fim_middle|>"
         }
     },
+    CODE_QWEN("CodeQwen1.5", listOf("<|endoftext|>")) {
+        override fun buildPrompt(prefix: String, suffix: String): String {
+            return "<fim_prefix>$prefix<fim_suffix>$suffix<fim_middle>"
+        }
+    },
     STABILITY("Stability AI", listOf("<|endoftext|>")) {
         override fun buildPrompt(prefix: String, suffix: String): String {
             return "<fim_prefix>$prefix<fim_suffix>$suffix<fim_middle>"
PromptTemplateTest.kt
@@ -3,6 +3,7 @@ package ee.carlrobert.codegpt.completions
 import ee.carlrobert.codegpt.completions.llama.PromptTemplate.ALPACA
 import ee.carlrobert.codegpt.completions.llama.PromptTemplate.CHAT_ML
 import ee.carlrobert.codegpt.completions.llama.PromptTemplate.CODE_GEMMA
+import ee.carlrobert.codegpt.completions.llama.PromptTemplate.CODE_QWEN
 import ee.carlrobert.codegpt.completions.llama.PromptTemplate.LLAMA
 import ee.carlrobert.codegpt.completions.llama.PromptTemplate.LLAMA_3
 import ee.carlrobert.codegpt.completions.llama.PromptTemplate.PHI_3
@@ -236,6 +237,78 @@ class PromptTemplateTest {
             """.trimIndent())
     }
 
+    @Test
+    fun shouldBuildCodeQwenPromptWithoutHistory() {
+        val prompt = CODE_QWEN.buildPrompt(SYSTEM_PROMPT, USER_PROMPT, listOf())
+
+        assertThat(prompt).isEqualTo("""
+            <|im_start|>system
+            TEST_SYSTEM_PROMPT<|im_end|>
+            <|im_start|>user
+            TEST_USER_PROMPT<|im_end|>
+            <|im_start|>assistant
+
+            """.trimIndent())
+    }
+
+    @ParameterizedTest
+    @NullAndEmptySource
+    @ValueSource(strings = [" ", "\t", "\n"])
+    fun shouldBuildCodeQwenPromptWithoutHistorySkippingBlankSystemPrompt(systemPrompt: String?) {
+        val prompt = CODE_QWEN.buildPrompt(systemPrompt, USER_PROMPT, listOf())
+
+        assertThat(prompt).isEqualTo("""
+            <|im_start|>user
+            TEST_USER_PROMPT<|im_end|>
+            <|im_start|>assistant
+
+            """.trimIndent())
+    }
+
+    @Test
+    fun shouldBuildCodeQwenPromptWithHistory() {
+        val prompt = CODE_QWEN.buildPrompt(SYSTEM_PROMPT, USER_PROMPT, HISTORY)
+
+        assertThat(prompt).isEqualTo("""
+            <|im_start|>system
+            TEST_SYSTEM_PROMPT<|im_end|>
+            <|im_start|>user
+            TEST_PREV_PROMPT_1<|im_end|>
+            <|im_start|>assistant
+            TEST_PREV_RESPONSE_1<|im_end|>
+            <|im_start|>user
+            TEST_PREV_PROMPT_2<|im_end|>
+            <|im_start|>assistant
+            TEST_PREV_RESPONSE_2<|im_end|>
+            <|im_start|>user
+            TEST_USER_PROMPT<|im_end|>
+            <|im_start|>assistant
+
+            """.trimIndent())
+    }
+
+    @ParameterizedTest
+    @NullAndEmptySource
+    @ValueSource(strings = [" ", "\t", "\n"])
+    fun shouldBuildCodeQwenPromptWithHistorySkippingBlankSystemPrompt(systemPrompt: String?) {
+        val prompt = CODE_QWEN.buildPrompt(systemPrompt, USER_PROMPT, HISTORY)
+
+        assertThat(prompt).isEqualTo("""
+            <|im_start|>user
+            TEST_PREV_PROMPT_1<|im_end|>
+            <|im_start|>assistant
+            TEST_PREV_RESPONSE_1<|im_end|>
+            <|im_start|>user
+            TEST_PREV_PROMPT_2<|im_end|>
+            <|im_start|>assistant
+            TEST_PREV_RESPONSE_2<|im_end|>
+            <|im_start|>user
+            TEST_USER_PROMPT<|im_end|>
+            <|im_start|>assistant
+
+            """.trimIndent())
+    }
+
     @Test
     fun shouldBuildAlpacaPromptWithHistory() {
         val prompt = ALPACA.buildPrompt(SYSTEM_PROMPT, USER_PROMPT, HISTORY)