feat: Support CodeGemma 7b Instruct model (#524) (#525)

This commit is contained in:
Rene Leonhardt 2024-05-07 09:43:14 +02:00 committed by GitHub
parent f44fab551b
commit a2a8747aca
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 144 additions and 2 deletions

View file

@@ -59,7 +59,31 @@ public enum HuggingFaceModel {
PHI_3_3_8B_4K_Q5_K_S(4, 5, "Phi-3-mini-4k-instruct-Q5_K_S.gguf", "lmstudio-community", 2.82),
PHI_3_3_8B_4K_Q6_K(4, 6, "Phi-3-mini-4k-instruct-Q6_K.gguf", "lmstudio-community", 3.14),
PHI_3_3_8B_4K_Q8_0(4, 8, "Phi-3-mini-4k-instruct-Q8_0.gguf", "lmstudio-community", 4.06),
PHI_3_3_8B_4K_FP16(4, 16, "Phi-3-mini-4k-instruct-fp16.gguf", "lmstudio-community", 7.64);
PHI_3_3_8B_4K_FP16(4, 16, "Phi-3-mini-4k-instruct-fp16.gguf", "lmstudio-community", 7.64),
CODE_GEMMA_7B_IQ1_S(7, 1, "codegemma-7b-it-IQ1_S.gguf", "lmstudio-community", 2.16),
CODE_GEMMA_7B_IQ1_M(7, 1, "codegemma-7b-it-IQ1_M.gguf", "lmstudio-community", 2.32),
CODE_GEMMA_7B_IQ2_XXS(7, 2, "codegemma-7b-it-IQ2_XXS.gguf", "lmstudio-community", 2.59),
CODE_GEMMA_7B_IQ2_XS(7, 2, "codegemma-7b-it-IQ2_XS.gguf", "lmstudio-community", 2.81),
CODE_GEMMA_7B_IQ2_S(7, 2, "codegemma-7b-it-IQ2_S.gguf", "lmstudio-community", 2.92),
CODE_GEMMA_7B_IQ2_M(7, 2, "codegemma-7b-it-IQ2_M.gguf", "lmstudio-community", 3.13),
CODE_GEMMA_7B_Q2_K(7, 2, "codegemma-7b-it-Q2_K.gguf", "lmstudio-community", 3.48),
CODE_GEMMA_7B_IQ3_XXS(7, 3, "codegemma-7b-it-IQ3_XXS.gguf", "lmstudio-community", 3.49),
CODE_GEMMA_7B_IQ3_XS(7, 3, "codegemma-7b-it-IQ3_XS.gguf", "lmstudio-community", 3.80),
CODE_GEMMA_7B_IQ3_S(7, 3, "codegemma-7b-it-IQ3_S.gguf", "lmstudio-community", 3.98),
CODE_GEMMA_7B_Q3_K_S(7, 3, "codegemma-7b-it-Q3_K_S.gguf", "lmstudio-community", 3.98),
CODE_GEMMA_7B_IQ3_M(7, 3, "codegemma-7b-it-IQ3_M.gguf", "lmstudio-community", 4.11),
CODE_GEMMA_7B_Q3_K_M(7, 3, "codegemma-7b-it-Q3_K_M.gguf", "lmstudio-community", 4.37),
CODE_GEMMA_7B_Q3_K_L(7, 3, "codegemma-7b-it-Q3_K_L.gguf", "lmstudio-community", 4.71),
CODE_GEMMA_7B_IQ4_XS(7, 4, "codegemma-7b-it-IQ4_XS.gguf", "lmstudio-community", 4.77),
CODE_GEMMA_7B_IQ4_NL(7, 4, "codegemma-7b-it-IQ4_NL.gguf", "lmstudio-community", 5.01),
CODE_GEMMA_7B_Q4_K_S(7, 4, "codegemma-7b-it-Q4_K_S.gguf", "lmstudio-community", 5.05),
CODE_GEMMA_7B_Q4_K_M(7, 4, "codegemma-7b-it-Q4_K_M.gguf", "lmstudio-community", 5.33),
CODE_GEMMA_7B_Q5_K_S(7, 5, "codegemma-7b-it-Q5_K_S.gguf", "lmstudio-community", 5.98),
CODE_GEMMA_7B_Q5_K_M(7, 5, "codegemma-7b-it-Q5_K_M.gguf", "lmstudio-community", 6.14),
CODE_GEMMA_7B_Q6_K(7, 6, "codegemma-7b-it-Q6_K.gguf", "lmstudio-community", 7.01),
CODE_GEMMA_7B_Q8_0(7, 8, "codegemma-7b-it-Q8_0.gguf", "lmstudio-community", 9.08),
;
private final int parameterSize;
private final int quantization;

View file

@@ -113,7 +113,38 @@ public enum LlamaModel {
HuggingFaceModel.PHI_3_3_8B_4K_Q5_K_S,
HuggingFaceModel.PHI_3_3_8B_4K_Q6_K,
HuggingFaceModel.PHI_3_3_8B_4K_Q8_0,
HuggingFaceModel.PHI_3_3_8B_4K_FP16));
HuggingFaceModel.PHI_3_3_8B_4K_FP16)),
CODE_GEMMA(
"CodeGemma 7b Instruct",
"CodeGemma 7b Instruct is the first in a series of coding models released by Google. "
+ "As an instruct model, it specializes in being asked coding related questions, but can "
+ "also function as an autocomplete/fill-in-middle model for tools like co-pilot.\n"
+ "This model is perfect for general coding questions or code generation.",
PromptTemplate.CODE_GEMMA,
List.of(
HuggingFaceModel.CODE_GEMMA_7B_IQ1_S,
HuggingFaceModel.CODE_GEMMA_7B_IQ1_M,
HuggingFaceModel.CODE_GEMMA_7B_IQ2_XXS,
HuggingFaceModel.CODE_GEMMA_7B_IQ2_XS,
HuggingFaceModel.CODE_GEMMA_7B_IQ2_S,
HuggingFaceModel.CODE_GEMMA_7B_IQ2_M,
HuggingFaceModel.CODE_GEMMA_7B_Q2_K,
HuggingFaceModel.CODE_GEMMA_7B_IQ3_XXS,
HuggingFaceModel.CODE_GEMMA_7B_IQ3_XS,
HuggingFaceModel.CODE_GEMMA_7B_IQ3_S,
HuggingFaceModel.CODE_GEMMA_7B_Q3_K_S,
HuggingFaceModel.CODE_GEMMA_7B_IQ3_M,
HuggingFaceModel.CODE_GEMMA_7B_Q3_K_M,
HuggingFaceModel.CODE_GEMMA_7B_Q3_K_L,
HuggingFaceModel.CODE_GEMMA_7B_IQ4_XS,
HuggingFaceModel.CODE_GEMMA_7B_IQ4_NL,
HuggingFaceModel.CODE_GEMMA_7B_Q4_K_S,
HuggingFaceModel.CODE_GEMMA_7B_Q4_K_M,
HuggingFaceModel.CODE_GEMMA_7B_Q5_K_S,
HuggingFaceModel.CODE_GEMMA_7B_Q5_K_M,
HuggingFaceModel.CODE_GEMMA_7B_Q6_K,
HuggingFaceModel.CODE_GEMMA_7B_Q8_0)),
;
private final String label;
private final String description;

View file

@@ -144,6 +144,24 @@ public enum PromptTemplate {
.toString();
}
},
// Gemma-style chat template: history is replayed as closed
// <start_of_turn>user / <start_of_turn>model turn pairs, and the prompt ends
// with an open model turn so the model generates the next response.
// NOTE(review): systemPrompt is intentionally unused here — the Gemma turn
// format visible below has no system slot; confirm against the model card.
CODE_GEMMA("CodeGemma 7b Instruct") {
    @Override
    public String buildPrompt(String systemPrompt, String userPrompt, List<Message> history) {
        StringBuilder prompt = new StringBuilder();
        // Each prior exchange contributes one full user turn and one full model turn.
        for (Message message : history) {
            prompt.append("<start_of_turn>user\n")
                .append(message.getPrompt())
                .append("<end_of_turn>\n<start_of_turn>model\n")
                .append(message.getResponse()).append("<end_of_turn>\n");
        }
        // Current user turn, then an unterminated model turn for the completion.
        return prompt.append("<start_of_turn>user\n")
            .append(userPrompt)
            .append("<end_of_turn>\n<start_of_turn>model\n")
            .toString();
    }
},
ALPACA("Alpaca/Vicuna") {
@Override
public String buildPrompt(String systemPrompt, String userPrompt, List<Message> history) {

View file

@@ -2,6 +2,7 @@ package ee.carlrobert.codegpt.completions
import ee.carlrobert.codegpt.completions.llama.PromptTemplate.ALPACA
import ee.carlrobert.codegpt.completions.llama.PromptTemplate.CHAT_ML
import ee.carlrobert.codegpt.completions.llama.PromptTemplate.CODE_GEMMA
import ee.carlrobert.codegpt.completions.llama.PromptTemplate.LLAMA
import ee.carlrobert.codegpt.completions.llama.PromptTemplate.LLAMA_3
import ee.carlrobert.codegpt.completions.llama.PromptTemplate.PHI_3
@@ -167,6 +168,74 @@ class PromptTemplateTest {
<|assistant|>""".trimIndent())
}
@Test
fun shouldBuildCodeGemmaPromptWithoutHistory() {
    // With no history, only the current user turn and an open model turn appear.
    val rendered = CODE_GEMMA.buildPrompt(SYSTEM_PROMPT, USER_PROMPT, emptyList())

    val expected = """
        <start_of_turn>user
        TEST_USER_PROMPT<end_of_turn>
        <start_of_turn>model
        """.trimIndent()
    assertThat(rendered).isEqualTo(expected)
}
@ParameterizedTest
@NullAndEmptySource
@ValueSource(strings = [" ", "\t", "\n"])
fun shouldBuildCodeGemmaPromptWithoutHistorySkippingBlankSystemPrompt(systemPrompt: String?) {
    // Null/blank system prompts must not leak into the rendered template.
    val rendered = CODE_GEMMA.buildPrompt(systemPrompt, USER_PROMPT, emptyList())

    val expected = """
        <start_of_turn>user
        TEST_USER_PROMPT<end_of_turn>
        <start_of_turn>model
        """.trimIndent()
    assertThat(rendered).isEqualTo(expected)
}
@Test
fun shouldBuildCodeGemmaPromptWithHistory() {
    // Each history entry becomes a closed user turn plus a closed model turn,
    // followed by the current user turn and an open model turn.
    val rendered = CODE_GEMMA.buildPrompt(SYSTEM_PROMPT, USER_PROMPT, HISTORY)

    val expected = """
        <start_of_turn>user
        TEST_PREV_PROMPT_1<end_of_turn>
        <start_of_turn>model
        TEST_PREV_RESPONSE_1<end_of_turn>
        <start_of_turn>user
        TEST_PREV_PROMPT_2<end_of_turn>
        <start_of_turn>model
        TEST_PREV_RESPONSE_2<end_of_turn>
        <start_of_turn>user
        TEST_USER_PROMPT<end_of_turn>
        <start_of_turn>model
        """.trimIndent()
    assertThat(rendered).isEqualTo(expected)
}
@ParameterizedTest
@NullAndEmptySource
@ValueSource(strings = [" ", "\t", "\n"])
fun shouldBuildCodeGemmaPromptWithHistorySkippingBlankSystemPrompt(systemPrompt: String?) {
    // A null/blank system prompt leaves the history rendering untouched.
    val rendered = CODE_GEMMA.buildPrompt(systemPrompt, USER_PROMPT, HISTORY)

    val expected = """
        <start_of_turn>user
        TEST_PREV_PROMPT_1<end_of_turn>
        <start_of_turn>model
        TEST_PREV_RESPONSE_1<end_of_turn>
        <start_of_turn>user
        TEST_PREV_PROMPT_2<end_of_turn>
        <start_of_turn>model
        TEST_PREV_RESPONSE_2<end_of_turn>
        <start_of_turn>user
        TEST_USER_PROMPT<end_of_turn>
        <start_of_turn>model
        """.trimIndent()
    assertThat(rendered).isEqualTo(expected)
}
@Test
fun shouldBuildAlpacaPromptWithHistory() {
val prompt = ALPACA.buildPrompt(SYSTEM_PROMPT, USER_PROMPT, HISTORY)