feat: Add new model provider Novita AI (#1508)

Author: Jason
Date: 2025-01-11 13:08:37 +08:00 (committed by GitHub)
Parent: 1059d3219a
Commit: e54977ef29
6 changed files with 188 additions and 3 deletions


@@ -38,6 +38,11 @@ ENABLE_GEMINI=false
# GEMINI_API_KEY: Your Gemini API key for accessing models like GPT-4.
GEMINI_API_KEY=""
# ENABLE_NOVITA: Set to true to enable Novita AI as a language model provider.
ENABLE_NOVITA=false
# NOVITA_API_KEY: Your Novita AI API key.
NOVITA_API_KEY=""
# LLM_KEY: The chosen language model to use. This should be one of the models
# provided by the enabled LLM providers (e.g., OPENAI_GPT4_TURBO, OPENAI_GPT4V, ANTHROPIC_CLAUDE3, AZURE_OPENAI_GPT4V).
LLM_KEY=""


@@ -276,6 +276,7 @@ More extensive documentation can be found on our [documentation website](https:/
| Ollama | Coming soon (contributions welcome) |
| Gemini | Coming soon (contributions welcome) |
| Llama 3.2 | Coming soon (contributions welcome) |
| Novita AI | Llama 3.1 (8B, 70B), Llama 3.2 (1B, 3B, 11B Vision) |
#### Environment Variables
| Variable | Description| Type | Sample Value|
@@ -285,8 +286,9 @@ More extensive documentation can be found on our [documentation website](https:/
| `ENABLE_AZURE` | Register Azure OpenAI models | Boolean | `true`, `false` |
| `ENABLE_BEDROCK` | Register AWS Bedrock models. To use AWS Bedrock, you need to make sure your [AWS configurations](https://github.com/boto/boto3?tab=readme-ov-file#using-boto3) are set up correctly first. | Boolean | `true`, `false` |
| `ENABLE_GEMINI` | Register Gemini models| Boolean | `true`, `false` |
| `ENABLE_NOVITA`| Register Novita AI models | Boolean | `true`, `false` |
| `LLM_KEY` | The name of the model you want to use | String | Currently supported llm keys: `OPENAI_GPT4_TURBO`, `OPENAI_GPT4V`, `OPENAI_GPT4O`, `OPENAI_GPT4O_MINI`, `ANTHROPIC_CLAUDE3`, `ANTHROPIC_CLAUDE3_OPUS`, `ANTHROPIC_CLAUDE3_SONNET`, `ANTHROPIC_CLAUDE3_HAIKU`, `ANTHROPIC_CLAUDE3.5_SONNET`, `BEDROCK_ANTHROPIC_CLAUDE3_OPUS`, `BEDROCK_ANTHROPIC_CLAUDE3_SONNET`, `BEDROCK_ANTHROPIC_CLAUDE3_HAIKU`, `BEDROCK_ANTHROPIC_CLAUDE3.5_SONNET`, `AZURE_OPENAI`, `GEMINI_PRO`, `GEMINI_FLASH`, `BEDROCK_AMAZON_NOVA_PRO`, `BEDROCK_AMAZON_NOVA_LITE`|
| `SECONDARY_LLM_KEY` | The name of the model for mini agents skyvern runs with | String | Currently supported llm keys: `OPENAI_GPT4_TURBO`, `OPENAI_GPT4V`, `OPENAI_GPT4O`, `OPENAI_GPT4O_MINI`, `ANTHROPIC_CLAUDE3`, `ANTHROPIC_CLAUDE3_OPUS`, `ANTHROPIC_CLAUDE3_SONNET`, `ANTHROPIC_CLAUDE3_HAIKU`, `ANTHROPIC_CLAUDE3.5_SONNET`, `BEDROCK_ANTHROPIC_CLAUDE3_OPUS`, `BEDROCK_ANTHROPIC_CLAUDE3_SONNET`, `BEDROCK_ANTHROPIC_CLAUDE3_HAIKU`, `BEDROCK_ANTHROPIC_CLAUDE3.5_SONNET`, `AZURE_OPENAI`, `GEMINI_PRO`, `GEMINI_FLASH`, `NOVITA_LLAMA_3_3_70B`, `NOVITA_LLAMA_3_2_1B`, `NOVITA_LLAMA_3_2_3B`, `NOVITA_LLAMA_3_2_11B_VISION`, `NOVITA_LLAMA_3_1_8B`, `NOVITA_LLAMA_3_1_70B`, `NOVITA_LLAMA_3_1_405B`, `NOVITA_LLAMA_3_8B`, `NOVITA_LLAMA_3_70B`|
| `OPENAI_API_KEY` | OpenAI API Key | String | `sk-1234567890` |
| `OPENAI_API_BASE` | OpenAI API Base, optional | String | `https://openai.api.base` |
| `OPENAI_ORGANIZATION` | OpenAI Organization ID, optional | String | `your-org-id` |
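For context on what the new `NOVITA_*` keys resolve to: Novita AI exposes an OpenAI-compatible endpoint, and the registry changes later in this commit route requests through LiteLLM with `api_base="https://api.novita.ai/v3/openai"`. A minimal sketch of an equivalent direct LiteLLM call, reusing a model slug and base URL that appear in this diff (the prompt and environment handling are illustrative, not part of the commit):

```python
import os

import litellm

# Model slug and api_base come from the config_registry changes in this commit;
# everything else here is illustrative.
response = litellm.completion(
    model="openai/meta-llama/llama-3.1-8b-instruct",
    api_base="https://api.novita.ai/v3/openai",
    api_key=os.environ["NOVITA_API_KEY"],
    messages=[{"role": "user", "content": "Reply with the word 'ready'."}],
)
print(response.choices[0].message.content)
```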


@@ -115,6 +115,33 @@ setup_llm_providers() {
        update_or_add_env_var "ENABLE_GEMINI" "false"
    fi
    # Novita AI Configuration
    echo "To enable Novita AI, you must have a Novita AI API key."
    read -p "Do you want to enable Novita AI (y/n)? " enable_novita
    if [[ "$enable_novita" == "y" ]]; then
        read -p "Enter your Novita AI API key: " novita_api_key
        if [ -z "$novita_api_key" ]; then
            echo "Error: Novita AI API key is required."
            echo "Novita AI will not be enabled."
        else
            update_or_add_env_var "NOVITA_API_KEY" "$novita_api_key"
            update_or_add_env_var "ENABLE_NOVITA" "true"
            model_options+=(
                "NOVITA_LLAMA_3_3_70B"
                "NOVITA_LLAMA_3_2_1B"
                "NOVITA_LLAMA_3_2_3B"
                "NOVITA_LLAMA_3_2_11B_VISION"
                "NOVITA_LLAMA_3_1_8B"
                "NOVITA_LLAMA_3_1_70B"
                "NOVITA_LLAMA_3_1_405B"
                "NOVITA_LLAMA_3_8B"
                "NOVITA_LLAMA_3_70B"
            )
        fi
    else
        update_or_add_env_var "ENABLE_NOVITA" "false"
    fi
    # Model Selection
    if [ ${#model_options[@]} -eq 0 ]; then
        echo "No LLM providers enabled. You won't be able to run Skyvern unless you enable at least one provider. You can re-run this script to enable providers or manually update the .env file."


@@ -124,6 +124,11 @@ class Settings(BaseSettings):
    # GEMINI
    GEMINI_API_KEY: str | None = None

    # NOVITA AI
    ENABLE_NOVITA: bool = False
    NOVITA_API_KEY: str | None = None
    NOVITA_API_VERSION: str = "v3"

    # TOTP Settings
    TOTP_LIFESPAN_MINUTES: int = 10
    VERIFICATION_CODE_INITIAL_WAIT_TIME_SECS: int = 40
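For readers less familiar with pydantic settings: fields declared on `Settings` are populated from matching environment variables, which is how the `.env` additions above reach the code. A minimal standalone sketch of that mapping; the field names and defaults mirror the diff, while the class shown here is illustrative rather than Skyvern's actual `Settings`:

```python
from pydantic_settings import BaseSettings  # assumes pydantic-settings v2


class NovitaSettings(BaseSettings):
    # Field names and defaults mirror the Settings additions above.
    ENABLE_NOVITA: bool = False
    NOVITA_API_KEY: str | None = None
    NOVITA_API_VERSION: str = "v3"


# Environment variables with matching names (e.g. ENABLE_NOVITA=true) populate the fields.
settings = NovitaSettings()
if settings.ENABLE_NOVITA and not settings.NOVITA_API_KEY:
    raise RuntimeError("ENABLE_NOVITA is set but NOVITA_API_KEY is empty")
```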


@@ -147,7 +147,11 @@ class LLMAPIHandlerFactory:
            ai_suggestion=ai_suggestion,
        )
        if step:
            try:
                llm_cost = litellm.completion_cost(completion_response=response)
            except Exception as e:
                LOG.exception("Failed to calculate LLM cost", error=str(e))
                llm_cost = 0
            prompt_tokens = response.get("usage", {}).get("prompt_tokens", 0)
            completion_tokens = response.get("usage", {}).get("completion_tokens", 0)
            await app.DATABASE.update_step(
@@ -289,7 +293,11 @@
        )
        if step:
            try:
                llm_cost = litellm.completion_cost(completion_response=response)
            except Exception as e:
                LOG.exception("Failed to calculate LLM cost", error=str(e))
                llm_cost = 0
            prompt_tokens = response.get("usage", {}).get("prompt_tokens", 0)
            completion_tokens = response.get("usage", {}).get("completion_tokens", 0)
            await app.DATABASE.update_step(
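The guard added at both call sites follows the same pattern: cost calculation is treated as best-effort, because LiteLLM's pricing map does not cover every provider and model (Novita-served Llama models included), and a missing price entry should not fail the step. A standalone sketch of that pattern; the helper function name and logger setup here are illustrative, not Skyvern code:

```python
import litellm
import structlog

LOG = structlog.get_logger()


def safe_completion_cost(response) -> float:
    """Best-effort cost lookup: returns 0 when LiteLLM has no pricing data for the model."""
    try:
        return litellm.completion_cost(completion_response=response)
    except Exception as e:
        LOG.exception("Failed to calculate LLM cost", error=str(e))
        return 0.0
```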


@@ -52,6 +52,7 @@ if not any(
        settings.ENABLE_AZURE_GPT4O_MINI,
        settings.ENABLE_BEDROCK,
        settings.ENABLE_GEMINI,
        settings.ENABLE_NOVITA,
    ]
):
    raise NoProviderEnabledError()
@@ -287,3 +288,140 @@ if settings.ENABLE_GEMINI:
            max_output_tokens=8192,
        ),
    )
if settings.ENABLE_NOVITA:
    LLMConfigRegistry.register_config(
        "NOVITA_LLAMA_3_3_70B",
        LLMConfig(
            "openai/meta-llama/llama-3.3-70b-instruct",
            ["NOVITA_API_KEY"],
            supports_vision=False,
            add_assistant_prefix=False,
            litellm_params=LiteLLMParams(
                api_base="https://api.novita.ai/v3/openai",
                api_key=settings.NOVITA_API_KEY,
                api_version=settings.NOVITA_API_VERSION,
                model_info={"model_name": "openai/meta-llama/llama-3.3-70b-instruct"},
            ),
        ),
    )
    LLMConfigRegistry.register_config(
        "NOVITA_LLAMA_3_2_1B",
        LLMConfig(
            "openai/meta-llama/llama-3.2-1b-instruct",
            ["NOVITA_API_KEY"],
            supports_vision=False,
            add_assistant_prefix=False,
            litellm_params=LiteLLMParams(
                api_base="https://api.novita.ai/v3/openai",
                api_key=settings.NOVITA_API_KEY,
                api_version=settings.NOVITA_API_VERSION,
                model_info={"model_name": "openai/meta-llama/llama-3.2-1b-instruct"},
            ),
        ),
    )
    LLMConfigRegistry.register_config(
        "NOVITA_LLAMA_3_2_3B",
        LLMConfig(
            "openai/meta-llama/llama-3.2-3b-instruct",
            ["NOVITA_API_KEY"],
            supports_vision=False,
            add_assistant_prefix=False,
            litellm_params=LiteLLMParams(
                api_base="https://api.novita.ai/v3/openai",
                api_key=settings.NOVITA_API_KEY,
                api_version=settings.NOVITA_API_VERSION,
                model_info={"model_name": "openai/meta-llama/llama-3.2-3b-instruct"},
            ),
        ),
    )
    LLMConfigRegistry.register_config(
        "NOVITA_LLAMA_3_2_11B_VISION",
        LLMConfig(
            "openai/meta-llama/llama-3.2-11b-vision-instruct",
            ["NOVITA_API_KEY"],
            supports_vision=True,
            add_assistant_prefix=False,
            litellm_params=LiteLLMParams(
                api_base="https://api.novita.ai/v3/openai",
                api_key=settings.NOVITA_API_KEY,
                api_version=settings.NOVITA_API_VERSION,
                model_info={"model_name": "openai/meta-llama/llama-3.2-11b-vision-instruct"},
            ),
        ),
    )
    LLMConfigRegistry.register_config(
        "NOVITA_LLAMA_3_1_8B",
        LLMConfig(
            "openai/meta-llama/llama-3.1-8b-instruct",
            ["NOVITA_API_KEY"],
            supports_vision=False,
            add_assistant_prefix=False,
            litellm_params=LiteLLMParams(
                api_base="https://api.novita.ai/v3/openai",
                api_key=settings.NOVITA_API_KEY,
                api_version=settings.NOVITA_API_VERSION,
                model_info={"model_name": "openai/meta-llama/llama-3.1-8b-instruct"},
            ),
        ),
    )
    LLMConfigRegistry.register_config(
        "NOVITA_LLAMA_3_1_70B",
        LLMConfig(
            "openai/meta-llama/llama-3.1-70b-instruct",
            ["NOVITA_API_KEY"],
            supports_vision=False,
            add_assistant_prefix=False,
            litellm_params=LiteLLMParams(
                api_base="https://api.novita.ai/v3/openai",
                api_key=settings.NOVITA_API_KEY,
                api_version=settings.NOVITA_API_VERSION,
                model_info={"model_name": "openai/meta-llama/llama-3.1-70b-instruct"},
            ),
        ),
    )
    LLMConfigRegistry.register_config(
        "NOVITA_LLAMA_3_1_405B",
        LLMConfig(
            "openai/meta-llama/llama-3.1-405b-instruct",
            ["NOVITA_API_KEY"],
            supports_vision=False,
            add_assistant_prefix=False,
            litellm_params=LiteLLMParams(
                api_base="https://api.novita.ai/v3/openai",
                api_key=settings.NOVITA_API_KEY,
                api_version=settings.NOVITA_API_VERSION,
                model_info={"model_name": "openai/meta-llama/llama-3.1-405b-instruct"},
            ),
        ),
    )
    LLMConfigRegistry.register_config(
        "NOVITA_LLAMA_3_8B",
        LLMConfig(
            "openai/meta-llama/llama-3-8b-instruct",
            ["NOVITA_API_KEY"],
            supports_vision=False,
            add_assistant_prefix=False,
            litellm_params=LiteLLMParams(
                api_base="https://api.novita.ai/v3/openai",
                api_key=settings.NOVITA_API_KEY,
                api_version=settings.NOVITA_API_VERSION,
                model_info={"model_name": "openai/meta-llama/llama-3-8b-instruct"},
            ),
        ),
    )
    LLMConfigRegistry.register_config(
        "NOVITA_LLAMA_3_70B",
        LLMConfig(
            "openai/meta-llama/llama-3-70b-instruct",
            ["NOVITA_API_KEY"],
            supports_vision=False,
            add_assistant_prefix=False,
            litellm_params=LiteLLMParams(
                api_base="https://api.novita.ai/v3/openai",
                api_key=settings.NOVITA_API_KEY,
                api_version=settings.NOVITA_API_VERSION,
                model_info={"model_name": "openai/meta-llama/llama-3-70b-instruct"},
            ),
        ),
    )
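Since the nine registrations above differ only in the registry key, the model slug, and the vision flag, the same configuration could be expressed as data plus a single loop. A hedged sketch of such a refactor, reusing only names and values that appear in the block above (not part of this commit):

```python
# Hypothetical refactor: same keys, model slugs, and LiteLLM parameters as the
# explicit registrations above, expressed as a table plus one registration loop.
NOVITA_MODELS: dict[str, tuple[str, bool]] = {
    "NOVITA_LLAMA_3_3_70B": ("openai/meta-llama/llama-3.3-70b-instruct", False),
    "NOVITA_LLAMA_3_2_1B": ("openai/meta-llama/llama-3.2-1b-instruct", False),
    "NOVITA_LLAMA_3_2_3B": ("openai/meta-llama/llama-3.2-3b-instruct", False),
    "NOVITA_LLAMA_3_2_11B_VISION": ("openai/meta-llama/llama-3.2-11b-vision-instruct", True),
    "NOVITA_LLAMA_3_1_8B": ("openai/meta-llama/llama-3.1-8b-instruct", False),
    "NOVITA_LLAMA_3_1_70B": ("openai/meta-llama/llama-3.1-70b-instruct", False),
    "NOVITA_LLAMA_3_1_405B": ("openai/meta-llama/llama-3.1-405b-instruct", False),
    "NOVITA_LLAMA_3_8B": ("openai/meta-llama/llama-3-8b-instruct", False),
    "NOVITA_LLAMA_3_70B": ("openai/meta-llama/llama-3-70b-instruct", False),
}

if settings.ENABLE_NOVITA:
    for llm_key, (model_name, supports_vision) in NOVITA_MODELS.items():
        LLMConfigRegistry.register_config(
            llm_key,
            LLMConfig(
                model_name,
                ["NOVITA_API_KEY"],
                supports_vision=supports_vision,
                add_assistant_prefix=False,
                litellm_params=LiteLLMParams(
                    api_base="https://api.novita.ai/v3/openai",
                    api_key=settings.NOVITA_API_KEY,
                    api_version=settings.NOVITA_API_VERSION,
                    model_info={"model_name": model_name},
                ),
            ),
        )
```

Whether the loop or the explicit blocks is preferable is a style call; the explicit form matches how the other providers are registered in this file.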