project.label=CodeGPT
notification.group.name=CodeGPT notification group
notification.group.sticky.name=CodeGPT notification group sticky
action.generateCommitMessage.title=Generate Message
action.generateCommitMessage.description=Generate commit message
action.generateCommitMessage.serviceWarning=Messages can only be generated with the OpenAI, Custom OpenAI, or Azure service
action.generateCommitMessage.missingCredentials=Credentials not provided
action.includeFilesInContext.title=Include In Context...
action.includeFileInContext.title=Include File In Context...
action.includeFilesInContext.dialog.title=Include In Context
action.includeFilesInContext.dialog.description=Choose the files that you wish to include in the final prompt
action.includeFilesInContext.dialog.repeatableContext.label=Repeatable context:
action.includeFilesInContext.dialog.restoreToDefaults.label=Restore to Defaults
action.openSettings.title=Open Settings
action.openSettings.description=Open CodeGPT settings
action.statusbar.startServer.text=Start Server
action.statusbar.startServer.description=Start LLaMA Server
action.statusbar.startServer.MainMenu.text=Start Server
action.statusbar.stopServer.text=Stop Server
action.statusbar.stopServer.description=Stop LLaMA Server
action.statusbar.stopServer.MainMenu.text=Stop Server
action.statusbar.enableCompletions.text=Enable Completions
action.statusbar.enableCompletions.description=Enable Code Completions
action.statusbar.enableCompletions.MainMenu.text=Enable Completions
action.statusbar.disableCompletions.text=Disable Completions
action.statusbar.disableCompletions.description=Disable Code Completions
action.statusbar.disableCompletions.MainMenu.text=Disable Completions
settings.displayName=CodeGPT: Settings
settings.openaiQuotaExceeded=OpenAI quota exceeded.
settingsConfigurable.displayName.label=Display name:
settingsConfigurable.service.label=Selected provider:
settingsConfigurable.service.codegpt.apiKey.comment=You can find the API key in your <a href="https://codegpt.carlrobert.ee/account">User settings</a>.
settingsConfigurable.service.codegpt.chatCompletionModel.comment=Choose a model optimized for conversational interactions, including assistance with general queries and explanations.
settingsConfigurable.service.codegpt.codeCompletionModel.comment=Choose a model tailored for code completion-related tasks.
settingsConfigurable.service.custom.openai.apiKey.comment=A secret value stored in the system's Keychain or KeePass, depending on your OS. This approach is recommended over storing the secret in the header as plain text.
settingsConfigurable.service.openai.apiKey.comment=You can find the API key in your <a href="https://platform.openai.com/account/api-keys">User settings</a>.
settingsConfigurable.service.openai.customModel.label=Custom model:
settingsConfigurable.service.openai.organization.label=Organization:
settingsConfigurable.section.openai.organization.comment=Useful when you are part of multiple organizations <sup><strong>optional</strong></sup>
settingsConfigurable.service.google.apiKey.comment=You can find the API key in your <a href="https://aistudio.google.com/app/apikey">User settings</a>.
settingsConfigurable.service.google.model.comment=Note: Gemini Vision models <a href="https://ai.google.dev/gemini-api/docs/get-started/web?multi-turn-conversations-chat&hl=en#multi-turn-conversations-chat">do not yet support chats</a>.
settingsConfigurable.service.anthropic.apiKey.comment=You can find the API key in your <a href="https://console.anthropic.com/settings/keys">User settings</a>.
settingsConfigurable.service.anthropic.apiVersion.comment=We always recommend using the <a href="https://docs.anthropic.com/claude/reference/versions">latest API version</a> whenever possible.
settingsConfigurable.service.anthropic.model.comment=For details on model comparison metrics, see <a href="https://docs.anthropic.com/claude/docs/models-overview#model-comparison">model comparison</a>.
settingsConfigurable.service.azure.resourceName.label=Resource name:
settingsConfigurable.service.azure.resourceName.comment=The name of your Azure OpenAI resource.
settingsConfigurable.service.azure.deploymentId.label=Deployment ID:
settingsConfigurable.service.azure.deploymentId.comment=The name of your model deployment. You're required to first deploy a model before you can make calls.
settingsConfigurable.service.azure.apiVersion.comment=The API version to use for this operation. This follows the YYYY-MM-DD format.
settingsConfigurable.service.azure.bearerToken.label=Bearer token:
settingsConfigurable.service.azure.useApiKeyAuth.label=Use API key authentication
settingsConfigurable.service.azure.useActiveDirectoryAuth.label=Use Active Directory authentication
settingsConfigurable.service.you.email.label=Email address:
settingsConfigurable.service.you.password.label=Password:
settingsConfigurable.service.you.signIn.label=Sign In
settingsConfigurable.service.you.signOut.label=Sign Out
settingsConfigurable.service.you.displayResults.label=Display web search results
settingsConfigurable.service.you.authentication.title=Authentication (Optional)
settingsConfigurable.service.you.userInformation.title=User Information
settingsConfigurable.service.you.chatPreferences.title=Chat Preferences
settingsConfigurable.service.llama.modelPreferences.title=Model Preferences
settingsConfigurable.service.llama.serverPreferences.title=Server Preferences
settingsConfigurable.service.llama.modelSize.label=Model size:
settingsConfigurable.service.llama.quantization.label=Quantization:
settingsConfigurable.service.llama.quantization.comment=Quantization is a technique to reduce the computational and memory costs of running inference. <a href="https://huggingface.co/docs/optimum/concept_guides/quantization">Learn more</a>
settingsConfigurable.service.llama.customModelPath.label=Model path:
settingsConfigurable.service.llama.customModelPath.comment=Only .gguf files are supported
settingsConfigurable.service.llama.customServerPath.label=Server path:
settingsConfigurable.service.llama.customServerPath.comment=Precompiled llama-cpp server executable; only .exe (Windows) or executable files (Linux) are supported
settingsConfigurable.service.llama.promptTemplate.comment=Choose the template to use during interactions with the language model. Make sure it matches the custom model you're working with.
settingsConfigurable.service.llama.infillTemplate.comment=The template to use for code completions. Make sure the model you're working with supports code infilling.
settingsConfigurable.service.llama.downloadModelLink.label=Download Model
settingsConfigurable.service.llama.cancelDownloadLink.label=Cancel Download
settingsConfigurable.service.llama.linkToModel.label=Link to model
settingsConfigurable.service.llama.contextSize.label=Prompt context size:
settingsConfigurable.service.llama.contextSize.comment=The size of the prompt context. LLaMA models were built with a context of 2048, which provides the best results for longer input/inference.
settingsConfigurable.service.llama.threads.label=Threads:
settingsConfigurable.service.llama.threads.comment=The number of threads used to execute the model. Specifying more threads than the number of processor cores is not recommended.
settingsConfigurable.service.llama.additionalParameters.label=Additional parameters:
settingsConfigurable.service.llama.additionalParameters.comment=<html>Additional command-line parameters for the server startup process, separated by commas. See the full <a href="https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md">list of options</a>.<p><i>Example: "--n-gpu-layers, 1, --no-mmap, --mlock"</i></p></html>
settingsConfigurable.service.llama.additionalBuildParameters.label=Additional build parameters:
settingsConfigurable.service.llama.additionalBuildParameters.comment=<html>Additional command-line parameters for the server build process, separated by commas. See the full <a href="https://github.com/ggerganov/llama.cpp/tree/master?tab=readme-ov-file#build">list of build options</a>.<p><i>Example: "LLAMA_CUDA=1,CUDA_DOCKER_ARCH=all"</i></p></html>
settingsConfigurable.service.llama.additionalEnvironmentVariables.label=Additional environment variables:
settingsConfigurable.service.llama.additionalEnvironmentVariables.comment=<html>Additional environment variables for the server build and run process, separated by whitespace. Can be used, for example, to set CUDA variables (see the full <a href="https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars">list of environment variables</a>).<p><i>Example: "CUDA_VISIBLE_DEVICES=0,1"</i></p></html>
settingsConfigurable.service.llama.baseHost.label=Base host:
settingsConfigurable.service.llama.baseHost.comment=URL of an existing LLaMA server
settingsConfigurable.service.llama.startServer.label=Start server
settingsConfigurable.service.llama.startServer.opposite=Stop
settingsConfigurable.service.llama.stopServer.label=Stop server
settingsConfigurable.service.llama.stopServer.opposite=Start
settingsConfigurable.service.llama.progress.serverRunning=Server running
settingsConfigurable.service.llama.progress.serverStopped=Server stopped
settingsConfigurable.service.llama.progress.stoppingServer=Stopping server...
settingsConfigurable.service.llama.progress.startingServer=Starting server...
settingsConfigurable.service.llama.progress.downloadingModel.title=Downloading Model
settingsConfigurable.service.llama.progress.downloadingModelIndicator.text=Downloading %s...
settingsConfigurable.service.llama.overlay.modelNotDownloaded.text=Model is not downloaded
settingsConfigurable.shared.authentication.title=Authentication
settingsConfigurable.shared.requestConfiguration.title=Request Configuration
settingsConfigurable.shared.apiKey.label=API key:
settingsConfigurable.shared.apiKey.comment=API key used for authentication, added to the 'Authorization' header as a Bearer token (optional)
settingsConfigurable.shared.baseHost.label=Base host:
settingsConfigurable.shared.path.label=Path:
settingsConfigurable.shared.model.label=Model:
configurationConfigurable.displayName=CodeGPT: Configuration
configurationConfigurable.table.title=Editor Actions
configurationConfigurable.table.emptyText=No actions configured
configurationConfigurable.table.header.actionColumnLabel=Action
configurationConfigurable.table.header.promptColumnLabel=Prompt
configurationConfigurable.table.action.revertToDefaults.text=Revert to Defaults
configurationConfigurable.table.action.addKeymap.text=Add Shortcut
configurationConfigurable.checkForPluginUpdates.label=Check for plugin updates automatically
configurationConfigurable.checkForNewScreenshots.label=Check for new screenshots automatically
configurationConfigurable.openNewTabCheckBox.label=Open a new chat on each action
configurationConfigurable.enableMethodNameGeneration.label=Enable method name lookup suggestions
configurationConfigurable.autoFormatting.label=Enable automatic code formatting
configurationConfigurable.autocompletionPostProcessing.label=Enable code completion post processing
configurationConfigurable.section.assistant.title=Assistant Configuration
configurationConfigurable.section.assistant.systemPromptField.label=System prompt:
configurationConfigurable.section.assistant.systemPromptField.comment=The system message helps to set the behaviour of the assistant
configurationConfigurable.section.assistant.temperatureField.label=Temperature:
configurationConfigurable.section.assistant.temperatureField.comment=Controls the randomness of the response. Must be between 0 and 1.
configurationConfigurable.section.assistant.maxTokensField.label=Max completion tokens:
configurationConfigurable.section.assistant.maxTokensField.comment=The maximum number of tokens to generate in the completion.
configurationConfigurable.section.assistant.llamacppParams.title=Configuration Options for llama.cpp
settingsConfigurable.service.llama.topK.label=Top K:
settingsConfigurable.service.llama.topK.comment=Limit the next token selection to the K most probable tokens (default: 40)
settingsConfigurable.service.llama.topP.label=Top P:
settingsConfigurable.service.llama.topP.comment=Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P (default: 0.9)
settingsConfigurable.service.llama.minP.label=Min P:
settingsConfigurable.service.llama.minP.comment=Sets a minimum base probability threshold for token selection (default: 0.05)
settingsConfigurable.service.llama.repeatPenalty.label=Repeat penalty:
settingsConfigurable.service.llama.repeatPenalty.comment=Control the repetition of token sequences in the generated text (default: 1.1)
settingsConfigurable.service.custom.openai.testConnection.label=Test Connection
settingsConfigurable.service.custom.openai.presetTemplate.label=Preset template:
settingsConfigurable.service.custom.openai.url.label=URL:
settingsConfigurable.service.custom.openai.linkToDocs=Link to API docs
settingsConfigurable.service.custom.openai.connectionSuccess=Connection successful.
settingsConfigurable.service.custom.openai.connectionFailed=Connection failed.
settingsConfigurable.service.ollama.models.refresh=Refresh Models
configurationConfigurable.section.commitMessage.title=Commit Message Template
configurationConfigurable.section.commitMessage.systemPromptField.label=Prompt template:
configurationConfigurable.section.inlineCompletion.title=Inline Completion
configurationConfigurable.section.inlineCompletion.systemPromptField.label=Prompt:
configurationConfigurable.section.inlineCompletion.systemPromptField.comment=Custom system prompt used for inline code generation (Fill-in-the-Middle (FIM) template).<br/>The {pre}, {suf} and {mid} placeholders are replaced according to the selected model's FIM template.
configurationConfigurable.section.inlineCompletion.delay.label=Delay:
configurationConfigurable.section.inlineCompletion.delay.comment=Inline completion is requested after the user has been idle for the given number of milliseconds
advancedSettingsConfigurable.displayName=CodeGPT: Advanced Settings
advancedSettingsConfigurable.proxy.title=HTTP/SOCKS Proxy
advancedSettingsConfigurable.proxy.typeComboBoxField.label=Proxy:
advancedSettingsConfigurable.proxy.hostField.label=Host name:
advancedSettingsConfigurable.proxy.authCheckBoxField.label=Proxy authentication
advancedSettingsConfigurable.proxy.usernameField.label=Username:
advancedSettingsConfigurable.proxy.passwordField.label=Password:
advancedSettingsConfigurable.connectionSettings.title=Connection Settings
advancedSettingsConfigurable.connectionSettings.connectionTimeout.label=Connection timeout (s):
advancedSettingsConfigurable.connectionSettings.readTimeout.label=Read timeout (s):
codebaseIndexing.task.title=Indexing codebase
dialog.deleteConversation.title=Delete Conversation
dialog.deleteConversation.description=Are you sure you want to delete this conversation?
dialog.tokenLimitExceeded.title=Token Limit Exceeded
dialog.tokenLimitExceeded.description=The maximum default token limit has been reached. Do you want to proceed with the conversation despite the higher messaging cost?
dialog.tokenSoftLimitExceeded.title=Soft Limit Exceeded
dialog.tokenSoftLimitExceeded.description=Warning: The 'git diff' output contains %d tokens, indicating a substantial amount of changes. Are you sure you want to continue?
dialog.cancel=Cancel
dialog.continue=Continue
editor.diff.title=CodeGPT Diff
editor.diff.local.content.title=CodeGPT suggested code
toolwindow.chat.editor.action.copy.title=Copy
toolwindow.chat.editor.action.copy.description=Copy generated code
toolwindow.chat.editor.action.copy.success=Code copied!
toolwindow.chat.editor.action.diff.title=Diff
toolwindow.chat.editor.action.diff.description=Diff the editor code against the generated code
toolwindow.chat.editor.action.edit.title=Edit Source
toolwindow.chat.editor.action.disableEditing.title=Disable Editing
toolwindow.chat.editor.action.edit.description=Edit generated code
toolwindow.chat.editor.action.newFile.title=New File
toolwindow.chat.editor.action.newFile.description=Create new file from generated code
toolwindow.chat.editor.action.replaceSelection.title=Replace Selection
toolwindow.chat.editor.action.replaceSelection.description=Replace the selected code in the main editor
toolwindow.chat.editor.action.expand=Show More (+%s rows)
toolwindow.chat.editor.action.collapse=Show Less
toolwindow.chat.response.action.reloadResponse.text=Reload Response
toolwindow.chat.response.action.reloadResponse.description=Reload the response
toolwindow.chat.response.action.deleteResponse.text=Delete Response
toolwindow.chat.response.action.deleteResponse.description=Delete the response
toolwindow.chat.youProCheckBox.text=Use GPT-4 model
toolwindow.chat.youProCheckBox.enable=Turn on for complex queries
toolwindow.chat.youProCheckBox.disable=Turn off for faster responses
toolwindow.chat.youProCheckBox.notAllowed=Enable by subscribing to the YouPro plan
toolwindow.chat.textArea.emptyText=Ask me anything...
service.codegpt.title=CodeGPT
service.openai.title=OpenAI
service.custom.openai.title=Custom OpenAI
service.anthropic.title=Anthropic
service.azure.title=Azure
service.google.title=Google
service.llama.title=LLaMA C/C++ (Local)
service.ollama.title=Ollama (Local)
validation.error.model.notExists='%s' is not available; please select another model
validation.error.fieldRequired=This field is required.
validation.error.invalidEmail=The email you entered is invalid.
validation.error.mustBeNumber=Value must be a number.
validation.error.mustBeBetweenZeroAndOne=Value must be between 0 and 1.
validation.error.mustBeGreaterThanZero=Value must be greater than 0.
checkForUpdatesTask.title=Checking for CodeGPT update...
checkForUpdatesTask.notification.message=An update for CodeGPT is available.
checkForUpdatesTask.notification.installButton=Install update
llamaServerAgent.buildingProject.description=Building server...
llamaServerAgent.serverBootup.description=Booting up server...
notification.compilationError.description=CodeGPT has detected a compilation error. Would you like assistance in resolving it?
notification.compilationError.okLabel=Resolve errors
notification.completionError.description=Completion failed:<br/>%s
statusBar.widget.tooltip=Status
shared.promptTemplate=Prompt template:
shared.infillPromptTemplate=Infill template:
shared.apiVersion=API version:
shared.configuration=Configuration
shared.port=Port:
shared.notification.doNotShowAgain=Do not show again
codeCompletion.progress.title=Code completion in progress
imageAttachmentNotification.content=New image detected on desktop. Would you like to attach it to your current conversation?
imageAttachmentNotification.action=Attach image
action.attachImage=Attach Image
action.attachImageDescription=Attach an image
imageFileChooser.title=Select Image
imageAccordion.title=Attached image
shared.chatCompletions=Chat Completions
shared.codeCompletions=Code Completions
codeCompletionsForm.enableFeatureText=Enable code completions
codeCompletionsForm.maxTokensLabel=Max tokens:
codeCompletionsForm.maxTokensComment=The maximum number of tokens that will be generated in the code completion.