mirror of
https://github.com/carlrobertoh/ProxyAI.git
synced 2026-05-12 14:10:29 +00:00
150 lines
No EOL
11 KiB
INI
150 lines
No EOL
11 KiB
INI
project.label=CodeGPT
|
|
notification.group.name=CodeGPT notification group
|
|
action.generateCommitMessage.title=Generate Message
|
|
action.generateCommitMessage.description=Generate commit message using OpenAI or Azure service
|
|
action.generateCommitMessage.serviceWarning=Messages can only be generated with OpenAI or Azure service
|
|
action.generateCommitMessage.missingCredentials=Credentials not provided
|
|
action.includeFilesInContext.title=Include In Context...
|
|
action.includeFilesInContext.dialog.title=Include In Context
|
|
action.includeFilesInContext.dialog.description=Choose the files that you wish to include in the final prompt
|
|
action.includeFilesInContext.dialog.promptTemplate.label=Prompt template:
|
|
action.includeFilesInContext.dialog.repeatableContext.label=Repeatable context:
|
|
action.includeFilesInContext.dialog.restoreToDefaults.label=Restore to Defaults
|
|
settings.displayName=CodeGPT: Settings
|
|
settings.openaiQuotaExceeded=OpenAI quota exceeded.
|
|
settingsConfigurable.displayName.label=Display name:
|
|
settingsConfigurable.service.label=Service:
|
|
settingsConfigurable.service.openai.apiKey.comment=You can find your Secret API key in your <a href="https://platform.openai.com/account/api-keys">User settings</a>.
|
|
settingsConfigurable.service.openai.organization.label=Organization:
|
|
settingsConfigurable.section.openai.organization.comment=Useful when you are part of multiple organizations <sup><strong>optional</strong></sup>
|
|
settingsConfigurable.service.azure.resourceName.label=Resource name:
|
|
settingsConfigurable.service.azure.resourceName.comment=The name of your Azure OpenAI resource.
|
|
settingsConfigurable.service.azure.deploymentId.label=Deployment ID:
|
|
settingsConfigurable.service.azure.deploymentId.comment=The name of your model deployment. You're required to first deploy a model before you can make calls.
|
|
settingsConfigurable.service.azure.apiVersion.label=API version:
|
|
settingsConfigurable.service.azure.apiVersion.comment=The API version to use for this operation. This follows the YYYY-MM-DD format.
|
|
settingsConfigurable.service.azure.bearerToken.label=Bearer token:
|
|
settingsConfigurable.service.azure.useApiKeyAuth.label=Use API key authentication
|
|
settingsConfigurable.service.azure.useActiveDirectoryAuth.label=Use Active Directory authentication
|
|
settingsConfigurable.service.you.email.label=Email address:
|
|
settingsConfigurable.service.you.password.label=Password:
|
|
settingsConfigurable.service.you.signIn.label=Sign In
|
|
settingsConfigurable.service.you.signOut.label=Sign Out
|
|
settingsConfigurable.service.you.displayResults.label=Display web search results
|
|
settingsConfigurable.service.you.authentication.title=Authentication (Optional)
|
|
settingsConfigurable.service.you.userInformation.title=User Information
|
|
settingsConfigurable.service.you.chatPreferences.title=Chat Preferences
|
|
settingsConfigurable.service.llama.modelPreferences.title=Model Preferences
|
|
settingsConfigurable.service.llama.serverPreferences.title=Server Preferences
|
|
settingsConfigurable.service.llama.modelSize.label=Model size:
|
|
settingsConfigurable.service.llama.quantization.label=Quantization:
|
|
settingsConfigurable.service.llama.quantization.comment=Quantization is a technique to reduce the computational and memory costs of running inference. <a href="https://huggingface.co/docs/optimum/concept_guides/quantization">Learn more</a>
|
|
settingsConfigurable.service.llama.promptTemplate.label=Prompt template:
|
|
settingsConfigurable.service.llama.useCustomModel.label=Use custom model
|
|
settingsConfigurable.service.llama.customModelPath.label=Model path:
|
|
settingsConfigurable.service.llama.customModelPath.comment=Only .gguf files are supported
|
|
settingsConfigurable.service.llama.downloadModelLink.label=Download Model
|
|
settingsConfigurable.service.llama.cancelDownloadLink.label=Cancel Downloading
|
|
settingsConfigurable.service.llama.linkToModel.label=Link to model
|
|
settingsConfigurable.service.llama.contextSize.label=Prompt context size:
|
|
settingsConfigurable.service.llama.contextSize.comment=The size of the prompt context. LLaMA models were built with a context of 2048, which will provide better results for longer input/inference.
|
|
settingsConfigurable.service.llama.threads.label=Threads:
|
|
settingsConfigurable.service.llama.threads.comment=The number of threads available to execute the model. It is not recommended to specify a number greater than the number of processor cores.
|
|
settingsConfigurable.service.llama.additionalParameters.label=Additional parameters:
|
|
settingsConfigurable.service.llama.additionalParameters.comment=<html>Additional command-line parameters for the server startup process, separated by commas. See the full <a href="https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md">list of options</a>.<p><i>Example: "--n-gpu-layers, 1, --no-mmap, --mlock"</i></p></html>
|
|
settingsConfigurable.service.llama.port.label=Port:
|
|
settingsConfigurable.service.llama.startServer.label=Start server
|
|
settingsConfigurable.service.llama.stopServer.label=Stop server
|
|
settingsConfigurable.service.llama.progress.serverRunning=Server running
|
|
settingsConfigurable.service.llama.progress.serverTerminated=Server terminated
|
|
settingsConfigurable.service.llama.progress.stoppingServer=Stopping the server...
|
|
settingsConfigurable.service.llama.progress.startingServer=Starting the server...
|
|
settingsConfigurable.service.llama.progress.downloadingModel.title=Downloading Model
|
|
settingsConfigurable.service.llama.progress.downloadingModelIndicator.text=Downloading %s...
|
|
settingsConfigurable.service.llama.overlay.modelNotDownloaded.text=Model is not downloaded
|
|
settingsConfigurable.shared.authentication.title=Authentication
|
|
settingsConfigurable.shared.requestConfiguration.title=Request Configuration
|
|
settingsConfigurable.shared.apiKey.label=API key:
|
|
settingsConfigurable.shared.baseHost.label=Base host:
|
|
settingsConfigurable.shared.path.label=Path:
|
|
settingsConfigurable.shared.model.label=Model:
|
|
configurationConfigurable.displayName=CodeGPT: Configuration
|
|
configurationConfigurable.table.title=Editor Actions
|
|
configurationConfigurable.table.emptyText=No actions configured
|
|
configurationConfigurable.table.header.actionColumnLabel=Action
|
|
configurationConfigurable.table.header.promptColumnLabel=Prompt
|
|
configurationConfigurable.table.action.revertToDefaults.text=Revert to Defaults
|
|
configurationConfigurable.table.action.addKeymap.text=Add Shortcut
|
|
configurationConfigurable.checkForPluginUpdates.label=Check for plugin updates automatically
|
|
configurationConfigurable.openNewTabCheckBox.label=Open a new chat on each action
|
|
configurationConfigurable.enableMethodNameGeneration.label=Enable method name lookup suggestions
|
|
configurationConfigurable.autoFormatting.label=Enable automatic code formatting
|
|
configurationConfigurable.section.assistant.title=Assistant Configuration
|
|
configurationConfigurable.section.assistant.systemPromptField.label=System prompt:
|
|
configurationConfigurable.section.assistant.systemPromptField.comment=The system message helps to set the behaviour of the assistant
|
|
configurationConfigurable.section.assistant.temperatureField.label=Temperature:
|
|
configurationConfigurable.section.assistant.temperatureField.comment=Controls the randomness of the output. Must be between 0 and 1
|
|
configurationConfigurable.section.assistant.maxTokensField.label=Max completion tokens:
|
|
configurationConfigurable.section.assistant.maxTokensField.comment=The maximum number of tokens to generate for the completion.
|
|
configurationConfigurable.section.commitMessage.title=Commit Message
|
|
configurationConfigurable.section.commitMessage.systemPromptField.label=Prompt:
|
|
configurationConfigurable.section.commitMessage.systemPromptField.comment=Custom system prompt used for commit message generation.
|
|
advancedSettingsConfigurable.displayName=CodeGPT: Advanced Settings
|
|
advancedSettingsConfigurable.proxy.title=HTTP/SOCKS Proxy
|
|
advancedSettingsConfigurable.proxy.typeComboBoxField.label=Proxy:
|
|
advancedSettingsConfigurable.proxy.hostField.label=Host name:
|
|
advancedSettingsConfigurable.proxy.portField.label=Port:
|
|
advancedSettingsConfigurable.proxy.authCheckBoxField.label=Proxy authentication
|
|
advancedSettingsConfigurable.proxy.usernameField.label=Username:
|
|
advancedSettingsConfigurable.proxy.passwordField.label=Password:
|
|
advancedSettingsConfigurable.connectionSettings.title=Connection Settings
|
|
advancedSettingsConfigurable.connectionSettings.connectionTimeout.label=Connection timeout (s):
|
|
advancedSettingsConfigurable.connectionSettings.readTimeout.label=Read timeout (s):
|
|
codebaseIndexing.task.title=Indexing codebase
|
|
dialog.deleteConversation.title=Delete Conversation
|
|
dialog.deleteConversation.description=Are you sure you want to delete this conversation?
|
|
dialog.tokenLimitExceeded.title=Token Limit Exceeded
|
|
dialog.tokenLimitExceeded.description=The maximum default token limit has been reached. Do you want to proceed with the conversation despite the higher messaging cost?
|
|
dialog.tokenSoftLimitExceeded.title=Soft Limit Exceeded
|
|
dialog.tokenSoftLimitExceeded.description=Warning: The 'git diff' output contains %d tokens, indicating a substantial amount of changes. Are you sure you want to continue?
|
|
dialog.cancel=Cancel
|
|
dialog.continue=Continue
|
|
editor.diff.title=CodeGPT Diff
|
|
editor.diff.local.content.title=CodeGPT suggested code
|
|
toolwindow.chat.editor.action.copy.title=Copy
|
|
toolwindow.chat.editor.action.copy.description=Copy generated code
|
|
toolwindow.chat.editor.action.copy.success=Code copied!
|
|
toolwindow.chat.editor.action.diff.title=Diff
|
|
toolwindow.chat.editor.action.diff.description=Diff editor code against the generated one
|
|
toolwindow.chat.editor.action.edit.title=Edit Source
|
|
toolwindow.chat.editor.action.disableEditing.title=Disable Editing
|
|
toolwindow.chat.editor.action.edit.description=Edit generated code
|
|
toolwindow.chat.editor.action.newFile.title=New File
|
|
toolwindow.chat.editor.action.newFile.description=Create new file from generated code
|
|
toolwindow.chat.editor.action.replaceSelection.title=Replace Selection
|
|
toolwindow.chat.editor.action.replaceSelection.description=Replace main editor selected code
|
|
toolwindow.chat.editor.action.expand=Show More (+%s rows)
|
|
toolwindow.chat.editor.action.collapse=Show Less
|
|
toolwindow.chat.response.action.reloadResponse.text=Reload Response
|
|
toolwindow.chat.response.action.reloadResponse.description=Reload response description
|
|
toolwindow.chat.response.action.deleteResponse.text=Delete Response
|
|
toolwindow.chat.response.action.deleteResponse.description=Delete response description
|
|
toolwindow.chat.youProCheckBox.text=Use GPT-4 model
|
|
toolwindow.chat.youProCheckBox.enable=Turn on for complex queries
|
|
toolwindow.chat.youProCheckBox.disable=Turn off for faster responses
|
|
toolwindow.chat.youProCheckBox.notAllowed=Enable by subscribing to YouPro plan
|
|
toolwindow.chat.textArea.emptyText=Ask me anything...
|
|
service.openai.title=OpenAI Service
|
|
service.azure.title=Azure Service
|
|
service.you.title=You.com Service (Free, Cloud)
|
|
service.llama.title=LLaMA C/C++ Port (Free, Local)
|
|
validation.error.fieldRequired=This field is required.
|
|
validation.error.invalidEmail=The email you entered is invalid.
|
|
validation.error.mustBeNumber=Value must be a number.
|
|
validation.error.mustBeBetweenZeroAndOne=Value must be between 0 and 1.
|
|
checkForUpdatesTask.title=Checking for CodeGPT update...
|
|
checkForUpdatesTask.notification.message=An update for CodeGPT is available.
|
|
checkForUpdatesTask.notification.installButton=Install update
|
|
checkForUpdatesTask.notification.hideButton=Do not show again
|
|
llamaServerAgent.buildingProject.description=Building llama.cpp...
|
|
llamaServerAgent.serverBootup.description=Booting up server... |