mirror of
https://github.com/carlrobertoh/ProxyAI.git
synced 2026-05-10 12:10:14 +00:00
227 lines
18 KiB
INI
227 lines
18 KiB
INI
project.label=CodeGPT
|
|
action.generateCommitMessage.title=CodeGPT: Generate Message
|
|
action.generateCommitMessage.description=Generate git commit message
|
|
action.generateCommitMessage.missingCredentials=Credentials not provided
|
|
action.includeFilesInContext.dialog.title=Include In Context
|
|
action.includeFilesInContext.dialog.description=Choose the files that you wish to include in the final prompt
|
|
action.includeFilesInContext.dialog.repeatableContext.label=Repeatable context:
|
|
action.includeFilesInContext.dialog.restoreToDefaults.label=Restore to Defaults
|
|
action.openSettings.title=Open Settings
|
|
action.openSettings.description=Open CodeGPT settings
|
|
settings.displayName=CodeGPT: Settings
|
|
settingsConfigurable.displayName.label=Display name:
|
|
settingsConfigurable.service.label=Selected provider:
|
|
settingsConfigurable.service.codegpt.apiKey.comment=You can find the API key in your <a href="https://codegpt.ee/account">User settings</a>.
|
|
settingsConfigurable.service.codegpt.chatCompletionModel.comment=Choose a model optimized for conversational interactions, including assistance with general queries and explanations.
|
|
settingsConfigurable.service.codegpt.codeCompletionModel.comment=Choose a model tailored for code completion-related tasks.
|
|
settingsConfigurable.service.custom.openai.apiKey.comment=A secret value stored in the system's Keychain or KeePass, depending on your OS. This approach is recommended over storing the secret in the header as plain text.
|
|
settingsConfigurable.service.openai.apiKey.comment=You can find the API key in your <a href="https://platform.openai.com/account/api-keys">User settings</a>.
|
|
settingsConfigurable.service.openai.organization.label=Organization:
|
|
settingsConfigurable.section.openai.organization.comment=Useful when you are part of multiple organizations <sup><strong>optional</strong></sup>
|
|
settingsConfigurable.service.google.apiKey.comment=You can find the API key in your <a href="https://aistudio.google.com/app/apikey">User settings</a>.
|
|
settingsConfigurable.service.google.model.comment=Note: Gemini Vision models <a href="https://ai.google.dev/gemini-api/docs/get-started/web?multi-turn-conversations-chat&hl=en#multi-turn-conversations-chat">do not yet support chats</a>.
|
|
settingsConfigurable.service.anthropic.apiKey.comment=You can find the API key in your <a href="https://console.anthropic.com/settings/keys">User settings</a>.
|
|
settingsConfigurable.service.anthropic.apiVersion.comment=We always recommend using the <a href="https://docs.anthropic.com/claude/reference/versions">latest API version</a> whenever possible.
|
|
settingsConfigurable.service.anthropic.model.comment=For details on model comparison metrics, see <a href="https://docs.anthropic.com/claude/docs/models-overview#model-comparison">model comparison</a>.
|
|
settingsConfigurable.service.azure.resourceName.label=Resource name:
|
|
settingsConfigurable.service.azure.resourceName.comment=The name of your Azure OpenAI resource.
|
|
settingsConfigurable.service.azure.deploymentId.label=Deployment ID:
|
|
settingsConfigurable.service.azure.deploymentId.comment=The name of your model deployment. You're required to first deploy a model before you can make calls.
|
|
settingsConfigurable.service.azure.apiVersion.comment=The API version to use for this operation. This follows the YYYY-MM-DD format.
|
|
settingsConfigurable.service.azure.bearerToken.label=Bearer token:
|
|
settingsConfigurable.service.azure.useApiKeyAuth.label=Use API key authentication
|
|
settingsConfigurable.service.azure.useActiveDirectoryAuth.label=Use Active Directory authentication
|
|
settingsConfigurable.service.llama.modelPreferences.title=Model Preferences
|
|
settingsConfigurable.service.llama.serverPreferences.title=Server Preferences
|
|
settingsConfigurable.service.llama.modelSize.label=Model size:
|
|
settingsConfigurable.service.llama.quantization.label=Quantization:
|
|
settingsConfigurable.service.llama.quantization.comment=Quantization is a technique to reduce the computational and memory costs of running inference. <a href="https://huggingface.co/docs/optimum/concept_guides/quantization">Learn more</a>
|
|
settingsConfigurable.service.llama.customModelPath.label=Model path:
|
|
settingsConfigurable.service.llama.customModelPath.comment=Only .gguf files are supported
|
|
settingsConfigurable.service.llama.promptTemplate.comment=Choose the template to use during interactions with the language model. Make sure it matches the custom model you're working with.
|
|
settingsConfigurable.service.llama.infillTemplate.comment=The template to use for code completions. Make sure the model you're working with supports code infilling.
|
|
settingsConfigurable.service.llama.downloadModelLink.label=Download Model
|
|
settingsConfigurable.service.llama.cancelDownloadLink.label=Cancel Downloading
|
|
settingsConfigurable.service.llama.linkToModel.label=Link to model
|
|
settingsConfigurable.service.llama.contextSize.label=Prompt context size:
|
|
settingsConfigurable.service.llama.contextSize.comment=The size of the prompt context. LLaMA models were built with a context of 2048, which will provide better results for longer input/inference.
|
|
settingsConfigurable.service.llama.threads.label=Threads:
|
|
settingsConfigurable.service.llama.threads.comment=The number of threads available to execute the model. It is not recommended to specify a number greater than the number of processor cores.
|
|
settingsConfigurable.service.llama.additionalParameters.label=Additional parameters:
|
|
settingsConfigurable.service.llama.additionalParameters.comment=<html>Additional command-line parameters for the server startup process, separated by commas. See the full <a href="https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md">list of options</a>.<p><i>Example: "--n-gpu-layers, 1, --no-mmap, --mlock"</i></p></html>
|
|
settingsConfigurable.service.llama.additionalBuildParameters.label=Additional build parameters:
|
|
settingsConfigurable.service.llama.additionalBuildParameters.comment=<html>Additional command-line parameters for the server build process, separated by commas. See the full <a href="https://github.com/ggerganov/llama.cpp/tree/master?tab=readme-ov-file#build">list of build options</a>.<p><i>Example: "LLAMA_CUDA=1,CUDA_DOCKER_ARCH=all"</i></p></html>
|
|
settingsConfigurable.service.llama.additionalEnvironmentVariables.label=Additional environment variables:
|
|
settingsConfigurable.service.llama.additionalEnvironmentVariables.comment=<html>Additional environment variables for the server build and run process, separated by whitespaces. Can be used to e.g. set CUDA variables (see the full <a href="https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars">list of env vars</a>)<p><i>Example: "CUDA_VISIBLE_DEVICES=0,1"</i></p></html>
|
|
settingsConfigurable.service.llama.baseHost.label=Base host:
|
|
settingsConfigurable.service.llama.baseHost.comment=URL of an existing LLaMA server
|
|
settingsConfigurable.service.llama.startServer.label=Start server
|
|
settingsConfigurable.service.llama.startServer.opposite=Stop
|
|
settingsConfigurable.service.llama.stopServer.label=Stop server
|
|
settingsConfigurable.service.llama.stopServer.opposite=Start
|
|
settingsConfigurable.service.llama.progress.serverRunning=Server running
|
|
settingsConfigurable.service.llama.progress.serverStopped=Server stopped
|
|
settingsConfigurable.service.llama.progress.stoppingServer=Stopping server...
|
|
settingsConfigurable.service.llama.progress.startingServer=Starting server...
|
|
settingsConfigurable.service.llama.progress.downloadingModel.title=Downloading Model
|
|
settingsConfigurable.service.llama.progress.downloadingModelIndicator.text=Downloading %s...
|
|
settingsConfigurable.service.llama.overlay.modelNotDownloaded.text=Model is not downloaded
|
|
settingsConfigurable.shared.authentication.title=Authentication
|
|
settingsConfigurable.shared.requestConfiguration.title=Request Configuration
|
|
settingsConfigurable.shared.apiKey.label=API key:
|
|
settingsConfigurable.shared.apiKey.comment=API key for authentication, added to the 'Authorization' header as a bearer token (optional)
|
|
settingsConfigurable.shared.baseHost.label=Base host:
|
|
settingsConfigurable.shared.model.label=Model:
|
|
configurationConfigurable.displayName=CodeGPT: Configuration
|
|
configurationConfigurable.table.title=Editor Actions
|
|
configurationConfigurable.table.emptyText=No actions configured
|
|
configurationConfigurable.table.header.actionColumnLabel=Action
|
|
configurationConfigurable.table.header.promptColumnLabel=Prompt
|
|
configurationConfigurable.table.action.revertToDefaults.text=Revert to Defaults
|
|
configurationConfigurable.table.action.addKeymap.text=Add Shortcut
|
|
configurationConfigurable.checkForPluginUpdates.label=Check for plugin updates automatically
|
|
configurationConfigurable.checkForNewScreenshots.label=Check for new screenshots automatically
|
|
configurationConfigurable.openNewTabCheckBox.label=Open a new chat on each action
|
|
configurationConfigurable.enableMethodNameGeneration.label=Enable method name lookup suggestions
|
|
configurationConfigurable.autoFormatting.label=Enable automatic code formatting
|
|
configurationConfigurable.autocompletionPostProcessing.label=Enable code completion post processing
|
|
configurationConfigurable.autocompletionContextAwareCheckBox.label=Enable project context aware code completion
|
|
configurationConfigurable.autocompletionGitContextCheckBox.label=Enable Git context for code completions
|
|
configurationConfigurable.section.assistant.title=Assistant Configuration
|
|
configurationConfigurable.section.assistant.temperatureField.label=Temperature:
|
|
configurationConfigurable.section.assistant.temperatureField.comment=Controls the randomness of the output. Must be between 0 and 1
|
|
configurationConfigurable.section.assistant.maxTokensField.label=Max completion tokens:
|
|
configurationConfigurable.section.assistant.maxTokensField.comment=The maximum number of tokens to generate for the completion.
|
|
settingsConfigurable.service.llama.topK.label=Top K:
|
|
settingsConfigurable.service.llama.topK.comment=Limit the next token selection to the K most probable tokens (default: 40)
|
|
settingsConfigurable.service.llama.topP.label=Top P:
|
|
settingsConfigurable.service.llama.topP.comment=Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P (default: 0.9)
|
|
settingsConfigurable.service.llama.minP.label=Min P:
|
|
settingsConfigurable.service.llama.minP.comment=Sets a minimum base probability threshold for token selection (default: 0.05)
|
|
settingsConfigurable.service.llama.repeatPenalty.label=Repeat penalty:
|
|
settingsConfigurable.service.llama.repeatPenalty.comment=Control the repetition of token sequences in the generated text (default: 1.1)
|
|
settingsConfigurable.service.custom.openai.testConnection.label=Test Connection
|
|
settingsConfigurable.service.custom.openai.presetTemplate.label=Preset template:
|
|
settingsConfigurable.service.custom.openai.url.label=URL:
|
|
settingsConfigurable.service.custom.openai.linkToDocs=Link to API docs
|
|
settingsConfigurable.service.custom.openai.connectionSuccess=Connection successful.
|
|
settingsConfigurable.service.custom.openai.connectionFailed=Connection failed.
|
|
settingsConfigurable.service.ollama.models.refresh=Refresh Models
|
|
configurationConfigurable.section.commitMessage.title=Commit Message Template
|
|
configurationConfigurable.section.commitMessage.systemPromptField.label=Prompt template:
|
|
advancedSettingsConfigurable.displayName=CodeGPT: Advanced Settings
|
|
advancedSettingsConfigurable.proxy.title=HTTP/SOCKS Proxy
|
|
advancedSettingsConfigurable.proxy.typeComboBoxField.label=Proxy:
|
|
advancedSettingsConfigurable.proxy.hostField.label=Host name:
|
|
advancedSettingsConfigurable.proxy.authCheckBoxField.label=Proxy authentication
|
|
advancedSettingsConfigurable.proxy.usernameField.label=Username:
|
|
advancedSettingsConfigurable.proxy.passwordField.label=Password:
|
|
advancedSettingsConfigurable.connectionSettings.title=Connection Settings
|
|
advancedSettingsConfigurable.connectionSettings.connectionTimeout.label=Connection timeout (s):
|
|
advancedSettingsConfigurable.connectionSettings.readTimeout.label=Read timeout (s):
|
|
dialog.deleteConversation.title=Delete Conversation
|
|
dialog.deleteConversation.description=Are you sure you want to delete this conversation?
|
|
dialog.tokenLimitExceeded.title=Token Limit Exceeded
|
|
dialog.tokenLimitExceeded.description=The maximum default token limit has been reached. Do you want to proceed with the conversation despite the higher messaging cost?
|
|
dialog.tokenSoftLimitExceeded.title=Soft Limit Exceeded
|
|
dialog.tokenSoftLimitExceeded.description=Warning: The 'git diff' output contains %d tokens, indicating a substantial amount of changes. Are you sure you want to continue?
|
|
dialog.continue=Continue
|
|
editor.diff.title=CodeGPT Diff
|
|
editor.diff.local.content.title=CodeGPT suggested code
|
|
toolwindow.chat.editor.action.copy.title=Copy
|
|
toolwindow.chat.editor.action.copy.description=Copy generated code
|
|
toolwindow.chat.editor.action.copy.success=Code copied!
|
|
toolwindow.chat.editor.action.autoApply.title=Auto Apply
|
|
toolwindow.chat.editor.action.autoApply.disabledTitle=This action is only available with the CodeGPT provider
|
|
toolwindow.chat.editor.action.autoApply.description=Apply suggested changes automatically
|
|
toolwindow.chat.editor.action.autoApply.noActiveFile=Active file not found
|
|
toolwindow.chat.editor.action.autoApply.fileTooLarge=Active file too large to process
|
|
toolwindow.chat.editor.action.autoApply.accept=Accept
|
|
toolwindow.chat.editor.action.autoApply.reject=Reject
|
|
toolwindow.chat.editor.action.autoApply.error=Something went wrong while applying changes. {0}
|
|
toolwindow.chat.editor.action.autoApply.taskTitle=Apply changes
|
|
toolwindow.chat.editor.action.autoApply.loadingMessage=CodeGPT: Applying changes
|
|
toolwindow.chat.editor.action.edit.title=Edit Source
|
|
toolwindow.chat.editor.action.disableEditing.title=Disable Editing
|
|
toolwindow.chat.editor.action.newFile.title=New File
|
|
toolwindow.chat.editor.action.newFile.description=Create new file from generated code
|
|
toolwindow.chat.editor.action.replaceSelection.title=Replace Selection
|
|
toolwindow.chat.editor.action.insertAtCaret.title=Insert at Caret
|
|
toolwindow.chat.editor.action.insertAtCaret.description=Insert generated code after main editor caret position
|
|
toolwindow.chat.editor.action.expand=Show More (+%s rows)
|
|
toolwindow.chat.editor.action.collapse=Show Less
|
|
toolwindow.chat.response.action.reloadResponse.text=Reload Response
|
|
toolwindow.chat.response.action.reloadResponse.description=Regenerate the response for this message
|
|
toolwindow.chat.response.action.deleteResponse.text=Delete Response
|
|
toolwindow.chat.response.action.deleteResponse.description=Remove this response from the conversation
|
|
toolwindow.chat.textArea.emptyText=Ask anything... Use '@' to include additional context
|
|
service.codegpt.title=CodeGPT
|
|
service.openai.title=OpenAI
|
|
service.custom.openai.title=Custom OpenAI
|
|
service.anthropic.title=Anthropic
|
|
service.azure.title=Azure
|
|
service.google.title=Google
|
|
service.llama.title=LLaMA C/C++ (Local)
|
|
service.ollama.title=Ollama (Local)
|
|
validation.error.model.notExists='%s' is not available, please select another model
|
|
validation.error.fieldRequired=This field is required.
|
|
validation.error.mustBeNumber=Value must be a number.
|
|
validation.error.mustBeBetweenZeroAndOne=Value must be between 0 and 1.
|
|
checkForUpdatesTask.title=Checking for CodeGPT update...
|
|
checkForUpdatesTask.notification.message=An update for CodeGPT is available.
|
|
checkForUpdatesTask.notification.installButton=Install update
|
|
llamaServerAgent.buildingProject.description=Building server...
|
|
llamaServerAgent.serverBootup.description=Booting up server...
|
|
notification.compilationError.description=CodeGPT has detected a compilation error. Would you like assistance in resolving it?
|
|
notification.compilationError.okLabel=Resolve errors
|
|
statusBar.widget.tooltip=Status
|
|
shared.promptTemplate=Prompt template:
|
|
shared.infillPromptTemplate=Infill template:
|
|
shared.apiVersion=API version:
|
|
shared.escToCancel=Esc to cancel
|
|
shared.cancel=Cancel
|
|
shared.configuration=Configuration
|
|
shared.port=Port:
|
|
shared.discard=Discard
|
|
shared.notification.doNotShowAgain=Do not show again
|
|
shared.loading=Loading...
|
|
shared.website=Website
|
|
imageAttachmentNotification.content=New image detected on desktop. Would you like to attach it to your current conversation?
|
|
imageAttachmentNotification.action=Attach image
|
|
action.attachImage=Attach Image
|
|
action.attachImageDescription=Attach an image
|
|
imageFileChooser.title=Select Image
|
|
imageAccordion.title=Attached image
|
|
shared.image=Image
|
|
shared.chatCompletions=Chat Completions
|
|
shared.codeCompletions=Code Completions
|
|
codeCompletionsForm.enableFeatureText=Enable code completions
|
|
codeCompletionsForm.parseResponseAsChatCompletions=Parse response as Chat Completions
|
|
editCodePopover.title=Edit Code
|
|
editCodePopover.textField.emptyText=Editing instructions...
|
|
editCodePopover.textField.followUp.emptyText=Ask a follow-up question
|
|
editCodePopover.textField.comment=Provide instructions for the code modification.
|
|
editCodePopover.submitButton.title=Submit Edit
|
|
editCodePopover.acceptButton.title=Accept Suggestion
|
|
editCodePopover.followUpButton.title=Submit Follow-up
|
|
smartTextPane.submitButton.title=Send Message
|
|
smartTextPane.submitButton.description=Send message
|
|
smartTextPane.stopButton.title=Stop
|
|
smartTextPane.stopButton.description=Stop completion
|
|
chatMessageResponseBody.webPages.title=WEB PAGES
|
|
chatMessageResponseBody.webDocs.startProgress.label=Analyzing web content...
|
|
addDocumentation.popup.title=Add Documentation
|
|
addDocumentation.popup.form.name.label=Name:
|
|
addDocumentation.popup.form.url.label=URL:
|
|
addDocumentation.popup.form.url.comment=Enter the full web address of the documentation.
|
|
addDocumentation.popup.form.saveCheckbox.label=Save for future reference
|
|
userMessagePanel.documentation.title=DOCUMENTATION
|
|
suggestionGroupItem.files.displayName=Files
|
|
suggestionGroupItem.folders.displayName=Folders
|
|
suggestionGroupItem.personas.displayName=Personas
|
|
suggestionGroupItem.docs.displayName=Docs
|
|
suggestionGroupItem.git.displayName=Git
|
|
suggestionActionItem.webSearch.displayName=Web
|
|
suggestionActionItem.viewDocumentations.displayName=View all docs
|
|
suggestionActionItem.createPersona.displayName=Create new persona
|
|
suggestionActionItem.createDocumentation.displayName=Create new documentation
|