diff --git a/chat.json b/chat.json
new file mode 100644
index 00000000..b7c04562
--- /dev/null
+++ b/chat.json
@@ -0,0 +1,248 @@
+{
+ "modelLoaderPlaceholder": "选择要加载的模型",
+ "systemPromptPlaceholder": "设置系统提示",
+ "userRoleText": "用户",
+ "assistantRoleText": "助手",
+ "addMessageButtonText": "添加",
+ "addMessageButtonText/toolTip": "在不触发预测的情况下将消息插入上下文中",
+ "sendMessageButtonText": "发送",
+ "sendMessageButtonText/toolTip": "将您的提示和对话历史发送给模型进行处理",
+ "ejectButtonText": "卸载",
+ "unloadTooltip": "从内存中卸载模型",
+ "cancelButtonText": "取消",
+ "loadButtonText": "加载",
+ "advancedSegmentText": "高级",
+ "chatSegmentText": "聊天",
+ "chatSidebarTitle": "聊天列表",
+ "newChatButton": "新建聊天",
+ "newFolderButton": "新建文件夹",
+ "viewModeLabel": "视图模式",
+ "noChatSelected": "未选择聊天",
+ "chatViewOptions": "聊天视图选项",
+ "uiControls/title": "显示设置",
+ "noChatSelectedPlaceholder": "请选择一个聊天",
+ "unnamedChat": "未命名聊天",
+ "emptyFolder": "文件夹为空",
+
+ "tokenCount": "token数",
+ "messageTokenCount": "输入token数",
+ "tokenCount/hint": "消息中的token数量。使用当前选定模型的分词器计算。\n\n需要加载模型。",
+ "messageTokenCount/hint": "消息中的token数量。使用当前选定模型的分词器计算。\n\n**不包括**附件中的token估计值。",
+
+ "notes": "对话笔记",
+ "notes/add/first": "添加笔记",
+ "notes/add/another": "再加一条笔记",
+ "notes/hint": "保存此聊天的笔记。笔记仅供您参考,不会发送给模型。所有更改将会自动保存。",
+ "notes/placeholder": "在这里键入您的笔记...",
+ "notes/delete": "删除笔记",
+ "notes/noteLabel": "笔记",
+ "notes/copyContent": "复制笔记内容",
+
+ "actions/sendMessage/error": "发送消息失败",
+ "actions/loadModel/error": "🥲 加载模型失败",
+ "actions/addFile": "[实验性] 将文件附加到此消息\n(.pdf, 纯文本, 或 .docx)",
+ "actions/addFile/label": "附加文件",
+ "actions/changeRole": "在用户和助手角色之间切换。\n\n这对于引导对话朝特定方向发展非常有用。\n\n可用于构建‘少样本学习’或‘情境学习’场景",
+ "actions/addImage": "添加图片",
+ "actions/deleteMessage": "删除消息",
+ "actions/deleteMessage/confirmation": "您确定要删除这条消息吗?",
+ "actions/copyMessage": "复制消息",
+ "actions/editMessage": "编辑消息",
+ "actions/editMessage/cannotEditPreprocessed": "无法编辑预处理的消息,因为它们在运行预处理器后会被覆盖。要编辑消息,您可以:\n\n - 切换到原始消息并对其进行编辑,或者\n - 更改预处理器,使其产生所需的输出。",
+ "actions/regenerateMessage": "重新生成消息",
+ "actions/regenerateMessage/error": "重新生成消息失败",
+ "actions/branchChat": "在此消息之后分支聊天",
+ "actions/branchChat/error": "分支聊天失败",
+ "actions/continueAssistantMessage": "继续助手消息",
+ "actions/continueAssistantMessage/error": "继续助手消息失败",
+ "actions/predictNext": "生成AI响应",
+ "actions/predictNext/error": "生成AI响应失败",
+ "actions/loadLastModel": "重新加载上次使用的模型",
+ "actions/loadLastModel/tooltip": "点击以加载上次与该聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/loadLastModel/error": "加载上次使用的模型失败。",
+ "actions/continueCurrentModel": "使用当前模型",
+ "actions/continueCurrentModel/tooltip": "当前模型:{{currentModel}}",
+ "actions/changeToLastUsedModel": "加载 {{lastModel}}",
+ "actions/changeToLastUsedModel/error": "切换到上次使用的模型失败。",
+ "actions/changeToLastUsedModel/tooltip": "您上次在这个聊天中发送消息时使用了不同的模型。点击以卸载当前选定的模型({{currentModel}})并加载上次与这个聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/switchToLastUsedModel": "切换到 {{lastModel}}",
+ "actions/switchToLastUsedModel/tooltip": "点击以切换到上次与这个聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/loadModel": "加载模型",
+ "actions/clearLastUsedModel": "清除上次使用的模型",
+ "actions/clearLastUsedModel/error": "清除上次使用的模型失败。",
+ "actions/toggleViewingProcessed/currentlyFalse": "当前查看的是原始消息。点击以查看预处理后的消息。",
+ "actions/toggleViewingProcessed/currentlyTrue": "当前查看的是预处理后的消息。点击以查看原始消息。",
+ "actions/toggleViewingProcessed/hint": "在消息发送给模型之前,它可能会被提示预处理器预处理。点击以切换查看原始消息和预处理后的消息。只有预处理后的消息会发送给模型。",
+ "editMessageConfirm/title": "保留更改?",
+ "editMessageConfirm/message": "您已对消息进行了更改。您想要保留这些更改吗?",
+ "editMessageConfirm/keepEditing": "继续编辑",
+ "editMessageConfirm/save": "保存",
+ "editMessageConfirm/discard": "放弃更改",
+ "tokenCount/totalNotAvailable": "token:{{current}}",
+ "tokenCount/totalAvailable": "token:{{current}}/{{total}}",
+ "tokenCount/totalAvailablePercentage": "上下文已满 {{percentage}}%",
+ "tokenCount/contextOverflow": "未经处理的上下文大于模型的最大token限制。根据您的上下文溢出策略,上下文可能会被截断,或者消息可能不会被发送。",
+ "modelLoader/manualLoadParams/label": "手动选择模型加载参数",
+ "modelLoader/manualLoadParams/hint/before": "(或按住",
+ "modelLoader/manualLoadParams/hint/after": ")",
+ "actions/move/error": "移动失败",
+ "actions/rename/error": "重命名失败",
+ "actions/createChatAtRoot": "新建聊天...",
+ "actions/createChatAtRoot/error": "在根目录创建聊天失败",
+ "actions/createFolderAtRoot": "新建文件夹...",
+ "actions/createFolderAtRoot/error": "在根目录创建文件夹失败",
+ "actions/openInFolder/mac": "在 Finder 中显示",
+ "actions/openInFolder/pc": "在文件资源管理器中显示",
+
+ "actions/createChat/error": "创建聊天失败",
+ "actions/deleteChat/errorTitle": "删除聊天失败",
+
+ "userFile/fileSizeLimit": "文件大小限制为",
+ "userFile/noImageSupport": "模型不支持图片输入",
+ "userFile/errorPrefix": "错误 - ",
+ "userFile/supportedImagePrefix": "不支持的图片类型 - 仅支持",
+ "userFile/supportedImageSuffix": "。",
+ "userFile/unsupportedFileType": "不支持的文件类型 - 仅支持图片、PDF 和 .txt 文件。",
+ "userFile/maxFilesPerMessage": "每条消息的最大文件数已达到。不能添加超过 {{files}} 个文件。",
+ "userFile/maxFileSizePerMessage": "每条消息的最大文件大小已达到。不能添加超过 {{size}} 的文件。",
+ "userFile/maxFileSizePerConversation": "会话的文件大小已达上限,无法添加大于 {{size}} 的文件。",
+ "userFile/failedToUploadError/title": "文件上传失败",
+ "userFile/failedToAddFile/title": "文件添加到对话失败",
+ "errorTitle": "错误",
+ "userFile/chatTerminalDocumentsCount_one": "对话中有 {{count}} 个文档",
+ "userFile/chatTerminalDocumentsCount_other": "对话中有 {{count}} 个文档",
+
+ "prediction/busyModel/title": "模型忙碌中",
+ "prediction/busyModel/message": "请等待模型完成后再试",
+ "prediction/noModel/title": "未选择模型",
+ "prediction/modelLoading": "消息已排队,将在模型加载完成后发送",
+ "prediction/noModel/message": "选择一个模型以发送消息",
+ "prediction/unloadModel/error": "卸载模型失败",
+
+ "retrieval/user/processingLabel": "AI 正在思考...",
+ "retrieval/powerUser/intermediateStepsHidden": "中间步骤已隐藏。点击以展开。",
+ "retrieval/actions/clickToExpand": "点击以展开中间步骤",
+ "retrieval/actions/clickToCollapse": "点击以折叠中间步骤",
+
+ "style": "聊天外观",
+
+ "style/viewMode/markdown": "Markdown",
+ "style/viewMode/plaintext": "纯文本",
+ "style/viewMode/monospace": "等宽字体",
+
+ "speculativeDecodingVisualization/toggle": "可视化已采纳的草稿token",
+ "speculativeDecodingVisualization/fromDraftModel_one": "已采纳的草稿token",
+ "speculativeDecodingVisualization/fromDraftModel_other": "已采纳的草稿token",
+ "speculativeDecodingVisualization/cannotChangeViewMode": "可视化草稿token时无法切换显示模式。",
+
+ "style/fontSize/label": "字体大小",
+ "style/fontSize/medium": "默认",
+ "style/fontSize/large": "大",
+ "style/fontSize/small": "小",
+
+ "style/debugBlocks/label": "显示调试信息块",
+
+ "style/thinkingUI/label": "默认展开推理块",
+ "style/chatFullWidth/label": "聊天容器宽度适应窗口",
+
+ "style/chatUtilityMenusShowLabel/label": "显示聊天实用工具菜单",
+
+ "messageBlocks": {
+ "expandBlockTooltip": "展开内容",
+ "collapseBlockTooltip": "收起内容",
+ "debug": {
+ "label": "调试信息",
+ "collapseTooltip": "收起调试信息块",
+ "expandTooltip": "展开调试信息块"
+ }
+ },
+
+ "chatTabOptions/clearAllMessages": "清空所有聊天记录...",
+ "chatTabOptions/duplicateChat": "复制聊天",
+
+ "topBarActions/duplicateChat": "复制聊天",
+ "topBarActions/clearChat": "清除所有消息",
+ "topBarActions/clearChatConfirmation": "您确定要清除此聊天中的所有消息吗?",
+ "topBarActions/clearChatCancel": "取消",
+ "topBarActions/clearChatDelete": "全部清除",
+
+ "noModels.indexing": "正在索引模型文件...(这可能需要一段时间)",
+ "noModels.downloading": "正在下载您的第一个LLM...",
+ "noModels": "还没有LLM!下载一个开始吧!",
+
+ "plugins": {
+ "pluginTrigger": {
+ "noPlugins": "插件",
+ "multiplePlugins": "{{dynamicValue}} 个插件"
+ },
+ "pluginSelect": {
+ "title": "插件",
+ "dropdown": {
+ "configure": "配置",
+ "disable": "禁用",
+ "fork": "派生",
+ "uninstall": "卸载"
+ },
+ "actionButtons": {
+ "create": "+新建",
+ "import": "导入",
+ "discover": "发现"
+ },
+ "recentlyCreated": {
+ "title": "最近创建的插件",
+ "placeholder": "你创建的插件会显示在这里"
+ },
+ "startRunningDevelopmentPlugin/error": "开发模式插件启动失败",
+ "stopRunningDevelopmentPlugin/error": "开发模式插件停止失败",
+ "forceReInitPlugin/error": "重启插件失败"
+ },
+ "pluginConfiguration": {
+ "title": "插件配置",
+ "selectAPlugin": "选择一个插件以编辑其配置",
+ "preprocessorAndGenerator": "此插件包含自定义预处理器和生成器",
+ "generatorOnly": "此插件包含自定义生成器",
+ "preprocessorOnly": "此插件包含自定义预处理器"
+ },
+ "instructions": {
+ "runTheFollowing": "要运行你的插件,请打开终端并输入:",
+ "pushTo": "将插件推送到 Hub 与他人分享(可选)",
+ "createdSuccessfully": "插件创建成功",
+ "creatingPlugin": "正在创建插件...",
+ "projectFilesTitle": "项目文件",
+ "buttons": {
+ "documentation": "文档",
+ "dismiss": "关闭",
+ "publish": "发布",
+ "openInZed": "在 Zed 中打开",
+ "openInVscode": "在 VS Code 中打开",
+ "revealInFinder": "在 Finder 中显示",
+ "openInFileExplorer": "在文件资源管理器中打开"
+ }
+ },
+ "localFork": {
+ "error": "创建插件的本地副本失败"
+ },
+ "restartErrorPlugin/error": "重启插件失败"
+ },
+
+ "genInfo": {
+ "tokensPerSecond": "{{tokensPerSecond}} token/s",
+ "predictedTokensCount": "{{predictedTokensCount}} token",
+ "timeToFirstTokenSec": "首个token用时 {{timeToFirstTokenSec}} s",
+ "stopReason": "停止原因: {{stopReason}}",
+ "stopReason.userStopped": "用户已停止",
+ "stopReason.modelUnloaded": "模型已卸载",
+ "stopReason.failed": "生成失败",
+ "stopReason.eosFound": "检测到 EOS token",
+ "stopReason.stopStringFound": "发现停止字符串",
+ "stopReason.toolCalls": "工具调用",
+ "stopReason.maxPredictedTokensReached": "达到最大预测词元",
+ "stopReason.contextLengthReached": "达到上下文长度上限",
+ "speculativeDecodedBy": "草稿模型:{{decodedBy}}",
+ "speculativeDecodingStats": "已采纳 {{accepted}}/{{total}} 个草稿token({{percentage}}%)"
+ },
+
+ "tabs": {
+ "systemPromptEditorTab.headerLabel": "编辑系统提示词"
+ }
+}
diff --git a/config.json b/config.json
new file mode 100644
index 00000000..942101cd
--- /dev/null
+++ b/config.json
@@ -0,0 +1,621 @@
+{
+ "noInstanceSelected": "未选择模型实例",
+ "resetToDefault": "重置",
+ "showAdvancedSettings": "显示高级设置",
+ "showAll": "全部",
+ "basicSettings": "基础",
+ "configSubtitle": "加载或保存预设并尝试模型参数覆盖",
+ "inferenceParameters/title": "预测参数",
+ "inferenceParameters/info": "尝试影响预测的参数。",
+ "generalParameters/title": "通用",
+ "samplingParameters/title": "采样",
+ "basicTab": "基础",
+ "advancedTab": "高级",
+ "advancedTab/title": "🧪 高级配置",
+ "advancedTab/expandAll": "展开所有",
+ "advancedTab/overridesTitle": "配置覆盖",
+ "advancedTab/noConfigsText": "您没有未保存的更改 - 编辑上方值以在此处查看覆盖。",
+ "loadInstanceFirst": "加载模型以查看可配置参数",
+ "noListedConfigs": "无可配置参数",
+ "generationParameters/info": "尝试影响文本生成的基础参数。",
+ "loadParameters/title": "加载参数",
+ "loadParameters/description": "控制模型初始化和加载到内存的方式的设置。",
+ "loadParameters/reload": "重新加载以应用更改",
+ "loadParameters/reload/error": "重新加载模型失败",
+ "discardChanges": "放弃更改",
+ "loadModelToSeeOptions": "加载模型以查看选项",
+ "schematicsError.title": "配置结构在以下字段存在错误:",
+ "manifestSections": {
+ "structuredOutput/title": "结构化输出",
+ "speculativeDecoding/title": "投机解码",
+ "sampling/title": "采样",
+ "settings/title": "设置",
+ "toolUse/title": "工具调用",
+ "promptTemplate/title": "提示词模板",
+ "customFields/title": "自定义字段"
+ },
+
+ "llm.prediction.systemPrompt/title": "系统提示",
+ "llm.prediction.systemPrompt/description": "使用此字段向模型提供背景指令,如一套规则、约束或一般要求。",
+ "llm.prediction.systemPrompt/subTitle": "AI 指南",
+ "llm.prediction.systemPrompt/openEditor": "编辑器",
+ "llm.prediction.systemPrompt/closeEditor": "关闭编辑器",
+ "llm.prediction.systemPrompt/openedEditor": "在编辑器中打开...",
+ "llm.prediction.systemPrompt/edit": "编辑系统提示符...",
+ "llm.prediction.systemPrompt/addInstructionsWithMore": "添加说明...",
+ "llm.prediction.systemPrompt/addInstructions": "添加说明",
+ "llm.prediction.temperature/title": "温度",
+ "llm.prediction.temperature/subTitle": "引入多少随机性。0 将始终产生相同的结果,而较高值将增加创造性和变化。",
+ "llm.prediction.temperature/info": "来自 llama.cpp 帮助文档:\"默认值为 <{{dynamicValue}}>,它在随机性和确定性之间提供了平衡。极端情况下,温度为 0 会始终选择最可能的下一个token,导致每次运行的输出相同\"",
+ "llm.prediction.llama.sampling/title": "采样",
+ "llm.prediction.topKSampling/title": "Top K 采样",
+ "llm.prediction.topKSampling/subTitle": "将下一个token限制为模型预测的前 k 个最可能的token。作用类似于温度",
+ "llm.prediction.topKSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-k 采样是一种仅从模型预测的前 k 个最可能的token中选择下一个token的文本生成方法。\n\n它有助于减少生成低概率或无意义token的风险,但也可能限制输出的多样性。\n\n更高的 top-k 值(例如,100)将考虑更多token,从而生成更多样化的文本,而较低的值(例如,10)将专注于最可能的token,生成更保守的文本。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.llama.cpuThreads/title": "CPU 线程",
+ "llm.prediction.llama.cpuThreads/subTitle": "推理期间使用的 CPU 线程数",
+ "llm.prediction.llama.cpuThreads/info": "计算期间要使用的线程数。增加线程数并不总是与更好的性能相关联。默认值为 <{{dynamicValue}}>。",
+ "llm.prediction.maxPredictedTokens/title": "限制响应长度",
+ "llm.prediction.maxPredictedTokens/subTitle": "可选地限制 AI 响应的长度",
+ "llm.prediction.maxPredictedTokens/info": "控制聊天机器人的响应最大长度。开启以设置响应的最大长度限制,或关闭以让聊天机器人决定何时停止。",
+ "llm.prediction.maxPredictedTokens/inputLabel": "最大响应长度(token)",
+ "llm.prediction.maxPredictedTokens/wordEstimate": "约 {{maxWords}} 词",
+ "llm.prediction.repeatPenalty/title": "重复惩罚",
+ "llm.prediction.repeatPenalty/subTitle": "多大程度上避免重复相同的token",
+ "llm.prediction.repeatPenalty/info": "来自 llama.cpp 帮助文档:\"有助于防止模型生成重复或单调的文本。\n\n更高的值(例如,1.5)将更强烈地惩罚重复,而更低的值(例如,0.9)将更为宽容。\" • 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.minPSampling/title": "最小 P 采样",
+ "llm.prediction.minPSampling/subTitle": "token被选为输出的最低基本概率",
+ "llm.prediction.minPSampling/info": "来自 llama.cpp 帮助文档:\n\n相对于最可能token的概率,token被视为考虑的最低概率。必须在 [0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.topPSampling/title": "Top P 采样",
+ "llm.prediction.topPSampling/subTitle": "可能的下一个token的最小累积概率。作用类似于温度",
+ "llm.prediction.topPSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-p 采样,也称为核心采样,是另一种文本生成方法,从累积概率至少为 p 的token子集中选择下一个token。\n\n这种方法通过同时考虑token的概率和要从中采样的token数量,在多样性和质量之间提供了平衡。\n\n更高的 top-p 值(例如,0.95)将导致更多样化的文本,而较低的值(例如,0.5)将生成更集中和保守的文本。必须在 (0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.stopStrings/title": "停止字符串",
+ "llm.prediction.stopStrings/subTitle": "应该停止模型生成更多token的字符串",
+ "llm.prediction.stopStrings/info": "遇到特定字符串时将停止模型生成更多token",
+ "llm.prediction.stopStrings/placeholder": "输入一个字符串并按 ⏎",
+ "llm.prediction.contextOverflowPolicy/title": "上下文溢出",
+ "llm.prediction.contextOverflowPolicy/subTitle": "当对话超出模型处理能力时,模型应该如何表现",
+ "llm.prediction.contextOverflowPolicy/info": "决定当对话超过模型的工作内存('上下文')大小时该怎么做",
+ "llm.prediction.llama.frequencyPenalty/title": "频率惩罚",
+ "llm.prediction.llama.presencePenalty/title": "存在惩罚",
+ "llm.prediction.llama.tailFreeSampling/title": "尾部自由采样",
+ "llm.prediction.llama.locallyTypicalSampling/title": "局部典型采样",
+ "llm.prediction.llama.xtcProbability/title": "XTC 采样概率",
+ "llm.prediction.llama.xtcProbability/subTitle": "XTC(排除顶选)采样器将在每个生成token时以该概率激活。XTC 采样有助于提升创造力,减少陈词滥调",
+ "llm.prediction.llama.xtcProbability/info": "XTC(排除顶选)采样将以该概率在每个token生成时激活。XTC 采样通常可以提升创造力并减少陈词滥调",
+ "llm.prediction.llama.xtcThreshold/title": "XTC 采样阈值",
+ "llm.prediction.llama.xtcThreshold/subTitle": "XTC(排除顶选)阈值。在 `xtc-probability` 概率下,查找概率介于 `xtc-threshold` 和 0.5 之间的token,并仅保留其中概率最低的一个",
+ "llm.prediction.llama.xtcThreshold/info": "XTC(排除顶选)阈值。在 `xtc-probability` 概率下,查找概率介于 `xtc-threshold` 和 0.5 之间的所有token,仅保留概率最低的一个,其余全部移除",
+ "llm.prediction.mlx.topKSampling/title": "Top K 采样",
+ "llm.prediction.mlx.topKSampling/subTitle": "将下一个token限制为概率最高的前 k 个token。作用类似于温度",
+ "llm.prediction.mlx.topKSampling/info": "仅从概率最高的前 k 个token中选择下一个token,作用类似于温度",
+ "llm.prediction.onnx.topKSampling/title": "Top K 采样",
+ "llm.prediction.onnx.topKSampling/subTitle": "将下一个token限制为前 k 个最可能的token。作用类似于温度",
+ "llm.prediction.onnx.topKSampling/info": "来自 ONNX 文档:\n\n保留最高概率词汇表token的数量以进行 top-k 过滤\n\n• 默认情况下此过滤器关闭",
+ "llm.prediction.onnx.repeatPenalty/title": "重复惩罚",
+ "llm.prediction.onnx.repeatPenalty/subTitle": "多大程度上避免重复相同的token",
+ "llm.prediction.onnx.repeatPenalty/info": "更高的值阻止模型重复自身",
+ "llm.prediction.onnx.topPSampling/title": "Top P 采样",
+ "llm.prediction.onnx.topPSampling/subTitle": "可能的下一个token的最小累积概率。作用类似于温度",
+ "llm.prediction.onnx.topPSampling/info": "来自 ONNX 文档:\n\n仅保留累积概率达到或超过 TopP 的最可能token用于生成\n\n• 默认情况下此过滤器关闭",
+ "llm.prediction.seed/title": "种子",
+ "llm.prediction.structured/title": "结构化输出",
+ "llm.prediction.structured/info": "结构化输出",
+ "llm.prediction.structured/description": "高级:您可以提供[JSON Schema](https://json-schema.org/learn/miscellaneous-examples)来强制执行模型中的特定输出格式。阅读[留档](https://lmstudio.ai/docs/advanced/structured-output)了解更多",
+ "llm.prediction.tools/title": "工具调用",
+ "llm.prediction.tools/description": "高级功能:你可以提供 JSON 格式的工具列表,模型可请求调用这些工具。详情请查阅[文档](https://lmstudio.ai/docs/advanced/tool-use)",
+ "llm.prediction.tools/serverPageDescriptionAddon": "通过服务端 API 调用时,请将其作为 `tools` 字段传入请求体",
+ "llm.prediction.promptTemplate/title": "提示模板",
+ "llm.prediction.promptTemplate/subTitle": "聊天中消息发送给模型的格式。更改此设置可能会引入意外行为 - 确保您知道自己在做什么!",
+ "llm.prediction.speculativeDecoding.numDraftTokensExact/title": "草稿生成token数",
+ "llm.prediction.speculativeDecoding.numDraftTokensExact/subTitle": "每生成一个主模型token,草稿模型生成的token数量。平衡计算量与收益,选择合适的数值",
+ "llm.prediction.speculativeDecoding.minContinueDraftingProbability/title": "草稿概率阈值",
+ "llm.prediction.speculativeDecoding.minContinueDraftingProbability/subTitle": "仅当token概率高于该阈值时才继续草稿。值越高风险越低,收益也越低",
+ "llm.prediction.speculativeDecoding.minDraftLengthToConsider/title": "最小草稿长度",
+ "llm.prediction.speculativeDecoding.minDraftLengthToConsider/subTitle": "草稿长度低于该值将被主模型忽略。值越高风险越低,收益也越低",
+ "llm.prediction.speculativeDecoding.maxTokensToDraft/title": "最大草稿长度",
+ "llm.prediction.speculativeDecoding.maxTokensToDraft/subTitle": "草稿中允许的最大token数。如果所有token概率都高于阈值,则为上限。值越低风险越低,收益也越低",
+ "llm.prediction.speculativeDecoding.draftModel/title": "草稿模型",
+ "llm.prediction.reasoning.parsing/title": "推理过程解析方式",
+ "llm.prediction.reasoning.parsing/subTitle": "控制模型输出中推理过程的解析方式",
+
+ "llm.load.mainGpu/title": "主 GPU",
+ "llm.load.mainGpu/subTitle": "用于模型计算的 GPU 优先级",
+ "llm.load.mainGpu/placeholder": "选择主 GPU...",
+ "llm.load.splitStrategy/title": "拆分策略",
+ "llm.load.splitStrategy/subTitle": "如何跨 GPU 拆分模型计算",
+ "llm.load.splitStrategy/placeholder": "选择拆分策略...",
+ "llm.load.offloadKVCacheToGpu/title": "将 KV 缓存卸载到 GPU 内存",
+ "llm.load.offloadKVCacheToGpu/subTitle": "将 KV 缓存卸载到 GPU 内存。这可以提高性能但需要更多 GPU 内存",
+ "load.gpuStrictVramCap/title": "限制模型卸载至专用 GPU 内存",
+ "load.gpuStrictVramCap.customSubTitleOff": "关闭:若专用 GPU 内存已满,允许将模型权重卸载至共享内存",
+ "load.gpuStrictVramCap.customSubTitleOn": "开启:系统将限制模型权重的卸载仅限于专用 GPU 内存及 RAM 。上下文仍可能使用共享内存",
+ "load.gpuStrictVramCap.customGpuOffloadWarning": "模型的卸载仅限于专用 GPU 内存。实际卸载的层数可能会有所不同",
+ "load.allGpusDisabledWarning": "所有 GPU 目前均被禁用。请启用至少一个以进行卸载",
+
+ "llm.load.contextLength/title": "上下文长度",
+ "llm.load.contextLength/subTitle": "模型可以一次性关注的token最大数量。请参阅“推理参数”下的“对话溢出”选项以获取更多管理方式",
+ "llm.load.contextLength/info": "指定模型一次可以考虑的最大token数量,影响其处理过程中保留的上下文量",
+ "llm.load.contextLength/warning": "设置较高的上下文长度值会对内存使用产生显著影响",
+ "llm.load.seed/title": "种子",
+ "llm.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1 表示随机",
+ "llm.load.seed/info": "随机种子:设置随机数生成的种子以确保可重复的结果",
+
+ "llm.load.llama.evalBatchSize/title": "评估批处理大小",
+ "llm.load.llama.evalBatchSize/subTitle": "每次处理的输入token数量。增加此值会提高性能,但会增加内存使用量",
+ "llm.load.llama.evalBatchSize/info": "设置评估期间一起处理的示例数量,影响速度和内存使用",
+ "llm.load.llama.ropeFrequencyBase/title": "RoPE 频率基",
+ "llm.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下提高性能",
+ "llm.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基频,影响位置信息的嵌入方式",
+ "llm.load.llama.ropeFrequencyScale/title": "RoPE 频率比例",
+ "llm.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放,以使用 RoPE 扩展有效上下文",
+ "llm.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放,以控制位置编码的粒度",
+ "llm.load.llama.acceleration.offloadRatio/title": "GPU 卸载",
+ "llm.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速的离散模型层数",
+ "llm.load.llama.acceleration.offloadRatio/info": "设置卸载到 GPU 的层数。",
+ "llm.load.llama.flashAttention/title": "快速注意力",
+ "llm.load.llama.flashAttention/subTitle": "降低某些模型的内存使用量和生成时间",
+ "llm.load.llama.flashAttention/info": "加速注意力机制,实现更快、更高效的处理",
+ "llm.load.numExperts/title": "专家数量",
+ "llm.load.numExperts/subTitle": "模型中使用的专家数量",
+ "llm.load.numExperts/info": "模型中使用的专家数量",
+ "llm.load.llama.keepModelInMemory/title": "保持模型在内存中",
+ "llm.load.llama.keepModelInMemory/subTitle": "即使模型卸载到 GPU 也预留系统内存。提高性能但需要更多的系统 RAM",
+ "llm.load.llama.keepModelInMemory/info": "防止模型交换到磁盘,确保更快的访问,但以更高的 RAM 使用率为代价",
+ "llm.load.llama.useFp16ForKVCache/title": "使用 FP16 用于 KV 缓存",
+ "llm.load.llama.useFp16ForKVCache/info": "通过以半精度(FP16)存储缓存来减少内存使用",
+ "llm.load.llama.tryMmap/title": "尝试 mmap()",
+ "llm.load.llama.tryMmap/subTitle": "提高模型的加载时间。禁用此功能可能在模型大于可用系统 RAM 时提高性能",
+ "llm.load.llama.tryMmap/info": "直接从磁盘加载模型文件到内存",
+ "llm.load.llama.cpuThreadPoolSize/title": "CPU 线程池大小",
+ "llm.load.llama.cpuThreadPoolSize/subTitle": "为模型计算分配的 CPU 线程池线程数",
+ "llm.load.llama.cpuThreadPoolSize/info": "分配用于模型计算的 CPU 线程池线程数量。线程数增加未必总能带来更佳性能。默认值为 <{{dynamicValue}}>。",
+ "llm.load.llama.kCacheQuantizationType/title": "K 缓存量化类型",
+ "llm.load.llama.kCacheQuantizationType/subTitle": "较低的量化类型可降低内存占用,但可能影响模型质量,不同模型效果差异大。",
+ "llm.load.llama.vCacheQuantizationType/title": "V 缓存量化类型",
+ "llm.load.llama.vCacheQuantizationType/subTitle": "较低的量化类型可降低内存占用,但可能影响模型质量,不同模型效果差异大。",
+ "llm.load.llama.vCacheQuantizationType/turnedOnWarning": "⚠️ 如未启用Flash Attention,请务必关闭该选项",
+ "llm.load.llama.vCacheQuantizationType/disabledMessage": "仅在启用Flash Attention时可用",
+ "llm.load.llama.vCacheQuantizationType/invalidF32MetalState": "⚠️ 使用 F32 时请禁用Flash Attention",
+ "llm.load.useUnifiedKvCache/title": "统一 KV 缓存",
+ "llm.load.useUnifiedKvCache/subTitle": "控制并发预测是否共享单个 KV 缓存以节省内存。禁用此选项可确保每个预测都能利用完整的上下文长度,但会使用更多内存",
+ "llm.load.numParallelSessions/title": "最大并发预测数",
+ "llm.load.numParallelSessions/subTitle": "模型在给定时间可以运行的最大预测数。并发增加时,每个单独预测的速度可能会降低,但每个预测的启动速度会更快,总吞吐量可能更高",
+ "llm.load.numCpuExpertLayersRatio/title": "强制将 MoE 权重加载到 CPU 的层数",
+ "llm.load.numCpuExpertLayersRatio/subTitle": "强制将专家层加载到 CPU 的层数。节省 VRAM,并且可能比部分 GPU 卸载更快。如果模型完全适合 VRAM,则不建议使用。",
+ "llm.load.numCpuExpertLayersRatio/info": "指定强制将专家层加载到 CPU 的层数。将注意力层保留在 GPU 上,在保持推理速度相当快的同时节省 VRAM。",
+ "llm.load.mlx.kvCacheBits/title": "KV 缓存量化位数",
+ "llm.load.mlx.kvCacheBits/subTitle": "KV 缓存量化使用的位数",
+ "llm.load.mlx.kvCacheBits/info": "设置 KV 缓存需要量化成的位数",
+ "llm.load.mlx.kvCacheBits/turnedOnWarning": "启用 KV 缓存量化时,上下文长度设置将被忽略",
+ "llm.load.mlx.kvCacheGroupSize/title": "KV 缓存量化分组大小",
+ "llm.load.mlx.kvCacheGroupSize/subTitle": "量化操作时分组的大小,组越大内存占用越低,但模型质量可能下降",
+ "llm.load.mlx.kvCacheGroupSize/info": "KV 缓存量化时使用的分组位数",
+ "llm.load.mlx.kvCacheQuantizationStart/title": "KV 缓存量化:开始量化的上下文长度",
+ "llm.load.mlx.kvCacheQuantizationStart/subTitle": "达到此上下文长度后开始对 KV 缓存进行量化",
+ "llm.load.mlx.kvCacheQuantizationStart/info": "达到此上下文长度后开始对 KV 缓存进行量化",
+ "llm.load.mlx.kvCacheQuantization/title": "KV 缓存量化",
+ "llm.load.mlx.kvCacheQuantization/subTitle": "对模型的 KV 缓存进行量化,可加快生成速度并降低内存占用,但可能影响输出质量。",
+ "llm.load.mlx.kvCacheQuantization/bits/title": "KV 缓存量化位数",
+ "llm.load.mlx.kvCacheQuantization/bits/tooltip": "KV 缓存量化所用的位数",
+ "llm.load.mlx.kvCacheQuantization/bits/bits": "位数",
+ "llm.load.mlx.kvCacheQuantization/groupSize/title": "分组策略",
+ "llm.load.mlx.kvCacheQuantization/groupSize/accuracy": "高精度",
+ "llm.load.mlx.kvCacheQuantization/groupSize/balanced": "均衡",
+ "llm.load.mlx.kvCacheQuantization/groupSize/speedy": "极速",
+ "llm.load.mlx.kvCacheQuantization/groupSize/tooltip": "高级:量化乘法的分组大小配置\n\n• 高精度 = 分组 32\n• 均衡 = 分组 64\n• 极速 = 分组 128\n",
+ "llm.load.mlx.kvCacheQuantization/quantizedStart/title": "达到此上下文长度后开始量化",
+ "llm.load.mlx.kvCacheQuantization/quantizedStart/tooltip": "当上下文长度达到该值时,开始对 KV 缓存进行量化",
+
+ "embedding.load.contextLength/title": "上下文长度",
+ "embedding.load.contextLength/subTitle": "模型可以一次性关注的token最大数量。请参阅“推理参数”下的“对话溢出”选项以获取更多管理方式",
+ "embedding.load.contextLength/info": "指定模型一次可以考虑的最大token数量,影响其处理过程中保留的上下文量",
+ "embedding.load.llama.ropeFrequencyBase/title": "RoPE 频率基",
+ "embedding.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下提高性能",
+ "embedding.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基频,影响位置信息的嵌入方式",
+ "embedding.load.llama.evalBatchSize/title": "评估批处理大小",
+ "embedding.load.llama.evalBatchSize/subTitle": "每次处理的输入token数量。增加此值会提高性能,但会增加内存使用量",
+ "embedding.load.llama.evalBatchSize/info": "设置评估期间一起处理的token数量",
+ "embedding.load.llama.ropeFrequencyScale/title": "RoPE 频率比例",
+ "embedding.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放,以使用 RoPE 扩展有效上下文",
+ "embedding.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放,以控制位置编码的粒度",
+ "embedding.load.llama.acceleration.offloadRatio/title": "GPU 卸载",
+ "embedding.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速的离散模型层数",
+ "embedding.load.llama.acceleration.offloadRatio/info": "设置卸载到 GPU 的层数。",
+ "embedding.load.llama.keepModelInMemory/title": "保持模型在内存中",
+ "embedding.load.llama.keepModelInMemory/subTitle": "即使模型卸载到 GPU 也预留系统内存。提高性能但需要更多的系统 RAM",
+ "embedding.load.llama.keepModelInMemory/info": "防止模型交换到磁盘,确保更快的访问,但以更高的 RAM 使用率为代价",
+ "embedding.load.llama.tryMmap/title": "尝试 mmap()",
+ "embedding.load.llama.tryMmap/subTitle": "提高模型的加载时间。禁用此功能可能在模型大于可用系统 RAM 时提高性能",
+ "embedding.load.llama.tryMmap/info": "直接从磁盘加载模型文件到内存",
+ "embedding.load.seed/title": "种子",
+ "embedding.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1 表示随机种子",
+
+ "embedding.load.seed/info": "随机种子:设置随机数生成的种子以确保可重复的结果",
+
+ "presetTooltip": {
+ "included/title": "预设值",
+ "included/description": "以下字段将会被应用",
+ "included/empty": "在此上下文中,此预设没有适用的字段。",
+ "included/conflict": "您将被要求选择是否应用此值",
+ "separateLoad/title": "加载时配置",
+ "separateLoad/description.1": "预设还包含以下加载时配置。加载时配置是全模型范围的,并且需要重新加载模型才能生效。按住",
+ "separateLoad/description.2": "应用到",
+ "separateLoad/description.3": "。",
+ "excluded/title": "可能不适用",
+ "excluded/description": "以下字段包含在预设中,但在当前上下文中不适用。",
+ "legacy/title": "旧版预设",
+ "legacy/description": "这是一个旧版预设。它包括以下字段,这些字段现在要么自动处理,要么不再适用。",
+ "button/publish": "发布到 Hub",
+ "button/pushUpdate": "推送更改到 Hub",
+ "button/noChangesToPush": "没有可推送的更改",
+ "button/export": "导出",
+ "hubLabel": "来自 {{user}} 的 Hub 预设",
+ "ownHubLabel": "您的 Hub 预设"
+ },
+
+ "customInputs": {
+ "string": {
+ "emptyParagraph": "<空>"
+ },
+ "checkboxNumeric": {
+ "off": "关闭"
+ },
+ "llamaCacheQuantizationType": {
+ "off": "关闭"
+ },
+ "mlxKvCacheBits": {
+ "off": "关闭"
+ },
+ "stringArray": {
+ "empty": "<空>"
+ },
+ "llmPromptTemplate": {
+ "type": "类型",
+ "types.jinja/label": "模板 (Jinja)",
+ "jinja.bosToken/label": "开始token (BOS Token)",
+ "jinja.eosToken/label": "结束token (EOS Token)",
+ "jinja.template/label": "模板",
+ "jinja/error": "解析 Jinja 模板失败: {{error}}",
+ "jinja/empty": "请在上方输入一个 Jinja 模板。",
+ "jinja/unlikelyToWork": "您提供的 Jinja 模板很可能无法正常工作,因为它没有引用变量 \"messages\"。请检查您输入的模板是否正确。",
+ "types.manual/label": "手动",
+ "manual.subfield.beforeSystem/label": "系统前缀",
+ "manual.subfield.beforeSystem/placeholder": "输入系统前缀...",
+ "manual.subfield.afterSystem/label": "系统后缀",
+ "manual.subfield.afterSystem/placeholder": "输入系统后缀...",
+ "manual.subfield.beforeUser/label": "用户前缀",
+ "manual.subfield.beforeUser/placeholder": "输入用户前缀...",
+ "manual.subfield.afterUser/label": "用户后缀",
+ "manual.subfield.afterUser/placeholder": "输入用户后缀...",
+ "manual.subfield.beforeAssistant/label": "助手前缀",
+ "manual.subfield.beforeAssistant/placeholder": "输入助手前缀...",
+ "manual.subfield.afterAssistant/label": "助手后缀",
+ "manual.subfield.afterAssistant/placeholder": "输入助手后缀...",
+ "stopStrings/label": "额外停止字符串",
+ "stopStrings/subTitle": "除了用户指定的停止字符串之外,还将使用特定于模板的停止字符串。"
+ },
+ "contextLength": {
+ "maxValueTooltip": "这是模型训练所能处理的最大token数量。点击以将上下文设置为此值",
+ "maxValueTextStart": "模型支持最多",
+ "maxValueTextEnd": "个token",
+ "tooltipHint": "尽管模型可能支持一定数量的token,但如果您的机器资源无法处理负载,性能可能会下降 - 增加此值时请谨慎"
+ },
+ "contextOverflowPolicy": {
+ "stopAtLimit": "到达限制时停止",
+ "stopAtLimitSub": "一旦模型的内存满载即停止生成",
+ "truncateMiddle": "截断中间",
+ "truncateMiddleSub": "从对话中间移除消息以为新消息腾出空间。模型仍然会记住对话的开头",
+ "rollingWindow": "滚动窗口",
+ "rollingWindowSub": "模型将始终接收最近的几条消息,但可能会忘记对话的开头"
+ },
+ "llamaAccelerationOffloadRatio": {
+ "max": "最大",
+ "off": "关闭"
+ },
+ "gpuSplitStrategy": {
+ "evenly": "均匀分配",
+ "favorMainGpu": "优先主 GPU"
+ },
+ "speculativeDecodingDraftModel": {
+ "readMore": "了解工作原理",
+ "placeholder": "选择兼容的草稿模型",
+ "noCompatible": "当前模型选择下未找到兼容的草稿模型",
+ "stillLoading": "正在识别兼容的草稿模型...",
+ "notCompatible": "所选草稿模型()与当前模型选择()不兼容。",
+ "off": "关闭",
+ "loadModelToSeeOptions": "加载模型 以查看兼容选项",
+ "compatibleWithNumberOfModels": "推荐用于至少 {{dynamicValue}} 个模型",
+ "recommendedForSomeModels": "推荐用于部分模型",
+ "recommendedForLlamaModels": "推荐用于 Llama 模型",
+ "recommendedForQwenModels": "推荐用于 Qwen 模型",
+ "onboardingModal": {
+ "introducing": "新功能介绍",
+ "speculativeDecoding": "投机解码",
+ "firstStepBody": "llama.cpp 和 MLX 模型推理加速",
+ "secondStepTitle": "投机解码能够加速推理",
+ "secondStepBody": "投机解码是一种让两个模型协作的技术:\n - 一个规模较大的“主”模型\n - 一个较小的“草稿”模型\n\n生成过程中,草稿模型会快速提出token,由主模型进行验证。验证的过程比实际生成更快。\n**通常,主模型与草稿模型的体积差距越大,加速效果越明显。**\n\n为了保证质量,主模型只会接受与自身结果一致的token,从而实现大模型的响应质量与更快的推理速度。两个模型必须使用相同的词表。",
+ "draftModelRecommendationsTitle": "草稿模型推荐",
+ "basedOnCurrentModels": "基于您当前的模型",
+ "close": "关闭",
+ "next": "下一步",
+ "done": "完成"
+ },
+ "speculativeDecodingLoadModelToSeeOptions": "请先加载模型 ",
+ "errorEngineNotSupported": "投机解码需引擎 {{engineName}} 至少版本 {{minVersion}}。请更新引擎()并重新加载模型以使用此功能。",
+ "errorEngineNotSupported/noKey": "投机解码需引擎 {{engineName}} 至少版本 {{minVersion}}。请更新引擎并重新加载模型以使用此功能。"
+ },
+ "llmReasoningParsing": {
+ "startString/label": "起始字符串",
+ "startString/placeholder": "请输入起始字符串...",
+ "endString/label": "结束字符串",
+ "endString/placeholder": "请输入结束字符串..."
+ }
+ },
+ "saveConflictResolution": {
+ "title": "选择要包含在预设中的值",
+ "description": "挑选并选择要保留的值",
+ "instructions": "点击一个值以包含它",
+ "userValues": "先前的值",
+ "presetValues": "新值",
+ "confirm": "确认",
+ "cancel": "取消"
+ },
+ "applyConflictResolution": {
+ "title": "保留哪些值?",
+ "description": "您有未提交的更改与即将应用的预设有重叠",
+ "instructions": "点击一个值以保留它",
+ "userValues": "当前值",
+ "presetValues": "即将应用的预设值",
+ "confirm": "确认",
+ "cancel": "取消"
+ },
+ "empty": "<空>",
+ "noModelSelected": "未选择模型",
+ "apiIdentifier.label": "API 标识符",
+ "apiIdentifier.hint": "可选,为此模型提供一个标识符。该标识符将在 API 请求中使用。留空则使用默认标识符。",
+ "idleTTL.label": "空闲时自动卸载",
+ "idleTTL.hint": "如设置,模型在空闲指定时间后将自动卸载。",
+ "idleTTL.mins": "分钟",
+
+ "presets": {
+ "title": "预设",
+ "commitChanges": "提交更改",
+ "commitChanges/description": "将您的更改提交给预设。",
+ "commitChanges.manual": "检测到新的字段。您将能够选择要包含在预设中的更改。",
+ "commitChanges.manual.hold.0": "按住",
+ "commitChanges.manual.hold.1": "选择要提交给预设的更改。",
+ "commitChanges.saveAll.hold.0": "按住",
+ "commitChanges.saveAll.hold.1": "保存所有更改。",
+ "commitChanges.saveInPreset.hold.0": "按住",
+ "commitChanges.saveInPreset.hold.1": "仅保存已经包含在预设中的字段的更改。",
+ "commitChanges/error": "未能将更改提交给预设。",
+ "commitChanges.manual/description": "选择要包含在预设中的更改。",
+ "saveAs": "另存为新预设...",
+ "presetNamePlaceholder": "为预设输入一个名称...",
+ "cannotCommitChangesLegacy": "这是一个旧版预设,无法修改。您可以使用“另存为新预设...”创建一个副本。",
+ "cannotCommitChangesNoChanges": "没有更改可以提交。",
+ "emptyNoUnsaved": "选择一个预设...",
+ "emptyWithUnsaved": "未保存的预设",
+ "saveEmptyWithUnsaved": "保存预设为...",
+ "saveConfirm": "保存",
+ "saveCancel": "取消",
+ "saving": "正在保存...",
+ "save/error": "未能保存预设。",
+ "deselect": "取消选择预设",
+ "deselect/error": "取消选择预设失败。",
+ "select/error": "选择预设失败。",
+ "delete/error": "删除预设失败。",
+ "discardChanges": "丢弃未保存的更改",
+ "discardChanges/info": "丢弃所有未提交的更改并恢复预设至原始状态",
+ "newEmptyPreset": "创建新的空预设...",
+ "importPreset": "导入",
+ "contextMenuCopyIdentifier": "复制预设标识符",
+ "contextMenuSelect": "选择预设",
+ "contextMenuDelete": "删除",
+ "contextMenuShare": "发布中...",
+ "contextMenuOpenInHub": "在 Hub 上查看",
+ "contextMenuPullFromHub": "拉取最新版本",
+ "contextMenuPushChanges": "推送更改到 Hub",
+ "contextMenuPushingChanges": "正在推送...",
+ "contextMenuPushedChanges": "更改已推送",
+ "contextMenuExport": "导出文件",
+ "contextMenuRevealInExplorer": "在文件资源管理器中显示",
+ "contextMenuRevealInFinder": "在 Finder 中显示",
+ "share": {
+ "title": "发布预设",
+ "action": "分享你的预设,让他人下载、点赞和fork",
+ "presetOwnerLabel": "所有者",
+ "uploadAs": "你的预设将以 {{name}} 创建",
+ "presetNameLabel": "预设名称",
+ "descriptionLabel": "描述(可选)",
+ "loading": "正在发布...",
+ "success": "预设已成功发布",
+ "presetIsLive": " 已在 Hub 上发布!",
+ "close": "关闭",
+ "confirmViewOnWeb": "在网页上查看",
+ "confirmCopy": "复制链接",
+ "confirmCopied": "已复制!",
+ "pushedToHub": "你的预设已推送到 Hub",
+ "descriptionPlaceholder": "请输入描述...",
+ "willBePublic": "发布你的预设将使其公开",
+ "willBePrivate": "仅您可见",
+ "willBeOrgVisible": "组织内成员均可见",
+ "publicSubtitle": "你的预设现在为 公开。其他人可以在 lmstudio.ai 下载和 fork 它",
+ "privateUsageReached": "私有预设的数量已达上限",
+ "continueInBrowser": "在浏览器继续",
+ "confirmShareButton": "发布",
+ "error": "预设发布失败",
+ "createFreeAccount": "请在 Hub 创建免费账号以发布预设"
+ },
+ "update": {
+ "title": "推送更改到 Hub",
+ "title/success": "预设已成功更新",
+ "subtitle": "修改 并推送到 Hub",
+ "descriptionLabel": "描述",
+ "descriptionPlaceholder": "请输入描述...",
+ "loading": "正在推送...",
+ "cancel": "取消",
+ "createFreeAccount": "请在 Hub 创建免费账号以发布预设",
+ "error": "推送更新失败",
+ "confirmUpdateButton": "推送"
+ },
+ "resolve": {
+ "title": "解决冲突...",
+ "tooltip": "打开窗口以解决与 Hub 版本的差异"
+ },
+ "loginToManage": {
+ "title": "登录以管理..."
+ },
+ "downloadFromHub": {
+ "title": "下载",
+ "downloading": "下载中...",
+ "success": "下载完成!",
+ "error": "下载失败"
+ },
+ "push": {
+ "title": "推送更改",
+ "pushing": "推送中...",
+ "success": "推送成功!",
+ "tooltip": "将本地更改推送到 Hub 上托管的远程版本",
+ "error": "推送失败"
+ },
+ "saveAsNewModal": {
+ "title": "哎呀!在 Hub 上未找到预设",
+ "confirmSaveAsNewDescription": "您是否希望将此预设作为新版本发布?",
+ "confirmButton": "作为新版本发布"
+ },
+ "pull": {
+ "title": "拉取最新版本",
+ "error": "拉取失败",
+ "contextMenuErrorMessage": "拉取失败",
+ "success": "已拉取",
+ "pulling": "拉取中...",
+ "upToDate": "已是最新版本!",
+ "unsavedChangesModal": {
+ "title": "你有未保存的更改。",
+ "bodyContent": "从远程拉取的内容将覆盖您的未保存更改,是否继续?",
+ "confirmButton": "覆盖未保存的更改"
+ }
+ },
+ "import": {
+ "title": "从文件导入预设",
+ "dragPrompt": "拖拽预设 JSON 文件或从电脑选择",
+ "remove": "移除",
+ "cancel": "取消",
+ "importPreset_zero": "导入预设",
+ "importPreset_one": "导入预设",
+ "importPreset_other": "导入 {{count}} 个预设",
+ "selectDialog": {
+ "title": "选择预设文件(.json 或者 .tar.gz)",
+ "button": "导入"
+ },
+ "error": "导入预设失败",
+ "resultsModal": {
+ "titleSuccessSection_one": "成功导入 1 个预设",
+ "titleSuccessSection_other": "成功导入 {{count}} 个预设",
+ "titleFailSection_zero": "",
+ "titleFailSection_one": "({{count}} 个失败)",
+ "titleFailSection_other": "({{count}} 个失败)",
+ "titleAllFailed": "预设导入失败",
+ "importMore": "继续导入",
+ "close": "完成",
+ "successBadge": "成功",
+ "alreadyExistsBadge": "预设已存在",
+ "errorBadge": "错误",
+ "invalidFileBadge": "无效文件",
+ "otherErrorBadge": "导入预设失败",
+ "errorViewDetailsButton": "查看详情",
+ "seeError": "查看错误",
+ "noName": "无预设名称",
+ "useInChat": "在聊天中使用"
+ },
+ "importFromUrl": {
+ "button": "从 URL 导入...",
+ "title": "从 URL 导入",
+ "back": "从文件导入...",
+ "action": "请在下方粘贴你要导入的 LM Studio Hub 预设链接",
+ "invalidUrl": "无效的 URL,请确保输入的是有效的 LM Studio Hub 预设链接。",
+ "tip": "你也可以在 LM Studio Hub 直接点击 {{buttonName}} 按钮安装该预设",
+ "confirm": "导入",
+ "cancel": "取消",
+ "loading": "正在导入...",
+ "error": "下载预设失败。"
+ }
+ },
+ "download": {
+ "title": "从 LM Studio Hub 拉取 ",
+ "subtitle": "保存 到你的预设。保存后你可以在应用中使用此预设",
+ "button": "拉取",
+ "button/loading": "正在拉取...",
+ "cancel": "取消",
+ "error": "下载预设失败。"
+ },
+ "inclusiveness": {
+ "speculativeDecoding": "包含在预设中"
+ }
+ },
+
+ "flashAttentionWarning": "Flash Attention 是一项实验性功能,可能会导致某些模型出现问题。如果您遇到问题,请尝试禁用它。",
+ "llamaKvCacheQuantizationWarning": "KV 缓存量化是一项实验性功能,可能会导致某些模型出现问题。V 缓存量化必须启用 Flash Attention。如果遇到问题,请将默认值重置为\"F16\"。",
+
+ "seedUncheckedHint": "随机种子",
+ "ropeFrequencyBaseUncheckedHint": "自动",
+ "ropeFrequencyScaleUncheckedHint": "自动",
+
+ "hardware": {
+ "environmentVariables": "环境变量",
+ "environmentVariables.info": "如果不确定,请保留默认值",
+ "environmentVariables.reset": "重置为默认值",
+
+ "gpus.information": "配置检测到的图形处理单元 (GPU)",
+ "gpuSettings": {
+ "editMaxCapacity": "编辑最大容量",
+ "hideEditMaxCapacity": "隐藏最大容量编辑",
+ "allOffWarning": "所有 GPU 均已关闭或禁用,请确保分配了至少一个 GPU 以加载模型",
+ "split": {
+ "title": "分配策略",
+ "placeholder": "选择 GPU 内存分配方式",
+ "options": {
+ "generalDescription": "配置模型将如何加载到您的 GPU 上",
+ "evenly": {
+ "title": "均匀分配",
+ "description": "在多个 GPU 之间均匀分配内存"
+ },
+ "priorityOrder": {
+ "title": "按顺序填充",
+ "description": "先在第一个 GPU 上分配内存,然后依次分配到后续 GPU"
+ },
+ "custom": {
+ "title": "自定义",
+ "description": "分配内存",
+ "maxAllocation": "最大分配"
+ }
+ }
+ },
+ "deviceId.info": "此设备的唯一标识符",
+ "changesOnlyAffectNewlyLoadedModels": "更改仅影响新加载的模型",
+ "toggleGpu": "启用/禁用 GPU"
+ }
+ },
+
+ "load.gpuSplitConfig/title": "GPU 分配配置",
+ "envVars/title": "设置环境变量",
+ "envVars": {
+ "select": {
+ "placeholder": "选择环境变量...",
+ "noOptions": "无更多可用选项",
+ "filter": {
+ "placeholder": "过滤搜索结果",
+ "resultsFound_zero": "未找到结果",
+ "resultsFound_one": "找到 1 个结果",
+ "resultsFound_other": "找到 {{count}} 个结果"
+ }
+ },
+ "inputValue": {
+ "placeholder": "输入值"
+ },
+ "values": {
+ "title": "当前值"
+ }
+ }
+}
diff --git a/developer.json b/developer.json
new file mode 100644
index 00000000..d2592439
--- /dev/null
+++ b/developer.json
@@ -0,0 +1,197 @@
+{
+ "tabs/server": "本地服务器",
+ "tabs/extensions": "LM 运行环境",
+ "loadSettings/title": "加载设置",
+ "modelSettings/placeholder": "选择一个模型进行配置",
+
+ "loadedModels/noModels": "没有已加载的模型",
+
+ "serverOptions/title": "服务器选项",
+ "serverOptions/configurableTitle": "可配置选项",
+ "serverOptions/port/hint": "设置本地服务器将使用的网络端口。默认情况下,LM Studio 使用端口 1234。如果该端口已被占用,您可能需要更改此设置。",
+ "serverOptions/port/subtitle": "监听的端口",
+ "serverOptions/autostart/title": "自动启动服务器",
+ "serverOptions/autostart/hint": "当加载模型时自动启动本地服务器",
+ "serverOptions/port/integerWarning": "端口号必须是整数",
+ "serverOptions/port/invalidPortWarning": "端口号必须介于 1 到 65535 之间",
+ "serverOptions/cors/title": "启用 CORS",
+ "serverOptions/cors/hint1": "启用 CORS (跨源资源共享) 允许您访问的网站向 LM Studio 服务器发起请求。",
+ "serverOptions/cors/hint2": "当从网页或 VS Code 或其他扩展发起请求时,可能需要启用 CORS。",
+ "serverOptions/cors/subtitle": "允许跨源请求",
+ "serverOptions/network/title": "在网络中提供服务",
+ "serverOptions/network/subtitle": "向网络中的设备开放服务器",
+ "serverOptions/network/hint1": "是否允许来自网络中其他设备的连接。",
+ "serverOptions/network/hint2": "如果未选中,服务器将仅监听本地主机。",
+ "serverOptions/verboseLogging/title": "详细日志记录",
+ "serverOptions/verboseLogging/subtitle": "为本地服务器启用详细日志记录",
+ "serverOptions/contentLogging/title": "记录提示和响应",
+ "serverOptions/contentLogging/subtitle": "本地请求/响应日志记录设置",
+ "serverOptions/contentLogging/hint": "是否在本地服务器日志文件中记录提示和/或响应。",
+ "serverOptions/redactContent/title": "内容脱敏",
+ "serverOptions/redactContent/hint": "启用后,可防止敏感数据(如请求和响应内容)被记录在日志中。",
+ "serverOptions/logIncomingTokens/title": "记录传入的 Token",
+ "serverOptions/logIncomingTokens/hint": "是否在生成过程中记录每个 Token",
+ "serverOptions/fileLoggingMode/title": "文件日志模式",
+ "serverOptions/fileLoggingMode/off/title": "关闭",
+ "serverOptions/fileLoggingMode/off/hint": "不创建日志文件",
+ "serverOptions/fileLoggingMode/succinct/title": "简洁",
+ "serverOptions/fileLoggingMode/succinct/hint": "记录与控制台相同的内容。长请求将被截断。",
+ "serverOptions/fileLoggingMode/full/title": "完整",
+ "serverOptions/fileLoggingMode/full/hint": "不对长请求进行截断。",
+ "serverOptions/jitModelLoading/title": "JIT(即时)模型加载",
+ "serverOptions/jitModelLoading/hint": "启用后,如果请求指定了一个未加载的模型,该模型将自动加载并使用。此外,\"/v1/models\" 端点还将包含尚未加载的模型。",
+ "serverOptions/loadModel/error": "加载模型失败",
+ "serverOptions/jitModelLoadingTTL/title": "自动卸载未使用的即时加载模型",
+ "serverOptions/jitModelLoadingTTL/hint": "通过 API 请求即时加载的模型,若在一段时间内未被使用,将会被自动卸载(TTL)",
+ "serverOptions/jitModelLoadingTTL/ttl/label": "最大空闲 TTL",
+ "serverOptions/jitModelLoadingTTL/ttl/unit": "分钟",
+ "serverOptions/unloadPreviousJITModelOnLoad/title": "仅保留最后一个即时加载的模型",
+ "serverOptions/unloadPreviousJITModelOnLoad/hint": "确保在任意时刻最多只有一个即时加载的模型(会卸载之前的模型)",
+
+ "serverLogs/scrollToBottom": "跳转到底部",
+ "serverLogs/clearLogs": "清除日志 ({{shortcut}})",
+ "serverLogs/openLogsFolder": "打开服务器日志文件夹",
+
+ "runtimeSettings/title": "运行环境设置",
+ "runtimeSettings/chooseRuntime/title": "配置运行环境",
+ "runtimeSettings/chooseRuntime/description": "为每个模型格式选择一个运行环境",
+ "runtimeSettings/chooseRuntime/showAllVersions/label": "显示所有运行环境",
+ "runtimeSettings/chooseRuntime/showAllVersions/hint": "默认情况下,LM Studio 只显示每个兼容运行环境的最新版本。启用此选项可以查看所有可用的运行环境。",
+ "runtimeSettings/chooseRuntime/select/placeholder": "选择一个运行环境",
+
+ "runtimeSettings/chooseFrameworks/title": "框架",
+ "runtimeSettings/chooseFrameworks/description": "为每个功能选择要使用的框架",
+ "runtimeSettings/chooseFramework/documentParser/builtIn/label": "内置解析器",
+ "runtimeSettings/chooseFramework/documentParser/select/label": "文档解析器",
+ "runtimeSettings/chooseFramework/documentParser/select/placeholder": "请选择文档解析器",
+
+ "runtimeOptions/uninstall": "卸载",
+ "runtimeOptions/uninstallDialog/title": "卸载 {{runtimeName}}?",
+ "runtimeOptions/uninstallDialog/body": "卸载此运行环境将从系统中移除它。此操作不可逆。",
+ "runtimeOptions/uninstallDialog/body/caveats": "某些文件可能需要在重启 LM Studio 后才能被移除。",
+ "runtimeOptions/uninstallDialog/error": "卸载运行环境失败",
+ "runtimeOptions/uninstallDialog/confirm": "继续并卸载",
+ "runtimeOptions/uninstallDialog/cancel": "取消",
+ "runtimeOptions/noCompatibleRuntimes": "未找到兼容的运行环境",
+ "runtimeOptions/downloadIncompatibleRuntime": "此运行环境被认为与您的机器不兼容。它很可能无法正常工作。",
+ "runtimeOptions/noRuntimes": "未找到运行环境",
+
+ "runtimes": {
+ "manageLMRuntimes": "管理 LM 运行环境",
+ "includeOlderRuntimeVersions": "包含旧版本",
+ "dismiss": "关闭",
+ "updateAvailableToast": {
+ "title": "LM 运行环境更新可用!"
+ },
+ "updatedToast": {
+ "title": " ✅ LM 运行环境已更新:{{runtime}} → v{{version}}",
+ "preferencesUpdated": "新加载的 {{compatibilityTypes}} 模型将使用更新后的运行环境。"
+ },
+ "noAvx2ErrorMessage": "所有 LM 运行环境当前都需要支持 AVX2 指令集的 CPU",
+ "downloadableRuntimes": {
+ "runtimeExtensionPacks": "运行环境扩展包",
+ "refresh": "刷新",
+ "refreshing": "刷新中...",
+ "filterSegment": {
+ "compatibleOnly": "仅兼容",
+ "all": "全部"
+ },
+ "card": {
+ "releaseNotes": "版本说明",
+ "latestVersionInstalled": "已安装最新版本",
+ "updateAvailable": "更新可用"
+ }
+ },
+ "installedRuntimes": {
+ "manage": {
+ "title": "管理可用的运行环境"
+ },
+ "dropdownOptions": {
+ "installedVersions": "管理版本",
+ "close": "关闭"
+ },
+ "tabs": {
+ "all": "全部",
+ "frameworks": "我的框架",
+ "engines": "我的引擎"
+ },
+ "detailsModal": {
+ "installedVersions": "{{runtimeName}}的已安装版本",
+ "manifestJsonTitle": "清单 JSON(高级)",
+ "releaseNotesTitle": "版本说明",
+ "noReleaseNotes": "该版本无可用的版本说明",
+ "back": "返回",
+ "close": "关闭"
+ },
+ "noEngines": "未安装引擎",
+ "noFrameworks": "未安装框架"
+ }
+ },
+
+ "inferenceParams/noParams": "此模型类型无可用配置的推理参数",
+
+ "quickDocs": {
+ "tabChipTitle": "快速文档",
+ "newToolUsePopover": "代码片段现已在“快速文档”中提供。点击此处开始使用工具!",
+ "newToolUsePopoverTitle": "📚 快速文档",
+ "learnMore": "ℹ️ 👾 要了解有关 LM Studio 本地服务器端的更多信息,请访问[文档](https://lmstudio.ai/docs)。",
+ "helloWorld": {
+ "title": "你好,世界!"
+ },
+ "chat": {
+ "title": "聊天"
+ },
+ "structuredOutput": {
+ "title": "结构化输出"
+ },
+ "imageInput": {
+ "title": "图像输入"
+ },
+ "embeddings": {
+ "title": "文本嵌入"
+ },
+ "toolUse": {
+ "title": "工具使用",
+ "tab": {
+ "saveAsPythonFile": "保存为Python文件",
+ "runTheScript": "运行脚本:",
+ "savePythonFileCopyPaste": "保存为 Python 文件,然后复制粘贴命令运行"
+ }
+ },
+ "newBadge": "新功能"
+ },
+
+ "endpoints/openaiCompatRest/title": "支持的端点 (与 OpenAI 兼容的格式)",
+ "endpoints/openaiCompatRest/getModels": "列出当前已加载的模型",
+ "endpoints/openaiCompatRest/postCompletions": "文本补全模式。给定一个提示,预测下一个token。注意:OpenAI 认为此端点已“弃用”。",
+ "endpoints/openaiCompatRest/postChatCompletions": "聊天补全。向模型发送聊天历史以预测下一个助手响应。",
+ "endpoints/openaiCompatRest/postEmbeddings": "文本嵌入。为给定的文本输入生成文本嵌入。接受字符串或字符串数组。",
+
+ "model.createVirtualModelFromInstance": "另存为新的虚拟模型",
+ "model.createVirtualModelFromInstance/error": "另存为新的虚拟模型失败",
+
+ "model": {
+ "toolUseSectionTitle": "工具使用",
+ "toolUseDescription": "检测到此模型经过工具使用的训练\n\n打开快速文档以了解更多信息。"
+ },
+
+ "apiConfigOptions/title": "API 配置",
+
+ "serverOptions/allowMcp/hint": "允许使用不在您的 mcp.json 中的 MCP。这些 MCP 连接是临时的,仅在请求期间存在。目前仅支持远程 MCP。",
+ "serverOptions/stop/error": "停止服务器失败",
+ "endpoints/lmStudioRestV1/postModelsLoad": "使用选项加载模型",
+ "serverOptions/allowMcp/mode/off": "关闭",
+ "endpoints/lmStudioRestV1/postModelsDownload": "下载模型",
+ "endpoints/openaiCompatRest/segmentedLabel": "类 OpenAI",
+ "endpoints/anthropicCompatRest/segmentedLabel": "Anthropic 兼容",
+ "serverOptions/start/error": "启动服务器失败",
+ "endpoints/lmStudioRestV1/getModels": "列出可用模型",
+ "endpoints/lmStudioRestV1/postChat": "与模型聊天。支持有状态的多轮对话和 MCP",
+ "serverOptions/allowMcp/title": "允许远程 MCP",
+ "serverOptions/allowMcp/mode/remote": "远程",
+ "endpoints/lmStudioRestV1/getModelsDownloadStatus": "获取模型下载状态",
+ "endpoints/openaiCompatRest/postResponses": "生成模型响应的高级接口。通过将前一个响应的 ID 作为输入传递给下一个响应来创建有状态交互。",
+ "serverOptions/allowMcp/mode/off/hint": "不允许服务器请求使用 MCP",
+ "serverOptions/allowMcp/mode/remote/hint": "允许连接到远程 MCP 服务器",
+ "endpoints/lmStudioRest/segmentedLabel": "LM Studio"
+}
diff --git a/models.json b/models.json
new file mode 100644
index 00000000..d0d161bd
--- /dev/null
+++ b/models.json
@@ -0,0 +1,127 @@
+{
+ "pageTitle": "我的模型",
+ "filterModels.placeholder": "筛选模型...",
+ "aggregate_one": "您有 {{count}} 个本地模型,占用了 {{size}} 的磁盘空间。",
+ "aggregate_other": "您有 {{count}} 个本地模型,占用了 {{size}} 的磁盘空间。",
+
+ "noModels.title": "您的本地 LLM 将显示在这里。",
+ "noModels.discoverButtonText.prefix": "点击左侧边栏的",
+ "noModels.discoverButtonText.suffix": "按钮来发现有趣的 LLM。",
+ "noModels.discoverModelsPrompt": "去探索一些本地 LLM 吧!",
+
+ "modelsTable.arch/label": "架构",
+ "modelsTable.params/label": "参数量",
+ "modelsTable.publisher/label": "发布者",
+ "modelsTable.displayName/label": "名字",
+ "modelsTable.modelKey/label": "模型密钥",
+ "modelsTable.size/label": "大小",
+ "modelsTable.dateModified/label": "修改日期",
+ "modelsTable.actions/label": "操作",
+
+ "modelsTable.quant/label": "量化规格",
+ "modelsTable.llms/label": "语言模型",
+ "modelsTable.embeddingModels/label": "嵌入模型",
+
+ "action.model.delete": "删除",
+ "action.model.delete.full": "删除模型",
+ "action.model.delete.confirmation/title": "删除 {{name}}",
+ "action.model.delete.confirmation/description": "您确定吗?这将永久删除与此模型相关的所有文件,此操作不可逆。",
+ "action.model.delete.confirmation/confirm": "删除",
+ "action.model.delete/error": "删除模型失败",
+
+ "loader.model.bundled": "捆绑",
+ "action.cancel": "取消",
+ "indexingOngoing": "正在索引模型... 这可能需要几秒钟",
+ "index/error_one": "索引以下文件夹失败:",
+ "index/error_other": "索引以下文件夹失败:",
+ "badModels/title_one": "索引以下模型失败:",
+ "badModels/title_other": "索引以下模型失败:",
+ "badModels.virtualModelIncorrectPlacement": "虚拟模型放置错误。预期位置为 {{expected}}。实际位置为 {{actual}}。",
+ "badModels.virtualModelBadManifest": "无效的虚拟模型清单 (model.yaml):",
+ "unresolvedVirtualModels/title_one": "解析以下虚拟模型失败:",
+ "unresolvedVirtualModels/title_other": "解析以下虚拟模型失败:",
+ "unresolvedVirtualModels.missingModel": "缺少依赖模型:{{missing}}。依赖路径:\n{{chain}}",
+ "unresolvedVirtualModels.circular": "检测到循环依赖。",
+ "unresolvedVirtualModels.fix": "修复",
+ "unresolvedVirtualModels.revealInExplorer": "在文件资源管理器中显示",
+ "unresolvedVirtualModels.revealInFinder": "在 Finder 中显示",
+ "unresolvedVirtualModels.reveal/error": "显示失败",
+
+ "modelsDirectory": "模型目录",
+ "modelsDirectory.change": "更改...",
+ "modelsDirectory.change/error": "修改模型路径失败",
+ "modelsDirectory.reset": "重置为默认路径",
+ "modelsDirectory.reveal.mac": "在 Finder 中显示",
+ "modelsDirectory.reveal.nonMac": "在文件资源管理器中打开",
+ "modelsDirectory.reveal.mac/error": "在 Finder 中显示失败",
+ "modelsDirectory.reveal.nonMac/error": "在文件资源管理器中打开失败",
+ "modelsDirectory.forceReindex": "刷新",
+ "loadState/loaded": "已加载",
+ "loadState/loading": "加载中",
+ "loadState/unloaded": "未加载",
+ "loadState/unloading": "卸载中",
+ "loadState/idle": "空闲",
+ "pinned": "此模型已被固定。右键点击取消固定。",
+ "lastUsed": "上次使用",
+ "contextMenu/pin": "固定到顶部",
+ "contextMenu/unpin": "取消固定",
+ "contextMenu/copyAbsolutePath": "复制绝对路径",
+ "contextMenu/copyModelName": "复制模型路径",
+ "contextMenu/copyModelDefaultIdentifier": "复制默认标识符",
+ "contextMenu/showRawMetadata": "查看原始元数据",
+ "contextMenu/openOnHuggingFace": "在 Hugging Face 上打开",
+ "contextMenu": {
+ "showOnWeb": "在网页上显示",
+ "pullLatest": {
+ "label": "拉取最新版本",
+ "checking": "检查更新中...",
+ "upToDate": "已是最新版本",
+ "error": "检查更新失败"
+ }
+ },
+ "tooltip/moreActions": "更多操作",
+ "tooltip/getInfo": "获取信息",
+ "tooltip/editModelDefaultConfig": "编辑模型默认配置",
+ "tooltip/editModelDefaultConfig/override": "编辑模型默认配置(*当前有覆盖)",
+ "tooltip/visionBadge": "此模型支持图像输入",
+ "tooltip/toolUseBadge": "此模型经过工具使用的训练",
+
+ "visionBadge/label": "此模型支持图像输入",
+ "toolUseBadge/label": "此模型经过工具使用的训练",
+
+ "loader.action.load": "加载模型",
+ "loader.action.clearChanges": "清除更改",
+ "loader.action.cancel": "取消",
+ "loader.info.clickOnModelToLoad": "点击模型以加载",
+ "loader.info.configureLoadParameters": "配置模型加载参数",
+ "loader.info.activeGeneratorWarning": "您正在使用带有自定义生成器的插件。当前加载的模型是否适用于该插件,取决于生成器的具体实现方式",
+
+ "virtual": {
+ "local": {
+ "create": "创建虚拟模型",
+ "title": "创建一个本地虚拟模型",
+ "description": "通过将模型与一组配置捆绑来创建虚拟模型,模型的底层权重文件不会被复制。",
+ "modelKey.label": "模型密钥",
+ "modelKey.placeholder": "输入唯一的模型密钥",
+ "modelKey.normalized": "您的模型密钥将被规范化为:{{normalized}}",
+ "baseModel.label": "基础模型",
+ "baseModel.placeholder": "选择基础模型",
+ "baseModel.empty": "下载模型作为基础模型",
+ "next": "下一步",
+ "confirm": "创建",
+ "error": "创建虚拟模型失败"
+ }
+ },
+
+ "indexingPageLoaderText": "正在索引模型...",
+ "loader.guardrails.notEnoughResources/options": "选项",
+ "loader.guardrails.notEnoughResources.moreInfoSection.appearsNotEnoughMemory": "您的系统似乎没有足够的内存来加载此模型。",
+ "loader.guardrails.unavailable": "此模型的内存估计不可用",
+ "loader.guardrails.total": "总计",
+ "loader.guardrails.notEnoughResources": "当前设置下没有足够的资源来加载模型",
+ "loader.guardrails.estimatedMemoryUsage": "估计内存使用量",
+ "loader.guardrails.notEnoughResources.alwaysAllowLoadAnyway": "(不推荐)始终允许“强制加载”而无需按住 Alt/Option",
+ "loader.guardrails.notEnoughResources.moreInfoSection.ifYouBelieveThisIsIncorrect": "您可以在设置中调整模型加载保护,或按住 来强制加载。",
+ "loader.guardrails.gpu": "GPU",
+ "loader.guardrails.notEnoughResources.moreInfoSection.warning": "加载过大的模型可能会使系统过载并导致死机。"
+}
diff --git a/settings.json b/settings.json
new file mode 100644
index 00000000..9d098cfe
--- /dev/null
+++ b/settings.json
@@ -0,0 +1,214 @@
+{
+ "settingsDialogTitle": "应用设置",
+ "settingsDialogButtonTooltip": "应用设置",
+ "accountDialogButtonTooltip": "账户",
+
+ "settingsNewButtonPopover": {
+ "primary": "应用设置现已移至右下角",
+ "secondary": "点击⚙️按钮来打开",
+ "tertiary": "或者按"
+ },
+ "appUpdate": "应用更新",
+ "checkingAppUpdate": "正在检查更新...",
+ "checkForUpdates": "检查更新",
+ "failedCheckingAppUpdate": "检查更新失败",
+ "newUpdateAvailable": "LM Studio 有新版本可用!🎉",
+ "newBetaUpdateAvailable": "LM Studio 有新测试版可用!🛠️🎉",
+ "downloadingInProgress": "正在下载更新...",
+ "downloadUpdate": "更新至 LM Studio {{version}}",
+ "downloadBetaUpdate": "更新至 LM Studio 测试版 {{version}} (版本号 {{build}})",
+ "downloadCompleted": "下载完成!",
+ "updateDownloadComplete": "更新下载成功!",
+ "updateDownloadFailed": "更新失败!",
+ "hasFinishedDownloading": "下载完毕",
+ "yourCurrentVersion": "当前版本为:",
+ "latestVersion": "最新版本为:",
+ "downloadLabel": "立即更新",
+ "downloadLabel/Linux": "下载更新",
+ "cancelDownloadLabel": "取消",
+ "downloadingUpdate": "正在下载 {{item}}...",
+ "updateDownloaded": "新更新已成功下载。重启应用以应用更新",
+ "restartAppToUpdate": "重新启动应用以应用更新",
+ "appUpdatedToastTitle": "已更新至 {{title}}",
+ "appUpdatedToastDescriptionPrefix": "查看",
+ "AppUpdatedToastDescriptionReleaseNotes": "发行说明",
+ "toolUseToastTitle": "测试新功能:工具调用与函数调用 API",
+ "toolUseToastDescription": "支持 Llama 3.1/3.2、Mistral、Qwen 等部分模型,兼容 OpenAI 工具调用,快速上手。",
+ "toolUseToastButtonText": "前往开发者页面体验",
+ "doItLater": "稍后再说",
+ "failedToUpdate": "应用更新失败。请检查您的网络连接或稍后再试。",
+ "retryInBackground": "后台重试",
+ "laterLabel": "稍后",
+ "releaseNotesLabel": "发行说明",
+ "remindMeLater": "稍后提醒我",
+ "failedDownloadUpdate": "下载更新失败",
+ "installAndRelaunch": "安装并重新启动",
+ "uptodate": "您的应用已是最新版本!当前版本为 {{version}}",
+ "preferences": "偏好设置",
+ "general": "常规",
+ "sideButtonLabels": "显示侧边按钮标签",
+ "showModelFileNames": "在“我的模型”中始终显示完整模型文件名",
+ "colorThemeLabel": "颜色主题",
+ "complexityLevelLabel": "用户界面复杂度级别",
+ "selectComplexityLevelPlaceholder": "选择默认的UI复杂度级别",
+ "userComplexityLevelLabel": "普通用户",
+ "powerUserComplexityLevelLabel": "高级用户",
+ "developerComplexityLevelLabel": "开发者",
+ "chatSettingsLabel": "聊天设置",
+ "chat/alwaysShowPromptTemplate": "始终在聊天侧栏显示提示模板",
+ "chat/highlightChatMessageOnHover": "鼠标悬停时高亮显示聊天消息",
+ "chat/doubleClickMessageToEdit": "双击聊天消息以编辑",
+
+ "chat/aiNaming/label": "AI命名聊天",
+ "chat/aiNaming/mode/label": "AI生成的聊天名称",
+ "chat/aiNaming/mode/value/never": "关闭",
+ "chat/aiNaming/mode/value/never/subTitle": "不使用AI生成聊天名称",
+ "chat/aiNaming/mode/value/auto": "自动",
+ "chat/aiNaming/mode/value/auto/subTitle": "根据生成速度自动决定是否使用AI生成聊天名称",
+ "chat/aiNaming/mode/value/always": "开启",
+ "chat/aiNaming/mode/value/always/subTitle": "使用AI生成聊天名称",
+ "chat/aiNaming/emoji": "在AI生成的聊天名称中使用表情符号",
+ "chat/keyboardShortcuts/label": "键盘快捷键",
+ "chat/keyboardShortcuts/verbPrefix": "使用",
+ "chat/keyboardShortcuts/regenerate": "重新生成聊天中的最后一条消息",
+ "chat/keyboardShortcuts/sendMessage": "发送消息",
+
+ "onboarding/blockTitle": "新手引导",
+ "onboarding/dismissedHints": "已关闭的新手引导",
+ "onboarding/resetHintTooltip": "点击以重新启用新手引导",
+ "onboarding/resetAllHints": "重置所有新手引导",
+ "onboarding/noneDismissed": "没有已关闭的提示,目前所有提示项都会显示,直至下次关闭",
+
+ "firstTimeExperienceLabel": "首次聊天体验",
+ "firstTimeExperienceMarkCompletedLabel": "标记为已完成",
+ "firstTimeExperienceResetLabel": "重置",
+ "showPromptSuggestionsLabel": "创建新聊天时显示提示建议",
+ "darkThemeLabel": "深色",
+ "lightThemeLabel": "浅色",
+ "systemThemeLabel": "自动",
+ "sepiaThemeLabel": "护眼",
+ "unloadPreviousModelLabel": "选择要加载的模型时,先卸载所有当前已加载的模型",
+ "languageLabel": "语言",
+ "changeLanguageLabel": "选择应用语言(仍在开发中)",
+ "developerLabel": "开发者",
+ "localServiceLabel": "本地 LLM 服务(无界面)",
+ "showExperimentalFeaturesLabel": "显示实验性功能",
+ "appFirstLoadLabel": "应用首次加载体验",
+ "showDebugInfoBlocksInChatLabel": "在聊天中显示调试信息块",
+ "autoLoadBundledLLMLabel": "启动时自动加载捆绑的大语言模型",
+ "showReleaseNotes": "显示发行说明",
+ "hideReleaseNotes": "隐藏发行说明",
+
+ "backendDownloadNewUpdate": "有新的后端可用!",
+ "backendDownloadNewUpdateAction": "前往开发者页面",
+
+ "backendDownloadChannel.label": "LM Studio 扩展包下载频道",
+ "backendDownloadChannel.value.stable": "稳定版",
+ "backendDownloadChannel.value.beta": "测试版",
+ "backendDownloadChannel.value.latest": "开发版",
+ "backendDownloadChannel.shortLabel": "运行环境下载频道",
+ "backendDownloadChannel.hint": "选择从哪个频道下载 LM Studio 扩展包。\"{{stableName}}\" 是推荐给大多数用户的通道。",
+
+ "appUpdateChannel.label": "LM Studio 更新频道",
+ "appUpdateChannel.value.stable": "稳定版",
+ "appUpdateChannel.value.beta": "beta测试版",
+ "appUpdateChannel.value.alpha": "alpha测试版",
+ "appUpdateChannel.shortLabel": "应用更新频道",
+ "appUpdateChannel.hint": "选择从哪个频道接收 LM Studio 应用更新。\"{{stableName}}\" 是推荐给大多数用户的通道。",
+
+ "modelLoadingGuardrails.label": "模型加载保护",
+ "modelLoadingGuardrails.description": "超出系统资源限制加载模型可能导致系统不稳定或卡死。保护措施可以防止意外过载。如果需要,可以在这里调整这些限制。但请注意,接近系统极限加载模型可能会降低稳定性。",
+ "modelLoadingGuardrails.value.off": "关闭(不推荐)",
+ "modelLoadingGuardrails.value.off/subTitle": "不对系统过载采取预防措施",
+ "modelLoadingGuardrails.value.off/detail": "关闭详情",
+ "modelLoadingGuardrails.value.low": "宽松",
+ "modelLoadingGuardrails.value.low/subTitle": "轻微预防系统过载",
+ "modelLoadingGuardrails.value.low/detail": "宽松详情",
+ "modelLoadingGuardrails.value.medium": "平衡",
+ "modelLoadingGuardrails.value.medium/subTitle": "适度预防系统过载",
+ "modelLoadingGuardrails.value.medium/detail": "平衡详情",
+ "modelLoadingGuardrails.value.high": "严格",
+ "modelLoadingGuardrails.value.high/subTitle": "强烈预防系统过载",
+ "modelLoadingGuardrails.value.high/detail": "严格详情",
+ "modelLoadingGuardrails.value.custom": "自定义",
+ "modelLoadingGuardrails.value.custom/subTitle": "设置最大可加载模型大小的自定义限制",
+ "modelLoadingGuardrails.value.custom/detail": "自定义详情",
+ "modelLoadingGuardrails.custom.label": "内存限制:",
+ "modelLoadingGuardrails.custom.unitGB": "GB",
+ "modelLoadingGuardrails.custom.description": "为模型加载设置自定义内存限制。如果加载的模型会超过此限制,则不会加载模型。",
+
+ "experimentalLoadPresets": "在预设中启用模型加载配置支持",
+ "experimentalLoadPresets.description": "是否允许预设包含模型加载配置。此功能尚处于试验阶段,我们欢迎反馈。",
+
+ "unloadPreviousJITModelOnLoad": "模型自动卸载:始终仅允许一个JIT模型加载(加载新模型时卸载上一个)",
+ "autoDeleteExtensionPacks": "自动删除最近最少使用的运行环境扩展包",
+ "autoUpdateExtensionPacks": "自动更新选中运行环境扩展包",
+ "useHFProxy.label": "使用 LM Studio 的 Hugging Face 代理",
+ "useHFProxy.hint": "使用 LM Studio 提供的 Hugging Face 代理进行模型搜索和下载。适用于无法直接访问 Hugging Face 的用户。",
+ "separateReasoningContentInResponses": "在API响应中区分 `reasoning_content` 和 `content`(如适用)",
+ "separateReasoningContentInResponses/hint": "该设置仅适用于像 DeepSeek R1 及其蒸馏模型等输出带有 <think> 和 </think> 标记的推理模型。",
+
+ "promptWhenCommittingUnsavedChangesWithNewFields": "提交新字段到预设时显示确认对话框",
+ "promptWhenCommittingUnsavedChangesWithNewFields.description": "如果您想避免意外向预设添加新字段,这将非常有用",
+
+ "enableLocalService": "启用本地 LLM 服务",
+ "enableLocalService.subtitle": "使用 LM Studio 的 LLM 服务器,而无需保持 LM Studio 应用程序打开",
+ "enableLocalService.description": "启用时,LM Studio 本地 LLM 服务将自动启动。关闭 LM Studio 时,本地 LLM 服务也将在系统托盘中继续运行。",
+
+ "expandConfigsOnClick": "点击而非悬停时展开配置",
+
+ "migrateChats": {
+ "label": "迁移 0.3.0 之前的聊天记录",
+ "hasBetterLabel": "重新迁移 0.3.0 之前的聊天记录",
+ "action_one": "迁移 1 条聊天记录",
+ "action_other": "迁移 {{count}} 条聊天记录",
+ "inProgress": "正在迁移聊天记录...",
+ "hint": {
+ "primary": "我们对 v0.3.0+ 版本的聊天记录内部数据结构进行了改造,以支持多版本聊天消息等功能。为了让旧聊天记录出现在应用中,需要将其迁移到新格式。",
+ "details": "迁移过程不会删除您的旧聊天记录,而是会创建一个新格式的副本。",
+ "footer": "您仍然可以通过旧版本的 LM Studio 访问您的旧聊天记录。目前图片不会自动迁移。"
+ },
+ "hasBetterHint": {
+ "primary": "自从您上次迁移旧聊天记录以来,我们已经改进了聊天记录迁移器。您想要再次运行它吗?",
+ "details": "迁移过程将创建一个包含新迁移聊天记录的新文件夹。您的旧聊天记录将保持不变。",
+ "footer": "您仍然可以通过旧版本的 LM Studio 访问您的旧聊天记录。目前图片不会自动迁移。"
+ },
+ "success": "聊天记录迁移成功!",
+ "success_one": "1 条聊天记录迁移成功",
+ "success_other": "{{count}} 条聊天记录迁移成功",
+ "showInstructionsButton": "显示指南",
+ "footerCardText": "来自 LM Studio 早期版本的聊天记录需要迁移才能在此版本中使用。",
+ "hasBetterFooterCardText": "自从您上次迁移旧聊天记录以来,我们已经改进了聊天记录迁移器。您可以重新运行迁移。(我们将创建一个包含新迁移聊天记录的新文件夹。)",
+ "dismissConfirm": "关闭",
+ "dismissConfirmDescription": "您随时可以在设置中处理聊天记录迁移"
+ },
+ "toolConfirmation": {
+ "label": "工具调用确认",
+ "neverAsk": {
+ "label": "运行工具前不再询问确认(不建议)",
+ "hint": "禁用运行工具前的确认提示。不建议此操作。",
+ "warnTitle": "确定吗?",
+ "warnDescription": "禁用工具调用确认非常危险。如果您的插件中包含可能执行破坏性操作的工具(例如运行命令、删除文件、覆盖文件、上传文件等),模型将无需确认即可执行这些操作。您可以通过逐个工具或逐个插件的方式禁用确认提示。强烈不建议启用此选项。请谨慎操作。",
+ "warnButton": "我了解风险"
+ }
+ },
+
+ "modelLoadingGuardrails.alwaysAllowLoadAnyway": "(不推荐)始终允许“强制加载”而无需按住 Alt/Option",
+ "modelDefaultsLabel": "模型默认设置",
+ "appNavigationBarPositionLabel": "导航栏位置",
+ "appNavigationBarPositionLeft": "左侧",
+ "appNavigationBarPositionTop": "顶部",
+ "defaultContextLength": {
+ "label": "默认上下文长度",
+ "maxTitle": "模型最大值",
+ "customTitle": "自定义值",
+ "maxSubtitle": "使用每个模型支持的最大上下文长度。",
+ "customSubtitle": "设置加载新模型时的默认上下文长度。如果模型支持的最大上下文长度较低,则使用该值。",
+ "invalidNaNError": "无效的上下文长度值。将使用 {{value}}",
+ "invalidRangeError": "无效的上下文长度值。应在 1 到 2^30 的范围内。将使用 {{value}}",
+ "largeContextWarning": "上下文长度越高,模型占用的内存就越多。如果不确定,请不要更改默认值"
+ },
+ "jitTTL": {
+ "subtitle": "即时加载的模型在空闲指定时间后将自动卸载。"
+ }
+}
diff --git a/shared.json b/shared.json
new file mode 100644
index 00000000..c19259be
--- /dev/null
+++ b/shared.json
@@ -0,0 +1,305 @@
+{
+ "copyLmStudioLinkButton/toolTip": "复制模型下载链接",
+
+ "filter.noMatches": "没有匹配项",
+ "longRunningTask": {
+ "unbundlingDependencies": {
+ "badge": "解包资源"
+ },
+ "performingBackendHardwareSurvey": {
+ "badge": "检测运行环境兼容性"
+ },
+ "indexingRuntimes": {
+ "badge": "索引运行环境"
+ },
+ "indexingModels": {
+ "badge": "索引模型"
+ },
+ "authenticating": {
+ "badge": "身份验证中"
+ },
+ "autoUpdatingExtensionPack": {
+ "badge": "正在更新扩展包({{name}} v{{version}})"
+ }
+ },
+ "auth": {
+ "prompt": "登录 LM Studio Hub",
+ "authError": "身份验证失败",
+ "noAccount": "还没有账号?",
+ "signUp": "注册",
+ "havingTrouble": "遇到问题?",
+ "retry": "重试"
+ },
+ "artifacts": {
+ "fetchError": "获取工件失败",
+ "organizationVisible": "组织可见"
+ },
+
+ "incompatible": "不兼容",
+ "compatible": "兼容",
+ "public": "公开",
+ "private": "私有",
+ "yes": "是",
+ "no": "否",
+ "go": "开始",
+
+ "proceedWithEllipsis": "继续...",
+ "proceed": "继续",
+ "inProgress": "进行中...",
+ "failed": "失败",
+ "pending": "待处理",
+ "doneWithExclamation": "完成!",
+ "done": "完成",
+ "beta": "测试版",
+
+ "complete": {
+ "completeWithEllipsis": "完成...",
+ "complete": "完成",
+ "completingWithEllipsis": "完成中...",
+ "completing": "完成中",
+ "completedWithExclamation": "已完成!",
+ "completed": "已完成"
+ },
+
+ "cancel": {
+ "cancelWithEllipsis": "取消...",
+ "cancel": "取消",
+ "cancelingWithEllipsis": "取消中...",
+ "canceling": "取消中",
+ "canceled": "已取消"
+ },
+
+ "next": {
+ "nextWithEllipsis": "下一步...",
+ "next": "下一步"
+ },
+
+ "back": {
+ "backWithEllipsis": "返回...",
+ "back": "返回"
+ },
+
+ "close": {
+ "closeWithEllipsis": "关闭...",
+ "close": "关闭",
+ "closingWithEllipsis": "关闭中...",
+ "closing": "关闭中",
+ "closedWithExclamation": "已关闭!",
+ "closed": "已关闭"
+ },
+
+ "delete": {
+ "deleteWithEllipsis": "删除...",
+ "delete": "删除",
+ "deletingWithEllipsis": "删除中...",
+ "deleting": "删除中",
+ "deletedWithExclamation": "已删除!",
+ "deleted": "已删除"
+ },
+
+ "retry": {
+ "retryWithEllipsis": "重试...",
+ "retry": "重试",
+ "retryingWithEllipsis": "重试中...",
+ "retrying": "重试中"
+ },
+
+ "refresh": {
+ "refreshWithEllipsis": "刷新...",
+ "refresh": "刷新",
+ "refreshingWithEllipsis": "刷新中...",
+ "refreshing": "刷新中",
+ "refreshedWithExclamation": "已刷新!",
+ "refreshed": "已刷新"
+ },
+
+ "confirm": {
+ "confirm": "确认",
+ "confirmingWithEllipsis": "确认中...",
+ "confirming": "确认中",
+ "confirmedWithExclamation": "已确认!",
+ "confirmed": "已确认"
+ },
+
+ "copy": {
+ "copyWithEllipsis": "复制...",
+ "copy": "复制",
+ "copyingWithEllipsis": "复制中...",
+ "copying": "复制中",
+ "copiedWithExclamation": "已复制!",
+ "copied": "已复制"
+ },
+
+ "edit": {
+ "editWithEllipsis": "编辑...",
+ "edit": "编辑",
+ "editingWithEllipsis": "编辑中...",
+ "editing": "编辑中",
+ "editedWithExclamation": "已编辑!",
+ "edited": "已编辑"
+ },
+
+ "load": {
+ "loadWithEllipsis": "加载...",
+ "load": "加载",
+ "loadingWithEllipsis": "加载中...",
+ "loading": "加载中",
+ "loadedWithExclamation": "已加载!",
+ "loaded": "已加载"
+ },
+
+ "save": {
+ "saveWithEllipsis": "保存...",
+ "save": "保存",
+ "savingWithEllipsis": "保存中...",
+ "saving": "保存中",
+ "savedWithExclamation": "已保存!",
+ "saved": "已保存"
+ },
+
+ "saveAs": {
+ "saveAsWithEllipsis": "另存为...",
+ "saveAs": "另存为"
+ },
+
+ "saveAsNew": {
+ "saveAsNewWithEllipsis": "保存为新文件...",
+ "saveAsNew": "保存为新文件"
+ },
+
+ "search": {
+ "searchWithEllipsis": "搜索...",
+ "search": "搜索",
+ "searchingWithEllipsis": "搜索中...",
+ "searching": "搜索中"
+ },
+
+ "update": {
+ "updateWithEllipsis": "更新...",
+ "update": "更新",
+ "updatingWithEllipsis": "更新中...",
+ "updating": "更新中",
+ "updatedWithExclamation": "已更新!",
+ "updated": "已更新"
+ },
+
+ "create": {
+ "createWithEllipsis": "创建...",
+ "create": "创建",
+ "creatingWithEllipsis": "创建中...",
+ "creating": "创建中",
+ "createdWithExclamation": "已创建!",
+ "created": "已创建"
+ },
+
+ "reset": {
+ "resetWithEllipsis": "重置...",
+ "reset": "重置",
+ "resettingWithEllipsis": "重置中...",
+ "resetting": "重置中"
+ },
+
+ "pause": {
+ "pause": "暂停",
+ "pausingWithEllipsis": "暂停中...",
+ "pausing": "暂停中",
+ "paused": "已暂停"
+ },
+
+ "download": {
+ "download": "下载",
+ "downloadingWithEllipsis": "下载中...",
+ "downloading": "下载中",
+ "downloadedWithExclamation": "已下载!",
+ "downloaded": "已下载"
+ },
+
+ "upload": {
+ "uploadWithEllipsis": "上传...",
+ "upload": "上传",
+ "uploadingWithEllipsis": "上传中...",
+ "uploading": "上传中",
+ "uploadedWithExclamation": "已上传!",
+ "uploaded": "已上传"
+ },
+
+ "remove": {
+ "removeWithEllipsis": "移除...",
+ "remove": "移除",
+ "removingWithEllipsis": "移除中...",
+ "removing": "移除中",
+ "removedWithExclamation": "已移除!",
+ "removed": "已移除"
+ },
+
+ "uninstall": {
+ "uninstallWithEllipsis": "卸载...",
+ "uninstall": "卸载",
+ "uninstallingWithEllipsis": "卸载中...",
+ "uninstalling": "卸载中",
+ "uninstalledWithExclamation": "已卸载!",
+ "uninstalled": "已卸载"
+ },
+
+ "resume": {
+ "resumeWithEllipsis": "继续...",
+ "resume": "继续",
+ "resumingWithEllipsis": "继续中...",
+ "resuming": "继续中"
+ },
+
+ "start": {
+ "startWithEllipsis": "启动...",
+ "start": "启动",
+ "startingWithEllipsis": "启动中...",
+ "starting": "启动中",
+ "started": "已启动"
+ },
+
+ "stop": {
+ "stopWithEllipsis": "停止...",
+ "stop": "停止",
+ "stoppingWithEllipsis": "停止中...",
+ "stopping": "停止中",
+ "stoppedWithExclamation": "已停止!",
+ "stopped": "已停止"
+ },
+
+ "import": {
+ "importWithEllipsis": "导入...",
+ "import": "导入",
+ "importingWithEllipsis": "导入中...",
+ "importing": "导入中",
+ "importedWithExclamation": "已导入!",
+ "imported": "已导入"
+ },
+
+ "letsGo": {
+ "letsGo": "开始吧",
+ "letsGoWithEllipsis": "开始吧...",
+ "letsGoWithExclamation": "开始吧!"
+ },
+
+ "run": {
+ "runWithEllipsis": "运行...",
+ "run": "运行",
+ "runningWithEllipsis": "运行中...",
+ "running": "运行中"
+ },
+
+ "configure": {
+ "configureWithEllipsis": "配置...",
+ "configure": "配置",
+ "configuringWithEllipsis": "配置中...",
+ "configured": "已配置"
+ },
+
+ "publish": {
+ "publishWithEllipsis": "发布...",
+ "publish": "发布",
+ "publishingWithEllipsis": "发布中...",
+ "publishing": "发布中",
+ "publishedWithExclamation": "已发布!",
+ "published": "已发布"
+ }
+}
diff --git a/zh-CN/chat.json b/zh-CN/chat.json
index eb991101..b7c04562 100644
--- a/zh-CN/chat.json
+++ b/zh-CN/chat.json
@@ -1,246 +1,248 @@
-{
- "modelLoaderPlaceholder": "选择要加载的模型",
- "systemPromptPlaceholder": "设置系统提示",
- "userRoleText": "用户",
- "assistantRoleText": "助手",
- "addMessageButtonText": "添加",
- "addMessageButtonText/toolTip": "在不触发预测的情况下将消息插入上下文中",
- "sendMessageButtonText": "发送",
- "sendMessageButtonText/toolTip": "将您的提示和对话历史发送给模型进行处理",
- "ejectButtonText": "卸载",
- "unloadTooltip": "从内存中卸载模型",
- "cancelButtonText": "取消",
- "loadButtonText": "加载",
- "advancedSegmentText": "高级",
- "chatSegmentText": "聊天",
- "chatSidebarTitle": "聊天列表",
- "newChatButton": "新建聊天",
- "newFolderButton": "新建文件夹",
- "viewModeLabel": "视图模式",
- "noChatSelected": "未选择聊天",
- "chatViewOptions": "聊天视图选项",
- "uiControls/title": "显示设置",
- "noChatSelectedPlaceholder": "请选择一个聊天",
- "unnamedChat": "未命名聊天",
- "emptyFolder": "文件夹为空",
-
- "tokenCount": "token数",
- "messageTokenCount": "输入token数",
- "tokenCount/hint": "消息中的token数量。使用当前选定模型的分词器计算。\n\n需要加载模型。",
- "messageTokenCount/hint": "消息中的token数量。使用当前选定模型的分词器计算。\n\n**不包括**附件中的token估计值。",
-
- "notes": "对话笔记",
- "notes/add/first": "添加笔记",
- "notes/add/another": "再加一条笔记",
- "notes/hint": "保存此聊天的笔记。笔记仅供您参考,不会发送给模型。所有更改将会自动保存。",
- "notes/placeholder": "在这里键入您的笔记...",
- "notes/delete": "删除笔记",
- "notes/noteLabel": "笔记",
- "notes/copyContent": "复制笔记内容",
-
- "actions/sendMessage/error": "发送消息失败",
- "actions/loadModel/error": "🥲 加载模型失败",
- "actions/addFile": "[实验性] 将文件附加到此消息\n(.pdf, 纯文本, 或 .docx)",
- "actions/addFile/label": "附加文件",
- "actions/changeRole": "在用户和助手角色之间切换。\n\n这对于引导对话朝特定方向发展非常有用。\n\n可用于构建‘少样本学习’或‘情境学习’场景",
- "actions/addImage": "添加图片",
- "actions/deleteMessage": "删除消息",
- "actions/deleteMessage/confirmation": "您确定要删除这条消息吗?",
- "actions/copyMessage": "复制消息",
- "actions/editMessage": "编辑消息",
- "actions/editMessage/cannotEditPreprocessed": "无法编辑预处理的消息,因为它们在运行预处理器后会被覆盖。要编辑消息,您可以:\n\n - 切换到原始消息并对其进行编辑,或者\n - 更改预处理器,使其产生所需的输出。",
- "actions/regenerateMessage": "重新生成消息",
- "actions/regenerateMessage/error": "重新生成消息失败",
- "actions/branchChat": "在此消息之后分支聊天",
- "actions/branchChat/error": "分支聊天失败",
- "actions/continueAssistantMessage": "继续助手消息",
- "actions/continueAssistantMessage/error": "继续助手消息失败",
- "actions/predictNext": "生成AI响应",
- "actions/predictNext/error": "生成AI响应失败",
- "actions/loadLastModel": "重新加载上次使用的模型",
- "actions/loadLastModel/tooltip": "点击以加载上次与该聊天一起使用的模型:\n\n{{lastModel}}",
- "actions/loadLastModel/error": "加载上次使用的模型失败。",
- "actions/continueCurrentModel": "使用当前模型",
- "actions/continueCurrentModel/tooltip": "当前模型:{{currentModel}}",
- "actions/changeToLastUsedModel": "加载 {{lastModel}}",
- "actions/changeToLastUsedModel/error": "切换到上次使用的模型失败。",
- "actions/changeToLastUsedModel/tooltip": "您上次在这个聊天中发送消息时使用了不同的模型。点击以卸载当前选定的模型({{currentModel}})并加载上次与这个聊天一起使用的模型:\n\n{{lastModel}}",
- "actions/switchToLastUsedModel": "切换到 {{lastModel}}",
- "actions/switchToLastUsedModel/tooltip": "点击以切换到上次与这个聊天一起使用的模型:\n\n{{lastModel}}",
- "actions/loadModel": "加载模型",
- "actions/toggleViewingProcessed/currentlyFalse": "当前查看的是原始消息。点击以查看预处理后的消息。",
- "actions/toggleViewingProcessed/currentlyTrue": "当前查看的是预处理后的消息。点击以查看原始消息。",
- "actions/toggleViewingProcessed/hint": "在消息发送给模型之前,它可能会被提示预处理器预处理。点击以切换查看原始消息和预处理后的消息。只有预处理后的消息会发送给模型。",
- "editMessageConfirm/title": "保留更改?",
- "editMessageConfirm/message": "您已对消息进行了更改。您想要保留这些更改吗?",
- "editMessageConfirm/keepEditing": "继续编辑",
- "editMessageConfirm/save": "保存",
- "editMessageConfirm/discard": "放弃更改",
- "tokenCount/totalNotAvailable": "token:{{current}}",
- "tokenCount/totalAvailable": "token:{{current}}/{{total}}",
- "tokenCount/totalAvailablePercentage": "上下文已满 {{percentage}}%",
- "tokenCount/contextOverflow": "未经处理的上下文大于模型的最大token限制。根据您的上下文溢出策略,上下文可能会被截断,或者消息可能不会被发送。",
- "modelLoader/manualLoadParams/label": "手动选择模型加载参数",
- "modelLoader/manualLoadParams/hint/before": "(或按住",
- "modelLoader/manualLoadParams/hint/after": ")",
- "actions/move/error": "移动失败",
- "actions/rename/error": "重命名失败",
- "actions/createChatAtRoot": "新建聊天...",
- "actions/createChatAtRoot/error": "在根目录创建聊天失败",
- "actions/createFolderAtRoot": "新建文件夹...",
- "actions/createFolderAtRoot/error": "在根目录创建文件夹失败",
- "actions/openInFolder/mac": "在 Finder 中显示",
- "actions/openInFolder/pc": "在文件资源管理器中显示",
-
- "actions/createChat/error": "创建聊天失败",
- "actions/deleteChat/errorTitle": "删除聊天失败",
-
- "userFile/fileSizeLimit": "文件大小限制为",
- "userFile/noImageSupport": "模型不支持图片输入",
- "userFile/errorPrefix": "错误 - ",
- "userFile/supportedImagePrefix": "不支持的图片类型 - 仅支持",
- "userFile/supportedImageSuffix": "。",
- "userFile/unsupportedFileType": "不支持的文件类型 - 仅支持图片、PDF 和 .txt 文件。",
- "userFile/maxFilesPerMessage": "每条消息的最大文件数已达到。不能添加超过 {{files}} 个文件。",
- "userFile/maxFileSizePerMessage": "每条消息的最大文件大小已达到。不能添加超过 {{size}} 的文件。",
- "userFile/maxFileSizePerConversation": "会话的文件大小已达上限,无法添加大于 {{size}} 的文件。",
- "userFile/failedToUploadError/title": "文件上传失败",
- "userFile/failedToAddFile/title": "文件添加到对话失败",
- "errorTitle": "错误",
- "userFile/chatTerminalDocumentsCount_one": "对话中有 {{count}} 个文档",
- "userFile/chatTerminalDocumentsCount_other": "对话中有 {{count}} 个文档",
-
- "prediction/busyModel/title": "模型忙碌中",
- "prediction/busyModel/message": "请等待模型完成后再试",
- "prediction/noModel/title": "未选择模型",
- "prediction/modelLoading": "消息已排队,将在模型加载完成后发送",
- "prediction/noModel/message": "选择一个模型以发送消息",
- "prediction/unloadModel/error": "卸载模型失败",
-
- "retrieval/user/processingLabel": "AI 正在思考...",
- "retrieval/powerUser/intermediateStepsHidden": "中间步骤已隐藏。点击以展开。",
- "retrieval/actions/clickToExpand": "点击以展开中间步骤",
- "retrieval/actions/clickToCollapse": "点击以折叠中间步骤",
-
- "style": "聊天外观",
-
- "style/viewMode/markdown": "Markdown",
- "style/viewMode/plaintext": "纯文本",
- "style/viewMode/monospace": "等宽字体",
-
- "speculativeDecodingVisualization/toggle": "可视化已采纳的草稿token",
- "speculativeDecodingVisualization/fromDraftModel_one": "已采纳的草稿token",
- "speculativeDecodingVisualization/fromDraftModel_other": "已采纳的草稿token",
- "speculativeDecodingVisualization/cannotChangeViewMode": "可视化草稿token时无法切换显示模式。",
-
- "style/fontSize/label": "字体大小",
- "style/fontSize/medium": "默认",
- "style/fontSize/large": "大",
- "style/fontSize/small": "小",
-
- "style/debugBlocks/label": "显示调试信息块",
-
- "style/thinkingUI/label": "默认展开推理块",
- "style/chatFullWidth/label": "聊天容器宽度适应窗口",
-
- "style/chatUtilityMenusShowLabel/label": "显示聊天实用工具菜单",
-
- "messageBlocks": {
- "expandBlockTooltip": "展开内容",
- "collapseBlockTooltip": "收起内容",
- "debug": {
- "label": "调试信息",
- "collapseTooltip": "收起调试信息块",
- "expandTooltip": "展开调试信息块"
- }
- },
-
- "chatTabOptions/clearAllMessages": "清空所有聊天记录...",
- "chatTabOptions/duplicateChat": "复制聊天",
-
- "topBarActions/duplicateChat": "复制聊天",
- "topBarActions/clearChat": "清除所有消息",
- "topBarActions/clearChatConfirmation": "您确定要清除此聊天中的所有消息吗?",
- "topBarActions/clearChatCancel": "取消",
- "topBarActions/clearChatDelete": "全部清除",
-
- "noModels.indexing": "正在索引模型文件...(这可能需要一段时间)",
- "noModels.downloading": "正在下载您的第一个LLM...",
- "noModels": "还没有LLM!下载一个开始吧!",
-
- "plugins": {
- "pluginTrigger": {
- "noPlugins": "插件",
- "multiplePlugins": "{{dynamicValue}} 个插件"
- },
- "pluginSelect": {
- "title": "插件",
- "dropdown": {
- "configure": "配置",
- "disable": "禁用",
- "fork": "派生",
- "uninstall": "卸载"
- },
- "actionButtons": {
- "create": "+新建",
- "import": "导入",
- "discover": "发现"
- },
- "recentlyCreated": {
- "title": "最近创建的插件",
- "placeholder": "你创建的插件会显示在这里"
- },
- "startRunningDevelopmentPlugin/error": "开发模式插件启动失败",
- "stopRunningDevelopmentPlugin/error": "开发模式插件停止失败",
- "forceReInitPlugin/error": "重启插件失败"
- },
- "pluginConfiguration": {
- "title": "插件配置",
- "selectAPlugin": "选择一个插件以编辑其配置",
- "preprocessorAndGenerator": "此插件包含自定义预处理器和生成器",
- "generatorOnly": "此插件包含自定义生成器",
- "preprocessorOnly": "此插件包含自定义预处理器"
- },
- "instructions": {
- "runTheFollowing": "要运行你的插件,请打开终端并输入:",
- "pushTo": "将插件推送到 Hub 与他人分享(可选)",
- "createdSuccessfully": "插件创建成功",
- "creatingPlugin": "正在创建插件...",
- "projectFilesTitle": "项目文件",
- "buttons": {
- "documentation": "文档",
- "dismiss": "关闭",
- "publish": "发布",
- "openInZed": "在 Zed 中打开",
- "openInVscode": "在 VS Code 中打开",
- "revealInFinder": "在 Finder 中显示",
- "openInFileExplorer": "在文件资源管理器中打开"
- }
- },
- "localFork": {
- "error": "创建插件的本地副本失败"
- },
- "restartErrorPlugin/error": "重启插件失败"
- },
-
- "genInfo": {
- "tokensPerSecond": "{{tokensPerSecond}} token/s",
- "predictedTokensCount": "{{predictedTokensCount}} token",
- "timeToFirstTokenSec": "首个token用时 {{timeToFirstTokenSec}} s",
- "stopReason": "停止原因: {{stopReason}}",
- "stopReason.userStopped": "用户已停止",
- "stopReason.modelUnloaded": "模型已卸载",
- "stopReason.failed": "生成失败",
- "stopReason.eosFound": "检测到 EOS token",
- "stopReason.stopStringFound": "发现停止字符串",
- "stopReason.toolCalls": "工具调用",
- "stopReason.maxPredictedTokensReached": "达到最大预测词元",
- "stopReason.contextLengthReached": "达到上下文长度上限",
- "speculativeDecodedBy": "草稿模型:{{decodedBy}}",
- "speculativeDecodingStats": "已采纳 {{accepted}}/{{total}} 个草稿token({{percentage}}%)"
- },
-
- "tabs": {
- "systemPromptEditorTab.headerLabel": "编辑系统提示词"
- }
-}
+{
+ "modelLoaderPlaceholder": "选择要加载的模型",
+ "systemPromptPlaceholder": "设置系统提示",
+ "userRoleText": "用户",
+ "assistantRoleText": "助手",
+ "addMessageButtonText": "添加",
+ "addMessageButtonText/toolTip": "在不触发预测的情况下将消息插入上下文中",
+ "sendMessageButtonText": "发送",
+ "sendMessageButtonText/toolTip": "将您的提示和对话历史发送给模型进行处理",
+ "ejectButtonText": "卸载",
+ "unloadTooltip": "从内存中卸载模型",
+ "cancelButtonText": "取消",
+ "loadButtonText": "加载",
+ "advancedSegmentText": "高级",
+ "chatSegmentText": "聊天",
+ "chatSidebarTitle": "聊天列表",
+ "newChatButton": "新建聊天",
+ "newFolderButton": "新建文件夹",
+ "viewModeLabel": "视图模式",
+ "noChatSelected": "未选择聊天",
+ "chatViewOptions": "聊天视图选项",
+ "uiControls/title": "显示设置",
+ "noChatSelectedPlaceholder": "请选择一个聊天",
+ "unnamedChat": "未命名聊天",
+ "emptyFolder": "文件夹为空",
+
+ "tokenCount": "token数",
+ "messageTokenCount": "输入token数",
+ "tokenCount/hint": "消息中的token数量。使用当前选定模型的分词器计算。\n\n需要加载模型。",
+ "messageTokenCount/hint": "消息中的token数量。使用当前选定模型的分词器计算。\n\n**不包括**附件中的token估计值。",
+
+ "notes": "对话笔记",
+ "notes/add/first": "添加笔记",
+ "notes/add/another": "再加一条笔记",
+ "notes/hint": "保存此聊天的笔记。笔记仅供您参考,不会发送给模型。所有更改将会自动保存。",
+ "notes/placeholder": "在这里键入您的笔记...",
+ "notes/delete": "删除笔记",
+ "notes/noteLabel": "笔记",
+ "notes/copyContent": "复制笔记内容",
+
+ "actions/sendMessage/error": "发送消息失败",
+ "actions/loadModel/error": "🥲 加载模型失败",
+ "actions/addFile": "[实验性] 将文件附加到此消息\n(.pdf, 纯文本, 或 .docx)",
+ "actions/addFile/label": "附加文件",
+ "actions/changeRole": "在用户和助手角色之间切换。\n\n这对于引导对话朝特定方向发展非常有用。\n\n可用于构建‘少样本学习’或‘情境学习’场景",
+ "actions/addImage": "添加图片",
+ "actions/deleteMessage": "删除消息",
+ "actions/deleteMessage/confirmation": "您确定要删除这条消息吗?",
+ "actions/copyMessage": "复制消息",
+ "actions/editMessage": "编辑消息",
+ "actions/editMessage/cannotEditPreprocessed": "无法编辑预处理的消息,因为它们在运行预处理器后会被覆盖。要编辑消息,您可以:\n\n - 切换到原始消息并对其进行编辑,或者\n - 更改预处理器,使其产生所需的输出。",
+ "actions/regenerateMessage": "重新生成消息",
+ "actions/regenerateMessage/error": "重新生成消息失败",
+ "actions/branchChat": "在此消息之后分支聊天",
+ "actions/branchChat/error": "分支聊天失败",
+ "actions/continueAssistantMessage": "继续助手消息",
+ "actions/continueAssistantMessage/error": "继续助手消息失败",
+ "actions/predictNext": "生成AI响应",
+ "actions/predictNext/error": "生成AI响应失败",
+ "actions/loadLastModel": "重新加载上次使用的模型",
+ "actions/loadLastModel/tooltip": "点击以加载上次与该聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/loadLastModel/error": "加载上次使用的模型失败。",
+ "actions/continueCurrentModel": "使用当前模型",
+ "actions/continueCurrentModel/tooltip": "当前模型:{{currentModel}}",
+ "actions/changeToLastUsedModel": "加载 {{lastModel}}",
+ "actions/changeToLastUsedModel/error": "切换到上次使用的模型失败。",
+ "actions/changeToLastUsedModel/tooltip": "您上次在这个聊天中发送消息时使用了不同的模型。点击以卸载当前选定的模型({{currentModel}})并加载上次与这个聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/switchToLastUsedModel": "切换到 {{lastModel}}",
+ "actions/switchToLastUsedModel/tooltip": "点击以切换到上次与这个聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/loadModel": "加载模型",
+ "actions/clearLastUsedModel": "清除上次使用的模型",
+ "actions/clearLastUsedModel/error": "清除上次使用的模型失败。",
+ "actions/toggleViewingProcessed/currentlyFalse": "当前查看的是原始消息。点击以查看预处理后的消息。",
+ "actions/toggleViewingProcessed/currentlyTrue": "当前查看的是预处理后的消息。点击以查看原始消息。",
+ "actions/toggleViewingProcessed/hint": "在消息发送给模型之前,它可能会被提示预处理器预处理。点击以切换查看原始消息和预处理后的消息。只有预处理后的消息会发送给模型。",
+ "editMessageConfirm/title": "保留更改?",
+ "editMessageConfirm/message": "您已对消息进行了更改。您想要保留这些更改吗?",
+ "editMessageConfirm/keepEditing": "继续编辑",
+ "editMessageConfirm/save": "保存",
+ "editMessageConfirm/discard": "放弃更改",
+ "tokenCount/totalNotAvailable": "token:{{current}}",
+ "tokenCount/totalAvailable": "token:{{current}}/{{total}}",
+ "tokenCount/totalAvailablePercentage": "上下文已满 {{percentage}}%",
+ "tokenCount/contextOverflow": "未经处理的上下文大于模型的最大token限制。根据您的上下文溢出策略,上下文可能会被截断,或者消息可能不会被发送。",
+ "modelLoader/manualLoadParams/label": "手动选择模型加载参数",
+ "modelLoader/manualLoadParams/hint/before": "(或按住",
+ "modelLoader/manualLoadParams/hint/after": ")",
+ "actions/move/error": "移动失败",
+ "actions/rename/error": "重命名失败",
+ "actions/createChatAtRoot": "新建聊天...",
+ "actions/createChatAtRoot/error": "在根目录创建聊天失败",
+ "actions/createFolderAtRoot": "新建文件夹...",
+ "actions/createFolderAtRoot/error": "在根目录创建文件夹失败",
+ "actions/openInFolder/mac": "在 Finder 中显示",
+ "actions/openInFolder/pc": "在文件资源管理器中显示",
+
+ "actions/createChat/error": "创建聊天失败",
+ "actions/deleteChat/errorTitle": "删除聊天失败",
+
+ "userFile/fileSizeLimit": "文件大小限制为",
+ "userFile/noImageSupport": "模型不支持图片输入",
+ "userFile/errorPrefix": "错误 - ",
+ "userFile/supportedImagePrefix": "不支持的图片类型 - 仅支持",
+ "userFile/supportedImageSuffix": "。",
+ "userFile/unsupportedFileType": "不支持的文件类型 - 仅支持图片、PDF 和 .txt 文件。",
+ "userFile/maxFilesPerMessage": "每条消息的最大文件数已达到。不能添加超过 {{files}} 个文件。",
+ "userFile/maxFileSizePerMessage": "每条消息的最大文件大小已达到。不能添加超过 {{size}} 的文件。",
+ "userFile/maxFileSizePerConversation": "会话的文件大小已达上限,无法添加大于 {{size}} 的文件。",
+ "userFile/failedToUploadError/title": "文件上传失败",
+ "userFile/failedToAddFile/title": "文件添加到对话失败",
+ "errorTitle": "错误",
+ "userFile/chatTerminalDocumentsCount_one": "对话中有 {{count}} 个文档",
+ "userFile/chatTerminalDocumentsCount_other": "对话中有 {{count}} 个文档",
+
+ "prediction/busyModel/title": "模型忙碌中",
+ "prediction/busyModel/message": "请等待模型完成后再试",
+ "prediction/noModel/title": "未选择模型",
+ "prediction/modelLoading": "消息已排队,将在模型加载完成后发送",
+ "prediction/noModel/message": "选择一个模型以发送消息",
+ "prediction/unloadModel/error": "卸载模型失败",
+
+ "retrieval/user/processingLabel": "AI 正在思考...",
+ "retrieval/powerUser/intermediateStepsHidden": "中间步骤已隐藏。点击以展开。",
+ "retrieval/actions/clickToExpand": "点击以展开中间步骤",
+ "retrieval/actions/clickToCollapse": "点击以折叠中间步骤",
+
+ "style": "聊天外观",
+
+ "style/viewMode/markdown": "Markdown",
+ "style/viewMode/plaintext": "纯文本",
+ "style/viewMode/monospace": "等宽字体",
+
+ "speculativeDecodingVisualization/toggle": "可视化已采纳的草稿token",
+ "speculativeDecodingVisualization/fromDraftModel_one": "已采纳的草稿token",
+ "speculativeDecodingVisualization/fromDraftModel_other": "已采纳的草稿token",
+ "speculativeDecodingVisualization/cannotChangeViewMode": "可视化草稿token时无法切换显示模式。",
+
+ "style/fontSize/label": "字体大小",
+ "style/fontSize/medium": "默认",
+ "style/fontSize/large": "大",
+ "style/fontSize/small": "小",
+
+ "style/debugBlocks/label": "显示调试信息块",
+
+ "style/thinkingUI/label": "默认展开推理块",
+ "style/chatFullWidth/label": "聊天容器宽度适应窗口",
+
+ "style/chatUtilityMenusShowLabel/label": "显示聊天实用工具菜单",
+
+ "messageBlocks": {
+ "expandBlockTooltip": "展开内容",
+ "collapseBlockTooltip": "收起内容",
+ "debug": {
+ "label": "调试信息",
+ "collapseTooltip": "收起调试信息块",
+ "expandTooltip": "展开调试信息块"
+ }
+ },
+
+ "chatTabOptions/clearAllMessages": "清空所有聊天记录...",
+ "chatTabOptions/duplicateChat": "复制聊天",
+
+ "topBarActions/duplicateChat": "复制聊天",
+ "topBarActions/clearChat": "清除所有消息",
+ "topBarActions/clearChatConfirmation": "您确定要清除此聊天中的所有消息吗?",
+ "topBarActions/clearChatCancel": "取消",
+ "topBarActions/clearChatDelete": "全部清除",
+
+ "noModels.indexing": "正在索引模型文件...(这可能需要一段时间)",
+ "noModels.downloading": "正在下载您的第一个LLM...",
+ "noModels": "还没有LLM!下载一个开始吧!",
+
+ "plugins": {
+ "pluginTrigger": {
+ "noPlugins": "插件",
+ "multiplePlugins": "{{dynamicValue}} 个插件"
+ },
+ "pluginSelect": {
+ "title": "插件",
+ "dropdown": {
+ "configure": "配置",
+ "disable": "禁用",
+ "fork": "派生",
+ "uninstall": "卸载"
+ },
+ "actionButtons": {
+ "create": "+新建",
+ "import": "导入",
+ "discover": "发现"
+ },
+ "recentlyCreated": {
+ "title": "最近创建的插件",
+ "placeholder": "你创建的插件会显示在这里"
+ },
+ "startRunningDevelopmentPlugin/error": "开发模式插件启动失败",
+ "stopRunningDevelopmentPlugin/error": "开发模式插件停止失败",
+ "forceReInitPlugin/error": "重启插件失败"
+ },
+ "pluginConfiguration": {
+ "title": "插件配置",
+ "selectAPlugin": "选择一个插件以编辑其配置",
+ "preprocessorAndGenerator": "此插件包含自定义预处理器和生成器",
+ "generatorOnly": "此插件包含自定义生成器",
+ "preprocessorOnly": "此插件包含自定义预处理器"
+ },
+ "instructions": {
+ "runTheFollowing": "要运行你的插件,请打开终端并输入:",
+ "pushTo": "将插件推送到 Hub 与他人分享(可选)",
+ "createdSuccessfully": "插件创建成功",
+ "creatingPlugin": "正在创建插件...",
+ "projectFilesTitle": "项目文件",
+ "buttons": {
+ "documentation": "文档",
+ "dismiss": "关闭",
+ "publish": "发布",
+ "openInZed": "在 Zed 中打开",
+ "openInVscode": "在 VS Code 中打开",
+ "revealInFinder": "在 Finder 中显示",
+ "openInFileExplorer": "在文件资源管理器中打开"
+ }
+ },
+ "localFork": {
+ "error": "创建插件的本地副本失败"
+ },
+ "restartErrorPlugin/error": "重启插件失败"
+ },
+
+ "genInfo": {
+ "tokensPerSecond": "{{tokensPerSecond}} token/s",
+ "predictedTokensCount": "{{predictedTokensCount}} token",
+ "timeToFirstTokenSec": "首个token用时 {{timeToFirstTokenSec}} s",
+ "stopReason": "停止原因: {{stopReason}}",
+ "stopReason.userStopped": "用户已停止",
+ "stopReason.modelUnloaded": "模型已卸载",
+ "stopReason.failed": "生成失败",
+ "stopReason.eosFound": "检测到 EOS token",
+ "stopReason.stopStringFound": "发现停止字符串",
+ "stopReason.toolCalls": "工具调用",
+ "stopReason.maxPredictedTokensReached": "达到最大预测词元",
+ "stopReason.contextLengthReached": "达到上下文长度上限",
+ "speculativeDecodedBy": "草稿模型:{{decodedBy}}",
+ "speculativeDecodingStats": "已采纳 {{accepted}}/{{total}} 个草稿token({{percentage}}%)"
+ },
+
+ "tabs": {
+ "systemPromptEditorTab.headerLabel": "编辑系统提示词"
+ }
+}
diff --git a/zh-CN/config.json b/zh-CN/config.json
index 48a45d2f..942101cd 100644
--- a/zh-CN/config.json
+++ b/zh-CN/config.json
@@ -1,614 +1,621 @@
-{
- "noInstanceSelected": "未选择模型实例",
- "resetToDefault": "重置",
- "showAdvancedSettings": "显示高级设置",
- "showAll": "全部",
- "basicSettings": "基础",
- "configSubtitle": "加载或保存预设并尝试模型参数覆盖",
- "inferenceParameters/title": "预测参数",
- "inferenceParameters/info": "尝试影响预测的参数。",
- "generalParameters/title": "通用",
- "samplingParameters/title": "采样",
- "basicTab": "基础",
- "advancedTab": "高级",
- "advancedTab/title": "🧪 高级配置",
- "advancedTab/expandAll": "展开所有",
- "advancedTab/overridesTitle": "配置覆盖",
- "advancedTab/noConfigsText": "您没有未保存的更改 - 编辑上方值以在此处查看覆盖。",
- "loadInstanceFirst": "加载模型以查看可配置参数",
- "noListedConfigs": "无可配置参数",
- "generationParameters/info": "尝试影响文本生成的基础参数。",
- "loadParameters/title": "加载参数",
- "loadParameters/description": "控制模型初始化和加载到内存的方式的设置。",
- "loadParameters/reload": "重新加载以应用更改",
- "loadParameters/reload/error": "重新加载模型失败",
- "discardChanges": "放弃更改",
- "loadModelToSeeOptions": "加载模型以查看选项",
- "schematicsError.title": "配置结构在以下字段存在错误:",
- "manifestSections": {
- "structuredOutput/title": "结构化输出",
- "speculativeDecoding/title": "投机解码",
- "sampling/title": "采样",
- "settings/title": "设置",
- "toolUse/title": "工具调用",
- "promptTemplate/title": "提示词模板",
- "customFields/title": "自定义字段"
- },
-
- "llm.prediction.systemPrompt/title": "系统提示",
- "llm.prediction.systemPrompt/description": "使用此字段向模型提供背景指令,如一套规则、约束或一般要求。",
- "llm.prediction.systemPrompt/subTitle": "AI 指南",
- "llm.prediction.systemPrompt/openEditor": "编辑器",
- "llm.prediction.systemPrompt/closeEditor": "关闭编辑器",
- "llm.prediction.systemPrompt/openedEditor": "在编辑器中打开...",
- "llm.prediction.systemPrompt/edit": "编辑系统提示符...",
- "llm.prediction.systemPrompt/addInstructionsWithMore": "添加说明...",
- "llm.prediction.systemPrompt/addInstructions": "添加说明",
- "llm.prediction.temperature/title": "温度",
- "llm.prediction.temperature/subTitle": "引入多少随机性。0 将始终产生相同的结果,而较高值将增加创造性和变化。",
- "llm.prediction.temperature/info": "来自 llama.cpp 帮助文档:\"默认值为 <{{dynamicValue}}>,它在随机性和确定性之间提供了平衡。极端情况下,温度为 0 会始终选择最可能的下一个token,导致每次运行的输出相同\"",
- "llm.prediction.llama.sampling/title": "采样",
- "llm.prediction.topKSampling/title": "Top K 采样",
- "llm.prediction.topKSampling/subTitle": "将下一个token限制为模型预测的前 k 个最可能的token。作用类似于温度",
- "llm.prediction.topKSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-k 采样是一种仅从模型预测的前 k 个最可能的token中选择下一个token的文本生成方法。\n\n它有助于减少生成低概率或无意义token的风险,但也可能限制输出的多样性。\n\n更高的 top-k 值(例如,100)将考虑更多token,从而生成更多样化的文本,而较低的值(例如,10)将专注于最可能的token,生成更保守的文本。\n\n• 默认值为 <{{dynamicValue}}>",
- "llm.prediction.llama.cpuThreads/title": "CPU 线程",
- "llm.prediction.llama.cpuThreads/subTitle": "推理期间使用的 CPU 线程数",
- "llm.prediction.llama.cpuThreads/info": "计算期间要使用的线程数。增加线程数并不总是与更好的性能相关联。默认值为 <{{dynamicValue}}>。",
- "llm.prediction.maxPredictedTokens/title": "限制响应长度",
- "llm.prediction.maxPredictedTokens/subTitle": "可选地限制 AI 响应的长度",
- "llm.prediction.maxPredictedTokens/info": "控制聊天机器人的响应最大长度。开启以设置响应的最大长度限制,或关闭以让聊天机器人决定何时停止。",
- "llm.prediction.maxPredictedTokens/inputLabel": "最大响应长度(token)",
- "llm.prediction.maxPredictedTokens/wordEstimate": "约 {{maxWords}} 词",
- "llm.prediction.repeatPenalty/title": "重复惩罚",
- "llm.prediction.repeatPenalty/subTitle": "多大程度上避免重复相同的token",
- "llm.prediction.repeatPenalty/info": "来自 llama.cpp 帮助文档:\"有助于防止模型生成重复或单调的文本。\n\n更高的值(例如,1.5)将更强烈地惩罚重复,而更低的值(例如,0.9)将更为宽容。\" • 默认值为 <{{dynamicValue}}>",
- "llm.prediction.minPSampling/title": "最小 P 采样",
- "llm.prediction.minPSampling/subTitle": "token被选为输出的最低基本概率",
- "llm.prediction.minPSampling/info": "来自 llama.cpp 帮助文档:\n\n相对于最可能token的概率,token被视为考虑的最低概率。必须在 [0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
- "llm.prediction.topPSampling/title": "Top P 采样",
- "llm.prediction.topPSampling/subTitle": "可能的下一个token的最小累积概率。作用类似于温度",
- "llm.prediction.topPSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-p 采样,也称为核心采样,是另一种文本生成方法,从累积概率至少为 p 的token子集中选择下一个token。\n\n这种方法通过同时考虑token的概率和要从中采样的token数量,在多样性和质量之间提供了平衡。\n\n更高的 top-p 值(例如,0.95)将导致更多样化的文本,而较低的值(例如,0.5)将生成更集中和保守的文本。必须在 (0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
- "llm.prediction.stopStrings/title": "停止字符串",
- "llm.prediction.stopStrings/subTitle": "应该停止模型生成更多token的字符串",
- "llm.prediction.stopStrings/info": "遇到特定字符串时将停止模型生成更多token",
- "llm.prediction.stopStrings/placeholder": "输入一个字符串并按 ⏎",
- "llm.prediction.contextOverflowPolicy/title": "上下文溢出",
- "llm.prediction.contextOverflowPolicy/subTitle": "当对话超出模型处理能力时,模型应该如何表现",
- "llm.prediction.contextOverflowPolicy/info": "决定当对话超过模型的工作内存('上下文')大小时该怎么做",
- "llm.prediction.llama.frequencyPenalty/title": "频率惩罚",
- "llm.prediction.llama.presencePenalty/title": "存在惩罚",
- "llm.prediction.llama.tailFreeSampling/title": "尾部自由采样",
- "llm.prediction.llama.locallyTypicalSampling/title": "局部典型采样",
- "llm.prediction.llama.xtcProbability/title": "XTC 采样概率",
- "llm.prediction.llama.xtcProbability/subTitle": "XTC(排除顶选)采样器将在每个生成token时以该概率激活。XTC 采样有助于提升创造力,减少陈词滥调",
- "llm.prediction.llama.xtcProbability/info": "XTC(排除顶选)采样将以该概率在每个token生成时激活。XTC 采样通常可以提升创造力并减少陈词滥调",
- "llm.prediction.llama.xtcThreshold/title": "XTC 采样阈值",
- "llm.prediction.llama.xtcThreshold/subTitle": "XTC(排除顶选)阈值。在 `xtc-probability` 概率下,查找概率介于 `xtc-threshold` 和 0.5 之间的token,并仅保留其中概率最低的一个",
- "llm.prediction.llama.xtcThreshold/info": "XTC(排除顶选)阈值。在 `xtc-probability` 概率下,查找概率介于 `xtc-threshold` 和 0.5 之间的所有token,仅保留概率最低的一个,其余全部移除",
- "llm.prediction.mlx.topKSampling/title": "Top K 采样",
- "llm.prediction.mlx.topKSampling/subTitle": "将下一个token限制为概率最高的前 k 个token。作用类似于温度",
- "llm.prediction.mlx.topKSampling/info": "仅从概率最高的前 k 个token中选择下一个token,作用类似于温度",
- "llm.prediction.onnx.topKSampling/title": "Top K 采样",
- "llm.prediction.onnx.topKSampling/subTitle": "将下一个token限制为前 k 个最可能的token。作用类似于温度",
- "llm.prediction.onnx.topKSampling/info": "来自 ONNX 文档:\n\n保留最高概率词汇表token的数量以进行 top-k 过滤\n\n• 默认情况下此过滤器关闭",
- "llm.prediction.onnx.repeatPenalty/title": "重复惩罚",
- "llm.prediction.onnx.repeatPenalty/subTitle": "多大程度上避免重复相同的token",
- "llm.prediction.onnx.repeatPenalty/info": "更高的值阻止模型重复自身",
- "llm.prediction.onnx.topPSampling/title": "Top P 采样",
- "llm.prediction.onnx.topPSampling/subTitle": "可能的下一个token的最小累积概率。作用类似于温度",
- "llm.prediction.onnx.topPSampling/info": "来自 ONNX 文档:\n\n仅保留累积概率达到或超过 TopP 的最可能token用于生成\n\n• 默认情况下此过滤器关闭",
- "llm.prediction.seed/title": "种子",
- "llm.prediction.structured/title": "结构化输出",
- "llm.prediction.structured/info": "结构化输出",
- "llm.prediction.structured/description": "高级:您可以提供[JSON Schema](https://json-schema.org/learn/miscellaneous-examples)来强制执行模型中的特定输出格式。阅读[留档](https://lmstudio.ai/docs/advanced/structured-output)了解更多",
- "llm.prediction.tools/title": "工具调用",
- "llm.prediction.tools/description": "高级功能:你可以提供 JSON 格式的工具列表,模型可请求调用这些工具。详情请查阅[文档](https://lmstudio.ai/docs/advanced/tool-use)",
- "llm.prediction.tools/serverPageDescriptionAddon": "通过服务端 API 调用时,请将其作为 `tools` 字段传入请求体",
- "llm.prediction.promptTemplate/title": "提示模板",
- "llm.prediction.promptTemplate/subTitle": "聊天中消息发送给模型的格式。更改此设置可能会引入意外行为 - 确保您知道自己在做什么!",
- "llm.prediction.speculativeDecoding.numDraftTokensExact/title": "草稿生成token数",
- "llm.prediction.speculativeDecoding.numDraftTokensExact/subTitle": "每生成一个主模型token,草稿模型生成的token数量。平衡计算量与收益,选择合适的数值",
- "llm.prediction.speculativeDecoding.minContinueDraftingProbability/title": "草稿概率阈值",
- "llm.prediction.speculativeDecoding.minContinueDraftingProbability/subTitle": "仅当token概率高于该阈值时才继续草稿。值越高风险越低,收益也越低",
- "llm.prediction.speculativeDecoding.minDraftLengthToConsider/title": "最小草稿长度",
- "llm.prediction.speculativeDecoding.minDraftLengthToConsider/subTitle": "草稿长度低于该值将被主模型忽略。值越高风险越低,收益也越低",
- "llm.prediction.speculativeDecoding.maxTokensToDraft/title": "最大草稿长度",
- "llm.prediction.speculativeDecoding.maxTokensToDraft/subTitle": "草稿中允许的最大token数。如果所有token概率都高于阈值,则为上限。值越低风险越低,收益也越低",
- "llm.prediction.speculativeDecoding.draftModel/title": "草稿模型",
- "llm.prediction.reasoning.parsing/title": "推理过程解析方式",
- "llm.prediction.reasoning.parsing/subTitle": "控制模型输出中推理过程的解析方式",
-
- "llm.load.mainGpu/title": "主 GPU",
- "llm.load.mainGpu/subTitle": "用于模型计算的 GPU 优先级",
- "llm.load.mainGpu/placeholder": "选择主 GPU...",
- "llm.load.splitStrategy/title": "拆分策略",
- "llm.load.splitStrategy/subTitle": "如何跨 GPU 拆分模型计算",
- "llm.load.splitStrategy/placeholder": "选择拆分策略...",
- "llm.load.offloadKVCacheToGpu/title": "将 KV 缓存卸载到 GPU 内存",
- "llm.load.offloadKVCacheToGpu/subTitle": "将 KV 缓存卸载到 GPU 内存。这可以提高性能但需要更多 GPU 内存",
- "load.gpuStrictVramCap/title": "限制模型卸载至专用 GPU 内存",
- "load.gpuStrictVramCap.customSubTitleOff": "关闭:若专用 GPU 内存已满,允许将模型权重卸载至共享内存",
- "load.gpuStrictVramCap.customSubTitleOn": "开启:系统将限制模型权重的卸载仅限于专用 GPU 内存及 RAM 。上下文仍可能使用共享内存",
- "load.gpuStrictVramCap.customGpuOffloadWarning": "模型的卸载仅限于专用 GPU 内存。实际卸载的层数可能会有所不同",
- "load.allGpusDisabledWarning": "所有 GPU 目前均被禁用。请启用至少一个以进行卸载",
-
- "llm.load.contextLength/title": "上下文长度",
- "llm.load.contextLength/subTitle": "模型可以一次性关注的token最大数量。请参阅“推理参数”下的“对话溢出”选项以获取更多管理方式",
- "llm.load.contextLength/info": "指定模型一次可以考虑的最大token数量,影响其处理过程中保留的上下文量",
- "llm.load.contextLength/warning": "设置较高的上下文长度值会对内存使用产生显著影响",
- "llm.load.seed/title": "种子",
- "llm.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1 表示随机",
- "llm.load.seed/info": "随机种子:设置随机数生成的种子以确保可重复的结果",
-
- "llm.load.llama.evalBatchSize/title": "评估批处理大小",
- "llm.load.llama.evalBatchSize/subTitle": "每次处理的输入token数量。增加此值会提高性能,但会增加内存使用量",
- "llm.load.llama.evalBatchSize/info": "设置评估期间一起处理的示例数量,影响速度和内存使用",
- "llm.load.llama.ropeFrequencyBase/title": "RoPE 频率基",
- "llm.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下提高性能",
- "llm.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基频,影响位置信息的嵌入方式",
- "llm.load.llama.ropeFrequencyScale/title": "RoPE 频率比例",
- "llm.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放,以使用 RoPE 扩展有效上下文",
- "llm.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放,以控制位置编码的粒度",
- "llm.load.llama.acceleration.offloadRatio/title": "GPU 卸载",
- "llm.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速的离散模型层数",
- "llm.load.llama.acceleration.offloadRatio/info": "设置卸载到 GPU 的层数。",
- "llm.load.llama.flashAttention/title": "快速注意力",
- "llm.load.llama.flashAttention/subTitle": "降低某些模型的内存使用量和生成时间",
- "llm.load.llama.flashAttention/info": "加速注意力机制,实现更快、更高效的处理",
- "llm.load.numExperts/title": "专家数量",
- "llm.load.numExperts/subTitle": "模型中使用的专家数量",
- "llm.load.numExperts/info": "模型中使用的专家数量",
- "llm.load.llama.keepModelInMemory/title": "保持模型在内存中",
- "llm.load.llama.keepModelInMemory/subTitle": "即使模型卸载到 GPU 也预留系统内存。提高性能但需要更多的系统 RAM",
- "llm.load.llama.keepModelInMemory/info": "防止模型交换到磁盘,确保更快的访问,但以更高的 RAM 使用率为代价",
- "llm.load.llama.useFp16ForKVCache/title": "使用 FP16 用于 KV 缓存",
- "llm.load.llama.useFp16ForKVCache/info": "通过以半精度(FP16)存储缓存来减少内存使用",
- "llm.load.llama.tryMmap/title": "尝试 mmap()",
- "llm.load.llama.tryMmap/subTitle": "提高模型的加载时间。禁用此功能可能在模型大于可用系统 RAM 时提高性能",
- "llm.load.llama.tryMmap/info": "直接从磁盘加载模型文件到内存",
- "llm.load.llama.cpuThreadPoolSize/title": "CPU 线程池大小",
- "llm.load.llama.cpuThreadPoolSize/subTitle": "为模型计算分配的 CPU 线程池线程数",
- "llm.load.llama.cpuThreadPoolSize/info": "分配用于模型计算的 CPU 线程池线程数量。线程数增加未必总能带来更佳性能。默认值为 <{{dynamicValue}}>。",
- "llm.load.llama.kCacheQuantizationType/title": "K 缓存量化类型",
- "llm.load.llama.kCacheQuantizationType/subTitle": "较低的量化类型可降低内存占用,但可能影响模型质量,不同模型效果差异大。",
- "llm.load.llama.vCacheQuantizationType/title": "V 缓存量化类型",
- "llm.load.llama.vCacheQuantizationType/subTitle": "较低的量化类型可降低内存占用,但可能影响模型质量,不同模型效果差异大。",
- "llm.load.llama.vCacheQuantizationType/turnedOnWarning": "⚠️ 如未启用Flash Attention,请务必关闭该选项",
- "llm.load.llama.vCacheQuantizationType/disabledMessage": "仅在启用Flash Attention时可用",
- "llm.load.llama.vCacheQuantizationType/invalidF32MetalState": "⚠️ 使用 F32 时请禁用Flash Attention",
- "llm.load.mlx.kvCacheBits/title": "KV 缓存量化位数",
- "llm.load.mlx.kvCacheBits/subTitle": "KV 缓存量化使用的位数",
- "llm.load.mlx.kvCacheBits/info": "设置 KV 缓存需要量化成的位数",
- "llm.load.mlx.kvCacheBits/turnedOnWarning": "启用 KV 缓存量化时,上下文长度设置将被忽略",
- "llm.load.mlx.kvCacheGroupSize/title": "KV 缓存量化分组大小",
- "llm.load.mlx.kvCacheGroupSize/subTitle": "量化操作时分组的大小,组越大内存占用越低,但模型质量可能下降",
- "llm.load.mlx.kvCacheGroupSize/info": "KV 缓存量化时使用的分组位数",
- "llm.load.mlx.kvCacheQuantizationStart/title": "KV 缓存量化:开始量化的上下文长度",
- "llm.load.mlx.kvCacheQuantizationStart/subTitle": "达到此上下文长度后开始对 KV 缓存进行量化",
- "llm.load.mlx.kvCacheQuantizationStart/info": "达到此上下文长度后开始对 KV 缓存进行量化",
- "llm.load.mlx.kvCacheQuantization/title": "KV 缓存量化",
- "llm.load.mlx.kvCacheQuantization/subTitle": "对模型的 KV 缓存进行量化,可加快生成速度并降低内存占用,但可能影响输出质量。",
- "llm.load.mlx.kvCacheQuantization/bits/title": "KV 缓存量化位数",
- "llm.load.mlx.kvCacheQuantization/bits/tooltip": "KV 缓存量化所用的位数",
- "llm.load.mlx.kvCacheQuantization/bits/bits": "位数",
- "llm.load.mlx.kvCacheQuantization/groupSize/title": "分组策略",
- "llm.load.mlx.kvCacheQuantization/groupSize/accuracy": "高精度",
- "llm.load.mlx.kvCacheQuantization/groupSize/balanced": "均衡",
- "llm.load.mlx.kvCacheQuantization/groupSize/speedy": "极速",
- "llm.load.mlx.kvCacheQuantization/groupSize/tooltip": "高级:量化乘法的分组大小配置\n\n• 高精度 = 分组 32\n• 均衡 = 分组 64\n• 极速 = 分组 128\n",
- "llm.load.mlx.kvCacheQuantization/quantizedStart/title": "达到此上下文长度后开始量化",
- "llm.load.mlx.kvCacheQuantization/quantizedStart/tooltip": "当上下文长度达到该值时,开始对 KV 缓存进行量化",
-
- "embedding.load.contextLength/title": "上下文长度",
- "embedding.load.contextLength/subTitle": "模型可以一次性关注的token最大数量。请参阅“推理参数”下的“对话溢出”选项以获取更多管理方式",
- "embedding.load.contextLength/info": "指定模型一次可以考虑的最大token数量,影响其处理过程中保留的上下文量",
- "embedding.load.llama.ropeFrequencyBase/title": "RoPE 频率基",
- "embedding.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下提高性能",
- "embedding.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基频,影响位置信息的嵌入方式",
- "embedding.load.llama.evalBatchSize/title": "评估批处理大小",
- "embedding.load.llama.evalBatchSize/subTitle": "每次处理的输入token数量。增加此值会提高性能,但会增加内存使用量",
- "embedding.load.llama.evalBatchSize/info": "设置评估期间一起处理的token数量",
- "embedding.load.llama.ropeFrequencyScale/title": "RoPE 频率比例",
- "embedding.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放,以使用 RoPE 扩展有效上下文",
- "embedding.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放,以控制位置编码的粒度",
- "embedding.load.llama.acceleration.offloadRatio/title": "GPU 卸载",
- "embedding.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速的离散模型层数",
- "embedding.load.llama.acceleration.offloadRatio/info": "设置卸载到 GPU 的层数。",
- "embedding.load.llama.keepModelInMemory/title": "保持模型在内存中",
- "embedding.load.llama.keepModelInMemory/subTitle": "即使模型卸载到 GPU 也预留系统内存。提高性能但需要更多的系统 RAM",
- "embedding.load.llama.keepModelInMemory/info": "防止模型交换到磁盘,确保更快的访问,但以更高的 RAM 使用率为代价",
- "embedding.load.llama.tryMmap/title": "尝试 mmap()",
- "embedding.load.llama.tryMmap/subTitle": "提高模型的加载时间。禁用此功能可能在模型大于可用系统 RAM 时提高性能",
- "embedding.load.llama.tryMmap/info": "直接从磁盘加载模型文件到内存",
- "embedding.load.seed/title": "种子",
- "embedding.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1 表示随机种子",
-
- "embedding.load.seed/info": "随机种子:设置随机数生成的种子以确保可重复的结果",
-
- "presetTooltip": {
- "included/title": "预设值",
- "included/description": "以下字段将会被应用",
- "included/empty": "在此上下文中,此预设没有适用的字段。",
- "included/conflict": "您将被要求选择是否应用此值",
- "separateLoad/title": "加载时配置",
- "separateLoad/description.1": "预设还包含以下加载时配置。加载时配置是全模型范围的,并且需要重新加载模型才能生效。按住",
- "separateLoad/description.2": "应用到",
- "separateLoad/description.3": "。",
- "excluded/title": "可能不适用",
- "excluded/description": "以下字段包含在预设中,但在当前上下文中不适用。",
- "legacy/title": "旧版预设",
- "legacy/description": "这是一个旧版预设。它包括以下字段,这些字段现在要么自动处理,要么不再适用。",
- "button/publish": "发布到 Hub",
- "button/pushUpdate": "推送更改到 Hub",
- "button/noChangesToPush": "没有可推送的更改",
- "button/export": "导出",
- "hubLabel": "来自 {{user}} 的 Hub 预设",
- "ownHubLabel": "您的 Hub 预设"
- },
-
- "customInputs": {
- "string": {
- "emptyParagraph": "<空>"
- },
- "checkboxNumeric": {
- "off": "关闭"
- },
- "llamaCacheQuantizationType": {
- "off": "关闭"
- },
- "mlxKvCacheBits": {
- "off": "关闭"
- },
- "stringArray": {
- "empty": "<空>"
- },
- "llmPromptTemplate": {
- "type": "类型",
- "types.jinja/label": "模板 (Jinja)",
- "jinja.bosToken/label": "开始token (BOS Token)",
- "jinja.eosToken/label": "结束token (EOS Token)",
- "jinja.template/label": "模板",
- "jinja/error": "解析 Jinja 模板失败: {{error}}",
- "jinja/empty": "请在上方输入一个 Jinja 模板。",
- "jinja/unlikelyToWork": "您提供的 Jinja 模板很可能无法正常工作,因为它没有引用变量 \"messages\"。请检查您输入的模板是否正确。",
- "types.manual/label": "手动",
- "manual.subfield.beforeSystem/label": "系统前缀",
- "manual.subfield.beforeSystem/placeholder": "输入系统前缀...",
- "manual.subfield.afterSystem/label": "系统后缀",
- "manual.subfield.afterSystem/placeholder": "输入系统后缀...",
- "manual.subfield.beforeUser/label": "用户前缀",
- "manual.subfield.beforeUser/placeholder": "输入用户前缀...",
- "manual.subfield.afterUser/label": "用户后缀",
- "manual.subfield.afterUser/placeholder": "输入用户后缀...",
- "manual.subfield.beforeAssistant/label": "助手前缀",
- "manual.subfield.beforeAssistant/placeholder": "输入助手前缀...",
- "manual.subfield.afterAssistant/label": "助手后缀",
- "manual.subfield.afterAssistant/placeholder": "输入助手后缀...",
- "stopStrings/label": "额外停止字符串",
- "stopStrings/subTitle": "除了用户指定的停止字符串之外,还将使用特定于模板的停止字符串。"
- },
- "contextLength": {
- "maxValueTooltip": "这是模型训练所能处理的最大token数量。点击以将上下文设置为此值",
- "maxValueTextStart": "模型支持最多",
- "maxValueTextEnd": "个token",
- "tooltipHint": "尽管模型可能支持一定数量的token,但如果您的机器资源无法处理负载,性能可能会下降 - 增加此值时请谨慎"
- },
- "contextOverflowPolicy": {
- "stopAtLimit": "到达限制时停止",
- "stopAtLimitSub": "一旦模型的内存满载即停止生成",
- "truncateMiddle": "截断中间",
- "truncateMiddleSub": "从对话中间移除消息以为新消息腾出空间。模型仍然会记住对话的开头",
- "rollingWindow": "滚动窗口",
- "rollingWindowSub": "模型将始终接收最近的几条消息,但可能会忘记对话的开头"
- },
- "llamaAccelerationOffloadRatio": {
- "max": "最大",
- "off": "关闭"
- },
- "gpuSplitStrategy": {
- "evenly": "均匀分配",
- "favorMainGpu": "优先主 GPU"
- },
- "speculativeDecodingDraftModel": {
- "readMore": "了解工作原理",
- "placeholder": "选择兼容的草稿模型",
- "noCompatible": "当前模型选择下未找到兼容的草稿模型",
- "stillLoading": "正在识别兼容的草稿模型...",
- "notCompatible": "所选草稿模型()与当前模型选择()不兼容。",
- "off": "关闭",
- "loadModelToSeeOptions": "加载模型 以查看兼容选项",
- "compatibleWithNumberOfModels": "推荐用于至少 {{dynamicValue}} 个模型",
- "recommendedForSomeModels": "推荐用于部分模型",
- "recommendedForLlamaModels": "推荐用于 Llama 模型",
- "recommendedForQwenModels": "推荐用于 Qwen 模型",
- "onboardingModal": {
- "introducing": "新功能介绍",
- "speculativeDecoding": "投机解码",
- "firstStepBody": "llama.cpp 和 MLX 模型推理加速",
- "secondStepTitle": "投机解码能够加速推理",
- "secondStepBody": "投机解码是一种让两个模型协作的技术:\n - 一个规模较大的“主”模型\n - 一个较小的“草稿”模型\n\n生成过程中,草稿模型会快速提出token,由主模型进行验证。验证的过程比实际生成更快。\n**通常,主模型与草稿模型的体积差距越大,加速效果越明显。**\n\n为了保证质量,主模型只会接受与自身结果一致的token,从而实现大模型的响应质量与更快的推理速度。两个模型必须使用相同的词表。",
- "draftModelRecommendationsTitle": "草稿模型推荐",
- "basedOnCurrentModels": "基于您当前的模型",
- "close": "关闭",
- "next": "下一步",
- "done": "完成"
- },
- "speculativeDecodingLoadModelToSeeOptions": "请先加载模型 ",
- "errorEngineNotSupported": "投机解码需引擎 {{engineName}} 至少版本 {{minVersion}}。请更新引擎()并重新加载模型以使用此功能。",
- "errorEngineNotSupported/noKey": "投机解码需引擎 {{engineName}} 至少版本 {{minVersion}}。请更新引擎并重新加载模型以使用此功能。"
- },
- "llmReasoningParsing": {
- "startString/label": "起始字符串",
- "startString/placeholder": "请输入起始字符串...",
- "endString/label": "结束字符串",
- "endString/placeholder": "请输入结束字符串..."
- }
- },
- "saveConflictResolution": {
- "title": "选择要包含在预设中的值",
- "description": "挑选并选择要保留的值",
- "instructions": "点击一个值以包含它",
- "userValues": "先前的值",
- "presetValues": "新值",
- "confirm": "确认",
- "cancel": "取消"
- },
- "applyConflictResolution": {
- "title": "保留哪些值?",
- "description": "您有未提交的更改与即将应用的预设有重叠",
- "instructions": "点击一个值以保留它",
- "userValues": "当前值",
- "presetValues": "即将应用的预设值",
- "confirm": "确认",
- "cancel": "取消"
- },
- "empty": "<空>",
- "noModelSelected": "未选择模型",
- "apiIdentifier.label": "API 标识符",
- "apiIdentifier.hint": "可选,为此模型提供一个标识符。该标识符将在 API 请求中使用。留空则使用默认标识符。",
- "idleTTL.label": "空闲时自动卸载",
- "idleTTL.hint": "如设置,模型在空闲指定时间后将自动卸载。",
- "idleTTL.mins": "分钟",
-
- "presets": {
- "title": "预设",
- "commitChanges": "提交更改",
- "commitChanges/description": "将您的更改提交给预设。",
- "commitChanges.manual": "检测到新的字段。您将能够选择要包含在预设中的更改。",
- "commitChanges.manual.hold.0": "按住",
- "commitChanges.manual.hold.1": "选择要提交给预设的更改。",
- "commitChanges.saveAll.hold.0": "按住",
- "commitChanges.saveAll.hold.1": "保存所有更改。",
- "commitChanges.saveInPreset.hold.0": "按住",
- "commitChanges.saveInPreset.hold.1": "仅保存已经包含在预设中的字段的更改。",
- "commitChanges/error": "未能将更改提交给预设。",
- "commitChanges.manual/description": "选择要包含在预设中的更改。",
- "saveAs": "另存为新预设...",
- "presetNamePlaceholder": "为预设输入一个名称...",
- "cannotCommitChangesLegacy": "这是一个旧版预设,无法修改。您可以使用“另存为新预设...”创建一个副本。",
- "cannotCommitChangesNoChanges": "没有更改可以提交。",
- "emptyNoUnsaved": "选择一个预设...",
- "emptyWithUnsaved": "未保存的预设",
- "saveEmptyWithUnsaved": "保存预设为...",
- "saveConfirm": "保存",
- "saveCancel": "取消",
- "saving": "正在保存...",
- "save/error": "未能保存预设。",
- "deselect": "取消选择预设",
- "deselect/error": "取消选择预设失败。",
- "select/error": "选择预设失败。",
- "delete/error": "删除预设失败。",
- "discardChanges": "丢弃未保存的更改",
- "discardChanges/info": "丢弃所有未提交的更改并恢复预设至原始状态",
- "newEmptyPreset": "创建新的空预设...",
- "importPreset": "导入",
- "contextMenuCopyIdentifier": "复制预设标识符",
- "contextMenuSelect": "选择预设",
- "contextMenuDelete": "删除",
- "contextMenuShare": "发布中...",
- "contextMenuOpenInHub": "在 Hub 上查看",
- "contextMenuPullFromHub": "拉取最新版本",
- "contextMenuPushChanges": "推送更改到 Hub",
- "contextMenuPushingChanges": "正在推送...",
- "contextMenuPushedChanges": "更改已推送",
- "contextMenuExport": "导出文件",
- "contextMenuRevealInExplorer": "在文件资源管理器中显示",
- "contextMenuRevealInFinder": "在 Finder 中显示",
- "share": {
- "title": "发布预设",
- "action": "分享你的预设,让他人下载、点赞和fork",
- "presetOwnerLabel": "所有者",
- "uploadAs": "你的预设将以 {{name}} 创建",
- "presetNameLabel": "预设名称",
- "descriptionLabel": "描述(可选)",
- "loading": "正在发布...",
- "success": "预设已成功发布",
- "presetIsLive": " 已在 Hub 上发布!",
- "close": "关闭",
- "confirmViewOnWeb": "在网页上查看",
- "confirmCopy": "复制链接",
- "confirmCopied": "已复制!",
- "pushedToHub": "你的预设已推送到 Hub",
- "descriptionPlaceholder": "请输入描述...",
- "willBePublic": "发布你的预设将使其公开",
- "willBePrivate": "仅您可见",
- "willBeOrgVisible": "组织内成员均可见",
- "publicSubtitle": "你的预设现在为 公开。其他人可以在 lmstudio.ai 下载和 fork 它",
- "privateUsageReached": "私有预设的数量已达上限",
- "continueInBrowser": "在浏览器继续",
- "confirmShareButton": "发布",
- "error": "预设发布失败",
- "createFreeAccount": "请在 Hub 创建免费账号以发布预设"
- },
- "update": {
- "title": "推送更改到 Hub",
- "title/success": "预设已成功更新",
- "subtitle": "修改 并推送到 Hub",
- "descriptionLabel": "描述",
- "descriptionPlaceholder": "请输入描述...",
- "loading": "正在推送...",
- "cancel": "取消",
- "createFreeAccount": "请在 Hub 创建免费账号以发布预设",
- "error": "推送更新失败",
- "confirmUpdateButton": "推送"
- },
- "resolve": {
- "title": "解决冲突...",
- "tooltip": "打开窗口以解决与 Hub 版本的差异"
- },
- "loginToManage": {
- "title": "登录以管理..."
- },
- "downloadFromHub": {
- "title": "下载",
- "downloading": "下载中...",
- "success": "下载完成!",
- "error": "下载失败"
- },
- "push": {
- "title": "推送更改",
- "pushing": "推送中...",
- "success": "推送成功!",
- "tooltip": "将本地更改推送到 Hub 上托管的远程版本",
- "error": "推送失败"
- },
- "saveAsNewModal": {
- "title": "哎呀!在 Hub 上未找到预设",
- "confirmSaveAsNewDescription": "您是否希望将此预设作为新版本发布?",
- "confirmButton": "作为新版本发布"
- },
- "pull": {
- "title": "拉取最新版本",
- "error": "拉取失败",
- "contextMenuErrorMessage": "拉取失败",
- "success": "已拉取",
- "pulling": "拉取中...",
- "upToDate": "已是最新版本!",
- "unsavedChangesModal": {
- "title": "你有未保存的更改。",
- "bodyContent": "从远程拉取的内容将覆盖您的未保存更改,是否继续?",
- "confirmButton": "覆盖未保存的更改"
- }
- },
- "import": {
- "title": "从文件导入预设",
- "dragPrompt": "拖拽预设 JSON 文件或从电脑选择",
- "remove": "移除",
- "cancel": "取消",
- "importPreset_zero": "导入预设",
- "importPreset_one": "导入预设",
- "importPreset_other": "导入 {{count}} 个预设",
- "selectDialog": {
- "title": "选择预设文件(.json 或者 .tar.gz)",
- "button": "导入"
- },
- "error": "导入预设失败",
- "resultsModal": {
- "titleSuccessSection_one": "成功导入 1 个预设",
- "titleSuccessSection_other": "成功导入 {{count}} 个预设",
- "titleFailSection_zero": "",
- "titleFailSection_one": "({{count}} 个失败)",
- "titleFailSection_other": "({{count}} 个失败)",
- "titleAllFailed": "预设导入失败",
- "importMore": "继续导入",
- "close": "完成",
- "successBadge": "成功",
- "alreadyExistsBadge": "预设已存在",
- "errorBadge": "错误",
- "invalidFileBadge": "无效文件",
- "otherErrorBadge": "导入预设失败",
- "errorViewDetailsButton": "查看详情",
- "seeError": "查看错误",
- "noName": "无预设名称",
- "useInChat": "在聊天中使用"
- },
- "importFromUrl": {
- "button": "从 URL 导入...",
- "title": "从 URL 导入",
- "back": "从文件导入...",
- "action": "请在下方粘贴你要导入的 LM Studio Hub 预设链接",
- "invalidUrl": "无效的 URL,请确保输入的是有效的 LM Studio Hub 预设链接。",
- "tip": "你也可以在 LM Studio Hub 直接点击 {{buttonName}} 按钮安装该预设",
- "confirm": "导入",
- "cancel": "取消",
- "loading": "正在导入...",
- "error": "下载预设失败。"
- }
- },
- "download": {
- "title": "从 LM Studio Hub 拉取 ",
- "subtitle": "保存 到你的预设。保存后你可以在应用中使用此预设",
- "button": "拉取",
- "button/loading": "正在拉取...",
- "cancel": "取消",
- "error": "下载预设失败。"
- },
- "inclusiveness": {
- "speculativeDecoding": "包含在预设中"
- }
- },
-
- "flashAttentionWarning": "Flash Attention 是一项实验性功能,可能会导致某些模型出现问题。如果您遇到问题,请尝试禁用它。",
- "llamaKvCacheQuantizationWarning": "KV 缓存量化是一项实验性功能,可能会导致某些模型出现问题。V 缓存量化必须启用 Flash Attention。如果遇到问题,请将默认值重置为\"F16\"。",
-
- "seedUncheckedHint": "随机种子",
- "ropeFrequencyBaseUncheckedHint": "自动",
- "ropeFrequencyScaleUncheckedHint": "自动",
-
- "hardware": {
- "environmentVariables": "环境变量",
- "environmentVariables.info": "如果不确定,请保留默认值",
- "environmentVariables.reset": "重置为默认值",
-
- "gpus.information": "配置检测到的图形处理单元 (GPU)",
- "gpuSettings": {
- "editMaxCapacity": "编辑最大容量",
- "hideEditMaxCapacity": "隐藏最大容量编辑",
- "allOffWarning": "所有 GPU 均已关闭或禁用,请确保分配了至少一个 GPU 以加载模型",
- "split": {
- "title": "分配策略",
- "placeholder": "选择 GPU 内存分配方式",
- "options": {
- "generalDescription": "配置模型将如何加载到您的 GPU 上",
- "evenly": {
- "title": "均匀分配",
- "description": "在多个 GPU 之间均匀分配内存"
- },
- "priorityOrder": {
- "title": "按顺序填充",
- "description": "先在第一个 GPU 上分配内存,然后依次分配到后续 GPU"
- },
- "custom": {
- "title": "自定义",
- "description": "分配内存",
- "maxAllocation": "最大分配"
- }
- }
- },
- "deviceId.info": "此设备的唯一标识符",
- "changesOnlyAffectNewlyLoadedModels": "更改仅影响新加载的模型",
- "toggleGpu": "启用/禁用 GPU"
- }
- },
-
- "load.gpuSplitConfig/title": "GPU 分配配置",
- "envVars/title": "设置环境变量",
- "envVars": {
- "select": {
- "placeholder": "选择环境变量...",
- "noOptions": "无更多可用选项",
- "filter": {
- "placeholder": "过滤搜索结果",
- "resultsFound_zero": "未找到结果",
- "resultsFound_one": "找到 1 个结果",
- "resultsFound_other": "找到 {{count}} 个结果"
- }
- },
- "inputValue": {
- "placeholder": "输入值"
- },
- "values": {
- "title": "当前值"
- }
- }
-}
+{
+ "noInstanceSelected": "未选择模型实例",
+ "resetToDefault": "重置",
+ "showAdvancedSettings": "显示高级设置",
+ "showAll": "全部",
+ "basicSettings": "基础",
+ "configSubtitle": "加载或保存预设并尝试模型参数覆盖",
+ "inferenceParameters/title": "预测参数",
+ "inferenceParameters/info": "尝试影响预测的参数。",
+ "generalParameters/title": "通用",
+ "samplingParameters/title": "采样",
+ "basicTab": "基础",
+ "advancedTab": "高级",
+ "advancedTab/title": "🧪 高级配置",
+ "advancedTab/expandAll": "展开所有",
+ "advancedTab/overridesTitle": "配置覆盖",
+ "advancedTab/noConfigsText": "您没有未保存的更改 - 编辑上方值以在此处查看覆盖。",
+ "loadInstanceFirst": "加载模型以查看可配置参数",
+ "noListedConfigs": "无可配置参数",
+ "generationParameters/info": "尝试影响文本生成的基础参数。",
+ "loadParameters/title": "加载参数",
+ "loadParameters/description": "控制模型初始化和加载到内存的方式的设置。",
+ "loadParameters/reload": "重新加载以应用更改",
+ "loadParameters/reload/error": "重新加载模型失败",
+ "discardChanges": "放弃更改",
+ "loadModelToSeeOptions": "加载模型以查看选项",
+ "schematicsError.title": "配置结构在以下字段存在错误:",
+ "manifestSections": {
+ "structuredOutput/title": "结构化输出",
+ "speculativeDecoding/title": "投机解码",
+ "sampling/title": "采样",
+ "settings/title": "设置",
+ "toolUse/title": "工具调用",
+ "promptTemplate/title": "提示词模板",
+ "customFields/title": "自定义字段"
+ },
+
+ "llm.prediction.systemPrompt/title": "系统提示",
+ "llm.prediction.systemPrompt/description": "使用此字段向模型提供背景指令,如一套规则、约束或一般要求。",
+ "llm.prediction.systemPrompt/subTitle": "AI 指南",
+ "llm.prediction.systemPrompt/openEditor": "编辑器",
+ "llm.prediction.systemPrompt/closeEditor": "关闭编辑器",
+ "llm.prediction.systemPrompt/openedEditor": "在编辑器中打开...",
+ "llm.prediction.systemPrompt/edit": "编辑系统提示...",
+ "llm.prediction.systemPrompt/addInstructionsWithMore": "添加说明...",
+ "llm.prediction.systemPrompt/addInstructions": "添加说明",
+ "llm.prediction.temperature/title": "温度",
+ "llm.prediction.temperature/subTitle": "引入多少随机性。0 将始终产生相同的结果,而较高值将增加创造性和变化。",
+ "llm.prediction.temperature/info": "来自 llama.cpp 帮助文档:\"默认值为 <{{dynamicValue}}>,它在随机性和确定性之间提供了平衡。极端情况下,温度为 0 会始终选择最可能的下一个token,导致每次运行的输出相同\"",
+ "llm.prediction.llama.sampling/title": "采样",
+ "llm.prediction.topKSampling/title": "Top K 采样",
+ "llm.prediction.topKSampling/subTitle": "将下一个token限制为模型预测的前 k 个最可能的token。作用类似于温度",
+ "llm.prediction.topKSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-k 采样是一种仅从模型预测的前 k 个最可能的token中选择下一个token的文本生成方法。\n\n它有助于减少生成低概率或无意义token的风险,但也可能限制输出的多样性。\n\n更高的 top-k 值(例如,100)将考虑更多token,从而生成更多样化的文本,而较低的值(例如,10)将专注于最可能的token,生成更保守的文本。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.llama.cpuThreads/title": "CPU 线程",
+ "llm.prediction.llama.cpuThreads/subTitle": "推理期间使用的 CPU 线程数",
+ "llm.prediction.llama.cpuThreads/info": "计算期间要使用的线程数。增加线程数并不总是与更好的性能相关联。默认值为 <{{dynamicValue}}>。",
+ "llm.prediction.maxPredictedTokens/title": "限制响应长度",
+ "llm.prediction.maxPredictedTokens/subTitle": "可选地限制 AI 响应的长度",
+ "llm.prediction.maxPredictedTokens/info": "控制聊天机器人的响应最大长度。开启以设置响应的最大长度限制,或关闭以让聊天机器人决定何时停止。",
+ "llm.prediction.maxPredictedTokens/inputLabel": "最大响应长度(token)",
+ "llm.prediction.maxPredictedTokens/wordEstimate": "约 {{maxWords}} 词",
+ "llm.prediction.repeatPenalty/title": "重复惩罚",
+ "llm.prediction.repeatPenalty/subTitle": "多大程度上避免重复相同的token",
+ "llm.prediction.repeatPenalty/info": "来自 llama.cpp 帮助文档:\"有助于防止模型生成重复或单调的文本。\n\n更高的值(例如,1.5)将更强烈地惩罚重复,而更低的值(例如,0.9)将更为宽容。\" • 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.minPSampling/title": "最小 P 采样",
+ "llm.prediction.minPSampling/subTitle": "token被选为输出的最低基本概率",
+ "llm.prediction.minPSampling/info": "来自 llama.cpp 帮助文档:\n\n相对于最可能token的概率,token被视为考虑的最低概率。必须在 [0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.topPSampling/title": "Top P 采样",
+ "llm.prediction.topPSampling/subTitle": "可能的下一个token的最小累积概率。作用类似于温度",
+ "llm.prediction.topPSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-p 采样,也称为核心采样,是另一种文本生成方法,从累积概率至少为 p 的token子集中选择下一个token。\n\n这种方法通过同时考虑token的概率和要从中采样的token数量,在多样性和质量之间提供了平衡。\n\n更高的 top-p 值(例如,0.95)将导致更多样化的文本,而较低的值(例如,0.5)将生成更集中和保守的文本。必须在 (0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.stopStrings/title": "停止字符串",
+ "llm.prediction.stopStrings/subTitle": "应该停止模型生成更多token的字符串",
+ "llm.prediction.stopStrings/info": "遇到特定字符串时将停止模型生成更多token",
+ "llm.prediction.stopStrings/placeholder": "输入一个字符串并按 ⏎",
+ "llm.prediction.contextOverflowPolicy/title": "上下文溢出",
+ "llm.prediction.contextOverflowPolicy/subTitle": "当对话超出模型处理能力时,模型应该如何表现",
+ "llm.prediction.contextOverflowPolicy/info": "决定当对话超过模型的工作内存('上下文')大小时该怎么做",
+ "llm.prediction.llama.frequencyPenalty/title": "频率惩罚",
+ "llm.prediction.llama.presencePenalty/title": "存在惩罚",
+ "llm.prediction.llama.tailFreeSampling/title": "尾部自由采样",
+ "llm.prediction.llama.locallyTypicalSampling/title": "局部典型采样",
+ "llm.prediction.llama.xtcProbability/title": "XTC 采样概率",
+ "llm.prediction.llama.xtcProbability/subTitle": "XTC(排除顶选)采样器将在每个生成token时以该概率激活。XTC 采样有助于提升创造力,减少陈词滥调",
+ "llm.prediction.llama.xtcProbability/info": "XTC(排除顶选)采样将以该概率在每个token生成时激活。XTC 采样通常可以提升创造力并减少陈词滥调",
+ "llm.prediction.llama.xtcThreshold/title": "XTC 采样阈值",
+ "llm.prediction.llama.xtcThreshold/subTitle": "XTC(排除顶选)阈值。在 `xtc-probability` 概率下,查找概率介于 `xtc-threshold` 和 0.5 之间的token,并仅保留其中概率最低的一个",
+ "llm.prediction.llama.xtcThreshold/info": "XTC(排除顶选)阈值。在 `xtc-probability` 概率下,查找概率介于 `xtc-threshold` 和 0.5 之间的所有token,仅保留概率最低的一个,其余全部移除",
+ "llm.prediction.mlx.topKSampling/title": "Top K 采样",
+ "llm.prediction.mlx.topKSampling/subTitle": "将下一个token限制为概率最高的前 k 个token。作用类似于温度",
+ "llm.prediction.mlx.topKSampling/info": "仅从概率最高的前 k 个token中选择下一个token,作用类似于温度",
+ "llm.prediction.onnx.topKSampling/title": "Top K 采样",
+ "llm.prediction.onnx.topKSampling/subTitle": "将下一个token限制为前 k 个最可能的token。作用类似于温度",
+ "llm.prediction.onnx.topKSampling/info": "来自 ONNX 文档:\n\n保留最高概率词汇表token的数量以进行 top-k 过滤\n\n• 默认情况下此过滤器关闭",
+ "llm.prediction.onnx.repeatPenalty/title": "重复惩罚",
+ "llm.prediction.onnx.repeatPenalty/subTitle": "多大程度上避免重复相同的token",
+ "llm.prediction.onnx.repeatPenalty/info": "更高的值阻止模型重复自身",
+ "llm.prediction.onnx.topPSampling/title": "Top P 采样",
+ "llm.prediction.onnx.topPSampling/subTitle": "可能的下一个token的最小累积概率。作用类似于温度",
+ "llm.prediction.onnx.topPSampling/info": "来自 ONNX 文档:\n\n仅保留累积概率达到或超过 TopP 的最可能token用于生成\n\n• 默认情况下此过滤器关闭",
+ "llm.prediction.seed/title": "种子",
+ "llm.prediction.structured/title": "结构化输出",
+ "llm.prediction.structured/info": "结构化输出",
+ "llm.prediction.structured/description": "高级:您可以提供[JSON Schema](https://json-schema.org/learn/miscellaneous-examples)来强制执行模型中的特定输出格式。阅读[文档](https://lmstudio.ai/docs/advanced/structured-output)了解更多",
+ "llm.prediction.tools/title": "工具调用",
+ "llm.prediction.tools/description": "高级功能:你可以提供 JSON 格式的工具列表,模型可请求调用这些工具。详情请查阅[文档](https://lmstudio.ai/docs/advanced/tool-use)",
+ "llm.prediction.tools/serverPageDescriptionAddon": "通过服务端 API 调用时,请将其作为 `tools` 字段传入请求体",
+ "llm.prediction.promptTemplate/title": "提示模板",
+ "llm.prediction.promptTemplate/subTitle": "聊天中消息发送给模型的格式。更改此设置可能会引入意外行为 - 确保您知道自己在做什么!",
+ "llm.prediction.speculativeDecoding.numDraftTokensExact/title": "草稿生成token数",
+ "llm.prediction.speculativeDecoding.numDraftTokensExact/subTitle": "每生成一个主模型token,草稿模型生成的token数量。平衡计算量与收益,选择合适的数值",
+ "llm.prediction.speculativeDecoding.minContinueDraftingProbability/title": "草稿概率阈值",
+ "llm.prediction.speculativeDecoding.minContinueDraftingProbability/subTitle": "仅当token概率高于该阈值时才继续草稿。值越高风险越低,收益也越低",
+ "llm.prediction.speculativeDecoding.minDraftLengthToConsider/title": "最小草稿长度",
+ "llm.prediction.speculativeDecoding.minDraftLengthToConsider/subTitle": "草稿长度低于该值将被主模型忽略。值越高风险越低,收益也越低",
+ "llm.prediction.speculativeDecoding.maxTokensToDraft/title": "最大草稿长度",
+ "llm.prediction.speculativeDecoding.maxTokensToDraft/subTitle": "草稿中允许的最大token数。如果所有token概率都高于阈值,则为上限。值越低风险越低,收益也越低",
+ "llm.prediction.speculativeDecoding.draftModel/title": "草稿模型",
+ "llm.prediction.reasoning.parsing/title": "推理过程解析方式",
+ "llm.prediction.reasoning.parsing/subTitle": "控制模型输出中推理过程的解析方式",
+
+ "llm.load.mainGpu/title": "主 GPU",
+ "llm.load.mainGpu/subTitle": "用于模型计算的 GPU 优先级",
+ "llm.load.mainGpu/placeholder": "选择主 GPU...",
+ "llm.load.splitStrategy/title": "拆分策略",
+ "llm.load.splitStrategy/subTitle": "如何跨 GPU 拆分模型计算",
+ "llm.load.splitStrategy/placeholder": "选择拆分策略...",
+ "llm.load.offloadKVCacheToGpu/title": "将 KV 缓存卸载到 GPU 内存",
+ "llm.load.offloadKVCacheToGpu/subTitle": "将 KV 缓存卸载到 GPU 内存。这可以提高性能但需要更多 GPU 内存",
+ "load.gpuStrictVramCap/title": "限制模型卸载至专用 GPU 内存",
+ "load.gpuStrictVramCap.customSubTitleOff": "关闭:若专用 GPU 内存已满,允许将模型权重卸载至共享内存",
+ "load.gpuStrictVramCap.customSubTitleOn": "开启:系统将限制模型权重的卸载仅限于专用 GPU 内存及 RAM。上下文仍可能使用共享内存",
+ "load.gpuStrictVramCap.customGpuOffloadWarning": "模型的卸载仅限于专用 GPU 内存。实际卸载的层数可能会有所不同",
+ "load.allGpusDisabledWarning": "所有 GPU 目前均被禁用。请启用至少一个以进行卸载",
+
+ "llm.load.contextLength/title": "上下文长度",
+ "llm.load.contextLength/subTitle": "模型可以一次性关注的token最大数量。请参阅“推理参数”下的“对话溢出”选项以获取更多管理方式",
+ "llm.load.contextLength/info": "指定模型一次可以考虑的最大token数量,影响其处理过程中保留的上下文量",
+ "llm.load.contextLength/warning": "设置较高的上下文长度值会对内存使用产生显著影响",
+ "llm.load.seed/title": "种子",
+ "llm.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1 表示随机",
+ "llm.load.seed/info": "随机种子:设置随机数生成的种子以确保可重复的结果",
+
+ "llm.load.llama.evalBatchSize/title": "评估批处理大小",
+ "llm.load.llama.evalBatchSize/subTitle": "每次处理的输入token数量。增加此值会提高性能,但会增加内存使用量",
+ "llm.load.llama.evalBatchSize/info": "设置评估期间一起处理的示例数量,影响速度和内存使用",
+ "llm.load.llama.ropeFrequencyBase/title": "RoPE 频率基",
+ "llm.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下提高性能",
+ "llm.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基频,影响位置信息的嵌入方式",
+ "llm.load.llama.ropeFrequencyScale/title": "RoPE 频率比例",
+ "llm.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放,以使用 RoPE 扩展有效上下文",
+ "llm.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放,以控制位置编码的粒度",
+ "llm.load.llama.acceleration.offloadRatio/title": "GPU 卸载",
+ "llm.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速的离散模型层数",
+ "llm.load.llama.acceleration.offloadRatio/info": "设置卸载到 GPU 的层数。",
+ "llm.load.llama.flashAttention/title": "Flash Attention",
+ "llm.load.llama.flashAttention/subTitle": "降低某些模型的内存使用量和生成时间",
+ "llm.load.llama.flashAttention/info": "加速注意力机制,实现更快、更高效的处理",
+ "llm.load.numExperts/title": "专家数量",
+ "llm.load.numExperts/subTitle": "模型中使用的专家数量",
+ "llm.load.numExperts/info": "模型中使用的专家数量",
+ "llm.load.llama.keepModelInMemory/title": "保持模型在内存中",
+ "llm.load.llama.keepModelInMemory/subTitle": "即使模型卸载到 GPU 也预留系统内存。提高性能但需要更多的系统 RAM",
+ "llm.load.llama.keepModelInMemory/info": "防止模型交换到磁盘,确保更快的访问,但以更高的 RAM 使用率为代价",
+ "llm.load.llama.useFp16ForKVCache/title": "使用 FP16 用于 KV 缓存",
+ "llm.load.llama.useFp16ForKVCache/info": "通过以半精度(FP16)存储缓存来减少内存使用",
+ "llm.load.llama.tryMmap/title": "尝试 mmap()",
+ "llm.load.llama.tryMmap/subTitle": "提高模型的加载时间。禁用此功能可能在模型大于可用系统 RAM 时提高性能",
+ "llm.load.llama.tryMmap/info": "直接从磁盘加载模型文件到内存",
+ "llm.load.llama.cpuThreadPoolSize/title": "CPU 线程池大小",
+ "llm.load.llama.cpuThreadPoolSize/subTitle": "为模型计算分配的 CPU 线程池线程数",
+ "llm.load.llama.cpuThreadPoolSize/info": "分配用于模型计算的 CPU 线程池线程数量。线程数增加未必总能带来更佳性能。默认值为 <{{dynamicValue}}>。",
+ "llm.load.llama.kCacheQuantizationType/title": "K 缓存量化类型",
+ "llm.load.llama.kCacheQuantizationType/subTitle": "较低的量化类型可降低内存占用,但可能影响模型质量,不同模型效果差异大。",
+ "llm.load.llama.vCacheQuantizationType/title": "V 缓存量化类型",
+ "llm.load.llama.vCacheQuantizationType/subTitle": "较低的量化类型可降低内存占用,但可能影响模型质量,不同模型效果差异大。",
+ "llm.load.llama.vCacheQuantizationType/turnedOnWarning": "⚠️ 如未启用 Flash Attention,请务必关闭该选项",
+ "llm.load.llama.vCacheQuantizationType/disabledMessage": "仅在启用 Flash Attention 时可用",
+ "llm.load.llama.vCacheQuantizationType/invalidF32MetalState": "⚠️ 使用 F32 时请禁用 Flash Attention",
+ "llm.load.useUnifiedKvCache/title": "统一 KV 缓存",
+ "llm.load.useUnifiedKvCache/subTitle": "控制并发预测是否共享单个 KV 缓存以节省内存。禁用此选项可确保每个预测都能利用完整的上下文长度,但会使用更多内存",
+ "llm.load.numParallelSessions/title": "最大并发预测数",
+ "llm.load.numParallelSessions/subTitle": "模型在给定时间可以运行的最大预测数。并发增加时,每个单独预测的速度可能会降低,但每个预测的启动速度会更快,总吞吐量可能更高",
+ "llm.load.numCpuExpertLayersRatio/title": "强制将 MoE 权重加载到 CPU 的层数",
+ "llm.load.numCpuExpertLayersRatio/subTitle": "强制将专家层加载到 CPU 的层数。节省 VRAM,并且可能比部分 GPU 卸载更快。如果模型完全适合 VRAM,则不建议使用。",
+ "llm.load.numCpuExpertLayersRatio/info": "指定强制将专家层加载到 CPU 的层数。将注意力层保留在 GPU 上,在保持推理速度相当快的同时节省 VRAM。",
+ "llm.load.mlx.kvCacheBits/title": "KV 缓存量化位数",
+ "llm.load.mlx.kvCacheBits/subTitle": "KV 缓存量化使用的位数",
+ "llm.load.mlx.kvCacheBits/info": "设置 KV 缓存需要量化成的位数",
+ "llm.load.mlx.kvCacheBits/turnedOnWarning": "启用 KV 缓存量化时,上下文长度设置将被忽略",
+ "llm.load.mlx.kvCacheGroupSize/title": "KV 缓存量化分组大小",
+ "llm.load.mlx.kvCacheGroupSize/subTitle": "量化操作时分组的大小,组越大内存占用越低,但模型质量可能下降",
+ "llm.load.mlx.kvCacheGroupSize/info": "KV 缓存量化时使用的分组位数",
+ "llm.load.mlx.kvCacheQuantizationStart/title": "KV 缓存量化:开始量化的上下文长度",
+ "llm.load.mlx.kvCacheQuantizationStart/subTitle": "达到此上下文长度后开始对 KV 缓存进行量化",
+ "llm.load.mlx.kvCacheQuantizationStart/info": "达到此上下文长度后开始对 KV 缓存进行量化",
+ "llm.load.mlx.kvCacheQuantization/title": "KV 缓存量化",
+ "llm.load.mlx.kvCacheQuantization/subTitle": "对模型的 KV 缓存进行量化,可加快生成速度并降低内存占用,但可能影响输出质量。",
+ "llm.load.mlx.kvCacheQuantization/bits/title": "KV 缓存量化位数",
+ "llm.load.mlx.kvCacheQuantization/bits/tooltip": "KV 缓存量化所用的位数",
+ "llm.load.mlx.kvCacheQuantization/bits/bits": "位数",
+ "llm.load.mlx.kvCacheQuantization/groupSize/title": "分组策略",
+ "llm.load.mlx.kvCacheQuantization/groupSize/accuracy": "高精度",
+ "llm.load.mlx.kvCacheQuantization/groupSize/balanced": "均衡",
+ "llm.load.mlx.kvCacheQuantization/groupSize/speedy": "极速",
+ "llm.load.mlx.kvCacheQuantization/groupSize/tooltip": "高级:量化乘法的分组大小配置\n\n• 高精度 = 分组 32\n• 均衡 = 分组 64\n• 极速 = 分组 128\n",
+ "llm.load.mlx.kvCacheQuantization/quantizedStart/title": "达到此上下文长度后开始量化",
+ "llm.load.mlx.kvCacheQuantization/quantizedStart/tooltip": "当上下文长度达到该值时,开始对 KV 缓存进行量化",
+
+ "embedding.load.contextLength/title": "上下文长度",
+ "embedding.load.contextLength/subTitle": "模型可以一次性关注的token最大数量。请参阅“推理参数”下的“对话溢出”选项以获取更多管理方式",
+ "embedding.load.contextLength/info": "指定模型一次可以考虑的最大token数量,影响其处理过程中保留的上下文量",
+ "embedding.load.llama.ropeFrequencyBase/title": "RoPE 频率基",
+ "embedding.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下提高性能",
+ "embedding.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基频,影响位置信息的嵌入方式",
+ "embedding.load.llama.evalBatchSize/title": "评估批处理大小",
+ "embedding.load.llama.evalBatchSize/subTitle": "每次处理的输入token数量。增加此值会提高性能,但会增加内存使用量",
+ "embedding.load.llama.evalBatchSize/info": "设置评估期间一起处理的token数量",
+ "embedding.load.llama.ropeFrequencyScale/title": "RoPE 频率比例",
+ "embedding.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放,以使用 RoPE 扩展有效上下文",
+ "embedding.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放,以控制位置编码的粒度",
+ "embedding.load.llama.acceleration.offloadRatio/title": "GPU 卸载",
+ "embedding.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速的离散模型层数",
+ "embedding.load.llama.acceleration.offloadRatio/info": "设置卸载到 GPU 的层数。",
+ "embedding.load.llama.keepModelInMemory/title": "保持模型在内存中",
+ "embedding.load.llama.keepModelInMemory/subTitle": "即使模型卸载到 GPU 也预留系统内存。提高性能但需要更多的系统 RAM",
+ "embedding.load.llama.keepModelInMemory/info": "防止模型交换到磁盘,确保更快的访问,但以更高的 RAM 使用率为代价",
+ "embedding.load.llama.tryMmap/title": "尝试 mmap()",
+ "embedding.load.llama.tryMmap/subTitle": "提高模型的加载时间。禁用此功能可能在模型大于可用系统 RAM 时提高性能",
+ "embedding.load.llama.tryMmap/info": "直接从磁盘加载模型文件到内存",
+ "embedding.load.seed/title": "种子",
+ "embedding.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1 表示随机种子",
+
+ "embedding.load.seed/info": "随机种子:设置随机数生成的种子以确保可重复的结果",
+
+ "presetTooltip": {
+ "included/title": "预设值",
+ "included/description": "以下字段将会被应用",
+ "included/empty": "在此上下文中,此预设没有适用的字段。",
+ "included/conflict": "您将被要求选择是否应用此值",
+ "separateLoad/title": "加载时配置",
+ "separateLoad/description.1": "预设还包含以下加载时配置。加载时配置是全模型范围的,并且需要重新加载模型才能生效。按住",
+ "separateLoad/description.2": "应用到",
+ "separateLoad/description.3": "。",
+ "excluded/title": "可能不适用",
+ "excluded/description": "以下字段包含在预设中,但在当前上下文中不适用。",
+ "legacy/title": "旧版预设",
+ "legacy/description": "这是一个旧版预设。它包括以下字段,这些字段现在要么自动处理,要么不再适用。",
+ "button/publish": "发布到 Hub",
+ "button/pushUpdate": "推送更改到 Hub",
+ "button/noChangesToPush": "没有可推送的更改",
+ "button/export": "导出",
+ "hubLabel": "来自 {{user}} 的 Hub 预设",
+ "ownHubLabel": "您的 Hub 预设"
+ },
+
+ "customInputs": {
+ "string": {
+ "emptyParagraph": "<空>"
+ },
+ "checkboxNumeric": {
+ "off": "关闭"
+ },
+ "llamaCacheQuantizationType": {
+ "off": "关闭"
+ },
+ "mlxKvCacheBits": {
+ "off": "关闭"
+ },
+ "stringArray": {
+ "empty": "<空>"
+ },
+ "llmPromptTemplate": {
+ "type": "类型",
+ "types.jinja/label": "模板 (Jinja)",
+ "jinja.bosToken/label": "开始token (BOS Token)",
+ "jinja.eosToken/label": "结束token (EOS Token)",
+ "jinja.template/label": "模板",
+ "jinja/error": "解析 Jinja 模板失败: {{error}}",
+ "jinja/empty": "请在上方输入一个 Jinja 模板。",
+ "jinja/unlikelyToWork": "您提供的 Jinja 模板很可能无法正常工作,因为它没有引用变量 \"messages\"。请检查您输入的模板是否正确。",
+ "types.manual/label": "手动",
+ "manual.subfield.beforeSystem/label": "系统前缀",
+ "manual.subfield.beforeSystem/placeholder": "输入系统前缀...",
+ "manual.subfield.afterSystem/label": "系统后缀",
+ "manual.subfield.afterSystem/placeholder": "输入系统后缀...",
+ "manual.subfield.beforeUser/label": "用户前缀",
+ "manual.subfield.beforeUser/placeholder": "输入用户前缀...",
+ "manual.subfield.afterUser/label": "用户后缀",
+ "manual.subfield.afterUser/placeholder": "输入用户后缀...",
+ "manual.subfield.beforeAssistant/label": "助手前缀",
+ "manual.subfield.beforeAssistant/placeholder": "输入助手前缀...",
+ "manual.subfield.afterAssistant/label": "助手后缀",
+ "manual.subfield.afterAssistant/placeholder": "输入助手后缀...",
+ "stopStrings/label": "额外停止字符串",
+ "stopStrings/subTitle": "除了用户指定的停止字符串之外,还将使用特定于模板的停止字符串。"
+ },
+ "contextLength": {
+ "maxValueTooltip": "这是模型训练所能处理的最大token数量。点击以将上下文设置为此值",
+ "maxValueTextStart": "模型支持最多",
+ "maxValueTextEnd": "个token",
+ "tooltipHint": "尽管模型可能支持一定数量的token,但如果您的机器资源无法处理负载,性能可能会下降 - 增加此值时请谨慎"
+ },
+ "contextOverflowPolicy": {
+ "stopAtLimit": "到达限制时停止",
+ "stopAtLimitSub": "一旦模型的内存满载即停止生成",
+ "truncateMiddle": "截断中间",
+ "truncateMiddleSub": "从对话中间移除消息以为新消息腾出空间。模型仍然会记住对话的开头",
+ "rollingWindow": "滚动窗口",
+ "rollingWindowSub": "模型将始终接收最近的几条消息,但可能会忘记对话的开头"
+ },
+ "llamaAccelerationOffloadRatio": {
+ "max": "最大",
+ "off": "关闭"
+ },
+ "gpuSplitStrategy": {
+ "evenly": "均匀分配",
+ "favorMainGpu": "优先主 GPU"
+ },
+ "speculativeDecodingDraftModel": {
+ "readMore": "了解工作原理",
+ "placeholder": "选择兼容的草稿模型",
+ "noCompatible": "当前模型选择下未找到兼容的草稿模型",
+ "stillLoading": "正在识别兼容的草稿模型...",
+ "notCompatible": "所选草稿模型()与当前模型选择()不兼容。",
+ "off": "关闭",
+ "loadModelToSeeOptions": "加载模型 以查看兼容选项",
+ "compatibleWithNumberOfModels": "推荐用于至少 {{dynamicValue}} 个模型",
+ "recommendedForSomeModels": "推荐用于部分模型",
+ "recommendedForLlamaModels": "推荐用于 Llama 模型",
+ "recommendedForQwenModels": "推荐用于 Qwen 模型",
+ "onboardingModal": {
+ "introducing": "新功能介绍",
+ "speculativeDecoding": "投机解码",
+ "firstStepBody": "llama.cpp 和 MLX 模型推理加速",
+ "secondStepTitle": "投机解码能够加速推理",
+ "secondStepBody": "投机解码是一种让两个模型协作的技术:\n - 一个规模较大的“主”模型\n - 一个较小的“草稿”模型\n\n生成过程中,草稿模型会快速提出token,由主模型进行验证。验证的过程比实际生成更快。\n**通常,主模型与草稿模型的体积差距越大,加速效果越明显。**\n\n为了保证质量,主模型只会接受与自身结果一致的token,从而实现大模型的响应质量与更快的推理速度。两个模型必须使用相同的词表。",
+ "draftModelRecommendationsTitle": "草稿模型推荐",
+ "basedOnCurrentModels": "基于您当前的模型",
+ "close": "关闭",
+ "next": "下一步",
+ "done": "完成"
+ },
+ "speculativeDecodingLoadModelToSeeOptions": "请先加载模型 ",
+ "errorEngineNotSupported": "投机解码需引擎 {{engineName}} 至少版本 {{minVersion}}。请更新引擎()并重新加载模型以使用此功能。",
+ "errorEngineNotSupported/noKey": "投机解码需引擎 {{engineName}} 至少版本 {{minVersion}}。请更新引擎并重新加载模型以使用此功能。"
+ },
+ "llmReasoningParsing": {
+ "startString/label": "起始字符串",
+ "startString/placeholder": "请输入起始字符串...",
+ "endString/label": "结束字符串",
+ "endString/placeholder": "请输入结束字符串..."
+ }
+ },
+ "saveConflictResolution": {
+ "title": "选择要包含在预设中的值",
+ "description": "挑选并选择要保留的值",
+ "instructions": "点击一个值以包含它",
+ "userValues": "先前的值",
+ "presetValues": "新值",
+ "confirm": "确认",
+ "cancel": "取消"
+ },
+ "applyConflictResolution": {
+ "title": "保留哪些值?",
+ "description": "您有未提交的更改与即将应用的预设有重叠",
+ "instructions": "点击一个值以保留它",
+ "userValues": "当前值",
+ "presetValues": "即将应用的预设值",
+ "confirm": "确认",
+ "cancel": "取消"
+ },
+ "empty": "<空>",
+ "noModelSelected": "未选择模型",
+ "apiIdentifier.label": "API 标识符",
+ "apiIdentifier.hint": "可选,为此模型提供一个标识符。该标识符将在 API 请求中使用。留空则使用默认标识符。",
+ "idleTTL.label": "空闲时自动卸载",
+ "idleTTL.hint": "如设置,模型在空闲指定时间后将自动卸载。",
+ "idleTTL.mins": "分钟",
+
+ "presets": {
+ "title": "预设",
+ "commitChanges": "提交更改",
+ "commitChanges/description": "将您的更改提交给预设。",
+ "commitChanges.manual": "检测到新的字段。您将能够选择要包含在预设中的更改。",
+ "commitChanges.manual.hold.0": "按住",
+ "commitChanges.manual.hold.1": "选择要提交给预设的更改。",
+ "commitChanges.saveAll.hold.0": "按住",
+ "commitChanges.saveAll.hold.1": "保存所有更改。",
+ "commitChanges.saveInPreset.hold.0": "按住",
+ "commitChanges.saveInPreset.hold.1": "仅保存已经包含在预设中的字段的更改。",
+ "commitChanges/error": "未能将更改提交给预设。",
+ "commitChanges.manual/description": "选择要包含在预设中的更改。",
+ "saveAs": "另存为新预设...",
+ "presetNamePlaceholder": "为预设输入一个名称...",
+ "cannotCommitChangesLegacy": "这是一个旧版预设,无法修改。您可以使用“另存为新预设...”创建一个副本。",
+ "cannotCommitChangesNoChanges": "没有更改可以提交。",
+ "emptyNoUnsaved": "选择一个预设...",
+ "emptyWithUnsaved": "未保存的预设",
+ "saveEmptyWithUnsaved": "保存预设为...",
+ "saveConfirm": "保存",
+ "saveCancel": "取消",
+ "saving": "正在保存...",
+ "save/error": "未能保存预设。",
+ "deselect": "取消选择预设",
+ "deselect/error": "取消选择预设失败。",
+ "select/error": "选择预设失败。",
+ "delete/error": "删除预设失败。",
+ "discardChanges": "丢弃未保存的更改",
+ "discardChanges/info": "丢弃所有未提交的更改并恢复预设至原始状态",
+ "newEmptyPreset": "创建新的空预设...",
+ "importPreset": "导入",
+ "contextMenuCopyIdentifier": "复制预设标识符",
+ "contextMenuSelect": "选择预设",
+ "contextMenuDelete": "删除",
+ "contextMenuShare": "发布中...",
+ "contextMenuOpenInHub": "在 Hub 上查看",
+ "contextMenuPullFromHub": "拉取最新版本",
+ "contextMenuPushChanges": "推送更改到 Hub",
+ "contextMenuPushingChanges": "正在推送...",
+ "contextMenuPushedChanges": "更改已推送",
+ "contextMenuExport": "导出文件",
+ "contextMenuRevealInExplorer": "在文件资源管理器中显示",
+ "contextMenuRevealInFinder": "在 Finder 中显示",
+ "share": {
+ "title": "发布预设",
+ "action": "分享你的预设,让他人下载、点赞和 fork",
+ "presetOwnerLabel": "所有者",
+ "uploadAs": "你的预设将以 {{name}} 的名义创建",
+ "presetNameLabel": "预设名称",
+ "descriptionLabel": "描述(可选)",
+ "loading": "正在发布...",
+ "success": "预设已成功发布",
+ "presetIsLive": " 已在 Hub 上发布!",
+ "close": "关闭",
+ "confirmViewOnWeb": "在网页上查看",
+ "confirmCopy": "复制链接",
+ "confirmCopied": "已复制!",
+ "pushedToHub": "你的预设已推送到 Hub",
+ "descriptionPlaceholder": "请输入描述...",
+ "willBePublic": "发布你的预设将使其公开",
+ "willBePrivate": "仅您可见",
+ "willBeOrgVisible": "组织内成员均可见",
+ "publicSubtitle": "你的预设现在为 公开。其他人可以在 lmstudio.ai 下载和 fork 它",
+ "privateUsageReached": "私有预设的数量已达上限",
+ "continueInBrowser": "在浏览器继续",
+ "confirmShareButton": "发布",
+ "error": "预设发布失败",
+ "createFreeAccount": "请在 Hub 创建免费账号以发布预设"
+ },
+ "update": {
+ "title": "推送更改到 Hub",
+ "title/success": "预设已成功更新",
+ "subtitle": "修改 并推送到 Hub",
+ "descriptionLabel": "描述",
+ "descriptionPlaceholder": "请输入描述...",
+ "loading": "正在推送...",
+ "cancel": "取消",
+ "createFreeAccount": "请在 Hub 创建免费账号以发布预设",
+ "error": "推送更新失败",
+ "confirmUpdateButton": "推送"
+ },
+ "resolve": {
+ "title": "解决冲突...",
+ "tooltip": "打开窗口以解决与 Hub 版本的差异"
+ },
+ "loginToManage": {
+ "title": "登录以管理..."
+ },
+ "downloadFromHub": {
+ "title": "下载",
+ "downloading": "下载中...",
+ "success": "下载完成!",
+ "error": "下载失败"
+ },
+ "push": {
+ "title": "推送更改",
+ "pushing": "推送中...",
+ "success": "推送成功!",
+ "tooltip": "将本地更改推送到 Hub 上托管的远程版本",
+ "error": "推送失败"
+ },
+ "saveAsNewModal": {
+ "title": "哎呀!在 Hub 上未找到预设",
+ "confirmSaveAsNewDescription": "您是否希望将此预设作为新版本发布?",
+ "confirmButton": "作为新版本发布"
+ },
+ "pull": {
+ "title": "拉取最新版本",
+ "error": "拉取失败",
+ "contextMenuErrorMessage": "拉取失败",
+ "success": "已拉取",
+ "pulling": "拉取中...",
+ "upToDate": "已是最新版本!",
+ "unsavedChangesModal": {
+ "title": "你有未保存的更改。",
+ "bodyContent": "从远程拉取的内容将覆盖您的未保存更改,是否继续?",
+ "confirmButton": "覆盖未保存的更改"
+ }
+ },
+ "import": {
+ "title": "从文件导入预设",
+ "dragPrompt": "拖拽预设 JSON 文件或从电脑选择",
+ "remove": "移除",
+ "cancel": "取消",
+ "importPreset_zero": "导入预设",
+ "importPreset_one": "导入预设",
+ "importPreset_other": "导入 {{count}} 个预设",
+ "selectDialog": {
+ "title": "选择预设文件(.json 或者 .tar.gz)",
+ "button": "导入"
+ },
+ "error": "导入预设失败",
+ "resultsModal": {
+ "titleSuccessSection_one": "成功导入 1 个预设",
+ "titleSuccessSection_other": "成功导入 {{count}} 个预设",
+ "titleFailSection_zero": "",
+ "titleFailSection_one": "({{count}} 个失败)",
+ "titleFailSection_other": "({{count}} 个失败)",
+ "titleAllFailed": "预设导入失败",
+ "importMore": "继续导入",
+ "close": "完成",
+ "successBadge": "成功",
+ "alreadyExistsBadge": "预设已存在",
+ "errorBadge": "错误",
+ "invalidFileBadge": "无效文件",
+ "otherErrorBadge": "导入预设失败",
+ "errorViewDetailsButton": "查看详情",
+ "seeError": "查看错误",
+ "noName": "无预设名称",
+ "useInChat": "在聊天中使用"
+ },
+ "importFromUrl": {
+ "button": "从 URL 导入...",
+ "title": "从 URL 导入",
+ "back": "从文件导入...",
+ "action": "请在下方粘贴你要导入的 LM Studio Hub 预设链接",
+ "invalidUrl": "无效的 URL,请确保输入的是有效的 LM Studio Hub 预设链接。",
+ "tip": "你也可以在 LM Studio Hub 直接点击 {{buttonName}} 按钮安装该预设",
+ "confirm": "导入",
+ "cancel": "取消",
+ "loading": "正在导入...",
+ "error": "下载预设失败。"
+ }
+ },
+ "download": {
+ "title": "从 LM Studio Hub 拉取 ",
+ "subtitle": "保存 到你的预设。保存后你可以在应用中使用此预设",
+ "button": "拉取",
+ "button/loading": "正在拉取...",
+ "cancel": "取消",
+ "error": "下载预设失败。"
+ },
+ "inclusiveness": {
+ "speculativeDecoding": "包含在预设中"
+ }
+ },
+
+ "flashAttentionWarning": "Flash Attention 是一项实验性功能,可能会导致某些模型出现问题。如果您遇到问题,请尝试禁用它。",
+ "llamaKvCacheQuantizationWarning": "KV 缓存量化是一项实验性功能,可能会导致某些模型出现问题。V 缓存量化必须启用 Flash Attention。如果遇到问题,请将默认值重置为\"F16\"。",
+
+ "seedUncheckedHint": "随机种子",
+ "ropeFrequencyBaseUncheckedHint": "自动",
+ "ropeFrequencyScaleUncheckedHint": "自动",
+
+ "hardware": {
+ "environmentVariables": "环境变量",
+ "environmentVariables.info": "如果不确定,请保留默认值",
+ "environmentVariables.reset": "重置为默认值",
+
+ "gpus.information": "配置检测到的图形处理单元 (GPU)",
+ "gpuSettings": {
+ "editMaxCapacity": "编辑最大容量",
+ "hideEditMaxCapacity": "隐藏最大容量编辑",
+ "allOffWarning": "所有 GPU 均已关闭或禁用,请确保分配了至少一个 GPU 以加载模型",
+ "split": {
+ "title": "分配策略",
+ "placeholder": "选择 GPU 内存分配方式",
+ "options": {
+ "generalDescription": "配置模型将如何加载到您的 GPU 上",
+ "evenly": {
+ "title": "均匀分配",
+ "description": "在多个 GPU 之间均匀分配内存"
+ },
+ "priorityOrder": {
+ "title": "按顺序填充",
+ "description": "先在第一个 GPU 上分配内存,然后依次分配到后续 GPU"
+ },
+ "custom": {
+ "title": "自定义",
+ "description": "分配内存",
+ "maxAllocation": "最大分配"
+ }
+ }
+ },
+ "deviceId.info": "此设备的唯一标识符",
+ "changesOnlyAffectNewlyLoadedModels": "更改仅影响新加载的模型",
+ "toggleGpu": "启用/禁用 GPU"
+ }
+ },
+
+ "load.gpuSplitConfig/title": "GPU 分配配置",
+ "envVars/title": "设置环境变量",
+ "envVars": {
+ "select": {
+ "placeholder": "选择环境变量...",
+ "noOptions": "无更多可用选项",
+ "filter": {
+ "placeholder": "过滤搜索结果",
+ "resultsFound_zero": "未找到结果",
+ "resultsFound_one": "找到 1 个结果",
+ "resultsFound_other": "找到 {{count}} 个结果"
+ }
+ },
+ "inputValue": {
+ "placeholder": "输入值"
+ },
+ "values": {
+ "title": "当前值"
+ }
+ }
+}
diff --git a/zh-CN/developer.json b/zh-CN/developer.json
index 9ed7ec42..d2592439 100644
--- a/zh-CN/developer.json
+++ b/zh-CN/developer.json
@@ -1,179 +1,197 @@
-{
- "tabs/server": "本地服务器",
- "tabs/extensions": "LM 运行环境",
- "loadSettings/title": "加载设置",
- "modelSettings/placeholder": "选择一个模型进行配置",
-
- "loadedModels/noModels": "没有已加载的模型",
-
- "serverOptions/title": "服务器选项",
- "serverOptions/configurableTitle": "可配置选项",
- "serverOptions/port/hint": "设置本地服务器将使用的网络端口。默认情况下,LM Studio 使用端口 1234。如果该端口已被占用,您可能需要更改此设置。",
- "serverOptions/port/subtitle": "监听的端口",
- "serverOptions/autostart/title": "自动启动服务器",
- "serverOptions/autostart/hint": "当加载模型时自动启动本地服务器",
- "serverOptions/port/integerWarning": "端口号必须是整数",
- "serverOptions/port/invalidPortWarning": "端口号必须介于 1 到 65535 之间",
- "serverOptions/cors/title": "启用 CORS",
- "serverOptions/cors/hint1": "启用 CORS (跨源资源共享) 允许您访问的网站向 LM Studio 服务器发起请求。",
- "serverOptions/cors/hint2": "当从网页或 VS Code 或其他扩展发起请求时,可能需要启用 CORS。",
- "serverOptions/cors/subtitle": "允许跨源请求",
- "serverOptions/network/title": "在网络中提供服务",
- "serverOptions/network/subtitle": "向网络中的设备开放服务器",
- "serverOptions/network/hint1": "是否允许来自网络中其他设备的连接。",
- "serverOptions/network/hint2": "如果未选中,服务器将仅监听本地主机。",
- "serverOptions/verboseLogging/title": "详细日志记录",
- "serverOptions/verboseLogging/subtitle": "为本地服务器启用详细日志记录",
- "serverOptions/contentLogging/title": "记录提示和响应",
- "serverOptions/contentLogging/subtitle": "本地请求/响应日志记录设置",
- "serverOptions/contentLogging/hint": "是否在本地服务器日志文件中记录提示和/或响应。",
- "serverOptions/redactContent/title": "内容脱敏",
- "serverOptions/redactContent/hint": "启用后,可防止敏感数据(如请求和响应内容)被记录在日志中。",
- "serverOptions/logIncomingTokens/title": "记录传入的 Token",
- "serverOptions/logIncomingTokens/hint": "是否在生成过程中记录每个 Token",
- "serverOptions/fileLoggingMode/title": "文件日志模式",
- "serverOptions/fileLoggingMode/off/title": "关闭",
- "serverOptions/fileLoggingMode/off/hint": "不创建日志文件",
- "serverOptions/fileLoggingMode/succinct/title": "简洁",
- "serverOptions/fileLoggingMode/succinct/hint": "记录与控制台相同的内容。长请求将被截断。",
- "serverOptions/fileLoggingMode/full/title": "完整",
- "serverOptions/fileLoggingMode/full/hint": "不对长请求进行截断。",
- "serverOptions/jitModelLoading/title": "JIT(即时)模型加载",
- "serverOptions/jitModelLoading/hint": "启用后,如果请求指定了一个未加载的模型,该模型将自动加载并使用。此外,\"/v1/models\" 端点还将包含尚未加载的模型。",
- "serverOptions/loadModel/error": "加载模型失败",
- "serverOptions/jitModelLoadingTTL/title": "自动卸载未使用的即时加载模型",
- "serverOptions/jitModelLoadingTTL/hint": "通过 API 请求即时加载的模型,若在一段时间内未被使用,将会被自动卸载(TTL)",
- "serverOptions/jitModelLoadingTTL/ttl/label": "最大空闲 TTL",
- "serverOptions/jitModelLoadingTTL/ttl/unit": "分钟",
- "serverOptions/unloadPreviousJITModelOnLoad/title": "仅保留最后一个即时加载的模型",
- "serverOptions/unloadPreviousJITModelOnLoad/hint": "确保在任意时刻最多只有一个即时加载的模型(会卸载之前的模型)",
-
- "serverLogs/scrollToBottom": "跳转到底部",
- "serverLogs/clearLogs": "清除日志 ({{shortcut}})",
- "serverLogs/openLogsFolder": "打开服务器日志文件夹",
-
- "runtimeSettings/title": "运行环境设置",
- "runtimeSettings/chooseRuntime/title": "配置运行环境",
- "runtimeSettings/chooseRuntime/description": "为每个模型格式选择一个运行环境",
- "runtimeSettings/chooseRuntime/showAllVersions/label": "显示所有运行环境",
- "runtimeSettings/chooseRuntime/showAllVersions/hint": "默认情况下,LM Studio 只显示每个兼容运行环境的最新版本。启用此选项可以查看所有可用的运行环境。",
- "runtimeSettings/chooseRuntime/select/placeholder": "选择一个运行环境",
-
- "runtimeSettings/chooseFrameworks/title": "框架",
- "runtimeSettings/chooseFrameworks/description": "为每个功能选择要使用的框架",
- "runtimeSettings/chooseFramework/documentParser/builtIn/label": "内置解析器",
- "runtimeSettings/chooseFramework/documentParser/select/label": "文档解析器",
- "runtimeSettings/chooseFramework/documentParser/select/placeholder": "请选择文档解析器",
-
- "runtimeOptions/uninstall": "卸载",
- "runtimeOptions/uninstallDialog/title": "卸载 {{runtimeName}}?",
- "runtimeOptions/uninstallDialog/body": "卸载此运行环境将从系统中移除它。此操作不可逆。",
- "runtimeOptions/uninstallDialog/body/caveats": "某些文件可能需要在重启 LM Studio 后才能被移除。",
- "runtimeOptions/uninstallDialog/error": "卸载运行环境失败",
- "runtimeOptions/uninstallDialog/confirm": "继续并卸载",
- "runtimeOptions/uninstallDialog/cancel": "取消",
- "runtimeOptions/noCompatibleRuntimes": "未找到兼容的运行环境",
- "runtimeOptions/downloadIncompatibleRuntime": "此运行环境被认为与您的机器不兼容。它很可能无法正常工作。",
- "runtimeOptions/noRuntimes": "未找到运行环境",
-
- "runtimes": {
- "manageLMRuntimes": "管理 LM 运行环境",
- "includeOlderRuntimeVersions": "包含旧版本",
- "dismiss": "关闭",
- "updateAvailableToast": {
- "title": "LM 运行环境更新可用!"
- },
- "updatedToast": {
- "title": " ✅ LM 运行环境已更新:{{runtime}} → v{{version}}",
- "preferencesUpdated": "新加载的 {{compatibilityTypes}} 模型将使用更新后的运行环境。"
- },
- "noAvx2ErrorMessage": "所有 LM 运行环境当前都需要支持 AVX2 指令集的 CPU",
- "downloadableRuntimes": {
- "runtimeExtensionPacks": "运行环境扩展包",
- "refresh": "刷新",
- "refreshing": "刷新中...",
- "filterSegment": {
- "compatibleOnly": "仅兼容",
- "all": "全部"
- },
- "card": {
- "releaseNotes": "版本说明",
- "latestVersionInstalled": "已安装最新版本",
- "updateAvailable": "更新可用"
- }
- },
- "installedRuntimes": {
- "manage": {
- "title": "管理可用的运行环境"
- },
- "dropdownOptions": {
- "installedVersions": "管理版本",
- "close": "关闭"
- },
- "tabs": {
- "all": "全部",
- "frameworks": "我的框架",
- "engines": "我的引擎"
- },
- "detailsModal": {
- "installedVersions": "{{runtimeName}}的已安装版本",
- "manifestJsonTitle": "清单 JSON(高级)",
- "releaseNotesTitle": "版本说明",
- "noReleaseNotes": "该版本无可用的版本说明",
- "back": "返回",
- "close": "关闭"
- },
- "noEngines": "未安装引擎",
- "noFrameworks": "未安装框架"
- }
- },
-
- "inferenceParams/noParams": "此模型类型无可用配置的推理参数",
-
- "quickDocs": {
- "tabChipTitle": "快速文档",
- "newToolUsePopover": "代码片段现已在“快速文档”中提供。点击此处开始使用工具!",
- "newToolUsePopoverTitle": "📚 快速文档",
- "learnMore": "ℹ️ 👾 要了解有关 LM Studio 本地服务器端的更多信息,请访问[文档](https://lmstudio.ai/docs)。",
- "helloWorld": {
- "title": "你好,世界!"
- },
- "chat": {
- "title": "聊天"
- },
- "structuredOutput": {
- "title": "结构化输出"
- },
- "imageInput": {
- "title": "图像输入"
- },
- "embeddings": {
- "title": "文本嵌入"
- },
- "toolUse": {
- "title": "工具使用",
- "tab": {
- "saveAsPythonFile": "保存为Python文件",
- "runTheScript": "运行脚本:",
- "savePythonFileCopyPaste": "保存为Python文件以进行复制粘贴命令"
- }
- },
- "newBadge": "新功能"
- },
-
- "endpoints/openaiCompatRest/title": "支持的端点 (与 OpenAI 兼容的格式)",
- "endpoints/openaiCompatRest/getModels": "列出当前已加载的模型",
- "endpoints/openaiCompatRest/postCompletions": "文本补全模式。给定一个提示,预测下一个token。注意:OpenAI 认为此端点已'弃用'。",
- "endpoints/openaiCompatRest/postChatCompletions": "聊天补全。向模型发送聊天历史以预测下一个助手响应。",
- "endpoints/openaiCompatRest/postEmbeddings": "文本嵌入。为给定的文本输入生成文本嵌入。接受字符串或字符串数组。",
-
- "model.createVirtualModelFromInstance": "另存为新的虚拟模型",
- "model.createVirtualModelFromInstance/error": "另存为新的虚拟模型失败",
-
- "model": {
- "toolUseSectionTitle": "工具使用",
- "toolUseDescription": "检测到此模型经过工具使用的训练\n\n打开快速文档以了解更多信息。"
- },
-
- "apiConfigOptions/title": "API 配置"
-}
+{
+ "tabs/server": "本地服务器",
+ "tabs/extensions": "LM 运行环境",
+ "loadSettings/title": "加载设置",
+ "modelSettings/placeholder": "选择一个模型进行配置",
+
+ "loadedModels/noModels": "没有已加载的模型",
+
+ "serverOptions/title": "服务器选项",
+ "serverOptions/configurableTitle": "可配置选项",
+ "serverOptions/port/hint": "设置本地服务器将使用的网络端口。默认情况下,LM Studio 使用端口 1234。如果该端口已被占用,您可能需要更改此设置。",
+ "serverOptions/port/subtitle": "监听的端口",
+ "serverOptions/autostart/title": "自动启动服务器",
+ "serverOptions/autostart/hint": "当加载模型时自动启动本地服务器",
+ "serverOptions/port/integerWarning": "端口号必须是整数",
+ "serverOptions/port/invalidPortWarning": "端口号必须介于 1 到 65535 之间",
+ "serverOptions/cors/title": "启用 CORS",
+ "serverOptions/cors/hint1": "启用 CORS (跨源资源共享) 允许您访问的网站向 LM Studio 服务器发起请求。",
+ "serverOptions/cors/hint2": "当从网页或 VS Code 或其他扩展发起请求时,可能需要启用 CORS。",
+ "serverOptions/cors/subtitle": "允许跨源请求",
+ "serverOptions/network/title": "在网络中提供服务",
+ "serverOptions/network/subtitle": "向网络中的设备开放服务器",
+ "serverOptions/network/hint1": "是否允许来自网络中其他设备的连接。",
+ "serverOptions/network/hint2": "如果未选中,服务器将仅监听本地主机。",
+ "serverOptions/verboseLogging/title": "详细日志记录",
+ "serverOptions/verboseLogging/subtitle": "为本地服务器启用详细日志记录",
+ "serverOptions/contentLogging/title": "记录提示和响应",
+ "serverOptions/contentLogging/subtitle": "本地请求/响应日志记录设置",
+ "serverOptions/contentLogging/hint": "是否在本地服务器日志文件中记录提示和/或响应。",
+ "serverOptions/redactContent/title": "内容脱敏",
+ "serverOptions/redactContent/hint": "启用后,可防止敏感数据(如请求和响应内容)被记录在日志中。",
+ "serverOptions/logIncomingTokens/title": "记录传入的 Token",
+ "serverOptions/logIncomingTokens/hint": "是否在生成过程中记录每个 Token",
+ "serverOptions/fileLoggingMode/title": "文件日志模式",
+ "serverOptions/fileLoggingMode/off/title": "关闭",
+ "serverOptions/fileLoggingMode/off/hint": "不创建日志文件",
+ "serverOptions/fileLoggingMode/succinct/title": "简洁",
+ "serverOptions/fileLoggingMode/succinct/hint": "记录与控制台相同的内容。长请求将被截断。",
+ "serverOptions/fileLoggingMode/full/title": "完整",
+ "serverOptions/fileLoggingMode/full/hint": "不对长请求进行截断。",
+ "serverOptions/jitModelLoading/title": "JIT(即时)模型加载",
+ "serverOptions/jitModelLoading/hint": "启用后,如果请求指定了一个未加载的模型,该模型将自动加载并使用。此外,\"/v1/models\" 端点还将包含尚未加载的模型。",
+ "serverOptions/loadModel/error": "加载模型失败",
+ "serverOptions/jitModelLoadingTTL/title": "自动卸载未使用的即时加载模型",
+ "serverOptions/jitModelLoadingTTL/hint": "通过 API 请求即时加载的模型,若在一段时间内未被使用,将会被自动卸载(TTL)",
+ "serverOptions/jitModelLoadingTTL/ttl/label": "最大空闲 TTL",
+ "serverOptions/jitModelLoadingTTL/ttl/unit": "分钟",
+ "serverOptions/unloadPreviousJITModelOnLoad/title": "仅保留最后一个即时加载的模型",
+ "serverOptions/unloadPreviousJITModelOnLoad/hint": "确保在任意时刻最多只有一个即时加载的模型(会卸载之前的模型)",
+
+ "serverLogs/scrollToBottom": "跳转到底部",
+ "serverLogs/clearLogs": "清除日志 ({{shortcut}})",
+ "serverLogs/openLogsFolder": "打开服务器日志文件夹",
+
+ "runtimeSettings/title": "运行环境设置",
+ "runtimeSettings/chooseRuntime/title": "配置运行环境",
+ "runtimeSettings/chooseRuntime/description": "为每个模型格式选择一个运行环境",
+ "runtimeSettings/chooseRuntime/showAllVersions/label": "显示所有运行环境",
+ "runtimeSettings/chooseRuntime/showAllVersions/hint": "默认情况下,LM Studio 只显示每个兼容运行环境的最新版本。启用此选项可以查看所有可用的运行环境。",
+ "runtimeSettings/chooseRuntime/select/placeholder": "选择一个运行环境",
+
+ "runtimeSettings/chooseFrameworks/title": "框架",
+ "runtimeSettings/chooseFrameworks/description": "为每个功能选择要使用的框架",
+ "runtimeSettings/chooseFramework/documentParser/builtIn/label": "内置解析器",
+ "runtimeSettings/chooseFramework/documentParser/select/label": "文档解析器",
+ "runtimeSettings/chooseFramework/documentParser/select/placeholder": "请选择文档解析器",
+
+ "runtimeOptions/uninstall": "卸载",
+ "runtimeOptions/uninstallDialog/title": "卸载 {{runtimeName}}?",
+ "runtimeOptions/uninstallDialog/body": "卸载此运行环境将从系统中移除它。此操作不可逆。",
+ "runtimeOptions/uninstallDialog/body/caveats": "某些文件可能需要在重启 LM Studio 后才能被移除。",
+ "runtimeOptions/uninstallDialog/error": "卸载运行环境失败",
+ "runtimeOptions/uninstallDialog/confirm": "继续并卸载",
+ "runtimeOptions/uninstallDialog/cancel": "取消",
+ "runtimeOptions/noCompatibleRuntimes": "未找到兼容的运行环境",
+ "runtimeOptions/downloadIncompatibleRuntime": "此运行环境被认为与您的机器不兼容。它很可能无法正常工作。",
+ "runtimeOptions/noRuntimes": "未找到运行环境",
+
+ "runtimes": {
+ "manageLMRuntimes": "管理 LM 运行环境",
+ "includeOlderRuntimeVersions": "包含旧版本",
+ "dismiss": "关闭",
+ "updateAvailableToast": {
+ "title": "LM 运行环境更新可用!"
+ },
+ "updatedToast": {
+ "title": " ✅ LM 运行环境已更新:{{runtime}} → v{{version}}",
+ "preferencesUpdated": "新加载的 {{compatibilityTypes}} 模型将使用更新后的运行环境。"
+ },
+ "noAvx2ErrorMessage": "所有 LM 运行环境当前都需要支持 AVX2 指令集的 CPU",
+ "downloadableRuntimes": {
+ "runtimeExtensionPacks": "运行环境扩展包",
+ "refresh": "刷新",
+ "refreshing": "刷新中...",
+ "filterSegment": {
+ "compatibleOnly": "仅兼容",
+ "all": "全部"
+ },
+ "card": {
+ "releaseNotes": "版本说明",
+ "latestVersionInstalled": "已安装最新版本",
+ "updateAvailable": "更新可用"
+ }
+ },
+ "installedRuntimes": {
+ "manage": {
+ "title": "管理可用的运行环境"
+ },
+ "dropdownOptions": {
+ "installedVersions": "管理版本",
+ "close": "关闭"
+ },
+ "tabs": {
+ "all": "全部",
+ "frameworks": "我的框架",
+ "engines": "我的引擎"
+ },
+ "detailsModal": {
+ "installedVersions": "{{runtimeName}} 的已安装版本",
+ "manifestJsonTitle": "清单 JSON(高级)",
+ "releaseNotesTitle": "版本说明",
+ "noReleaseNotes": "该版本无可用的版本说明",
+ "back": "返回",
+ "close": "关闭"
+ },
+ "noEngines": "未安装引擎",
+ "noFrameworks": "未安装框架"
+ }
+ },
+
+ "inferenceParams/noParams": "此模型类型无可用配置的推理参数",
+
+ "quickDocs": {
+ "tabChipTitle": "快速文档",
+ "newToolUsePopover": "代码片段现已在“快速文档”中提供。点击此处开始使用工具!",
+ "newToolUsePopoverTitle": "📚 快速文档",
+ "learnMore": "ℹ️ 👾 要了解有关 LM Studio 本地服务器端的更多信息,请访问[文档](https://lmstudio.ai/docs)。",
+ "helloWorld": {
+ "title": "你好,世界!"
+ },
+ "chat": {
+ "title": "聊天"
+ },
+ "structuredOutput": {
+ "title": "结构化输出"
+ },
+ "imageInput": {
+ "title": "图像输入"
+ },
+ "embeddings": {
+ "title": "文本嵌入"
+ },
+ "toolUse": {
+ "title": "工具使用",
+ "tab": {
+ "saveAsPythonFile": "保存为 Python 文件",
+ "runTheScript": "运行脚本:",
+ "savePythonFileCopyPaste": "保存为 Python 文件,以便复制粘贴命令运行"
+ }
+ },
+ "newBadge": "新功能"
+ },
+
+ "endpoints/openaiCompatRest/title": "支持的端点 (与 OpenAI 兼容的格式)",
+ "endpoints/openaiCompatRest/getModels": "列出当前已加载的模型",
+ "endpoints/openaiCompatRest/postCompletions": "文本补全模式。给定一个提示,预测下一个token。注意:OpenAI 认为此端点已“弃用”。",
+ "endpoints/openaiCompatRest/postChatCompletions": "聊天补全。向模型发送聊天历史以预测下一个助手响应。",
+ "endpoints/openaiCompatRest/postEmbeddings": "文本嵌入。为给定的文本输入生成文本嵌入。接受字符串或字符串数组。",
+
+ "model.createVirtualModelFromInstance": "另存为新的虚拟模型",
+ "model.createVirtualModelFromInstance/error": "另存为新的虚拟模型失败",
+
+ "model": {
+ "toolUseSectionTitle": "工具使用",
+ "toolUseDescription": "检测到此模型经过工具使用的训练\n\n打开快速文档以了解更多信息。"
+ },
+
+ "apiConfigOptions/title": "API 配置",
+
+ "serverOptions/allowMcp/hint": "允许使用不在您的 mcp.json 中的 MCP。这些 MCP 连接是临时的,仅在请求期间存在。目前仅支持远程 MCP。",
+ "serverOptions/stop/error": "停止服务器失败",
+ "endpoints/lmStudioRestV1/postModelsLoad": "使用选项加载模型",
+ "serverOptions/allowMcp/mode/off": "关闭",
+ "endpoints/lmStudioRestV1/postModelsDownload": "下载模型",
+ "endpoints/openaiCompatRest/segmentedLabel": "OpenAI 兼容",
+ "endpoints/anthropicCompatRest/segmentedLabel": "Anthropic 兼容",
+ "serverOptions/start/error": "启动服务器失败",
+ "endpoints/lmStudioRestV1/getModels": "列出可用模型",
+ "endpoints/lmStudioRestV1/postChat": "与模型聊天。支持有状态的多轮对话和 MCP",
+ "serverOptions/allowMcp/title": "允许远程 MCP",
+ "serverOptions/allowMcp/mode/remote": "远程",
+ "endpoints/lmStudioRestV1/getModelsDownloadStatus": "获取模型下载状态",
+ "endpoints/openaiCompatRest/postResponses": "生成模型响应的高级接口。通过将前一个响应的 ID 作为输入传递给下一个响应来创建有状态交互。",
+ "serverOptions/allowMcp/mode/off/hint": "不允许服务器请求使用 MCP",
+ "serverOptions/allowMcp/mode/remote/hint": "允许连接到远程 MCP 服务器",
+ "endpoints/lmStudioRest/segmentedLabel": "LM Studio"
+}
diff --git a/zh-CN/models.json b/zh-CN/models.json
index da248fdf..d0d161bd 100644
--- a/zh-CN/models.json
+++ b/zh-CN/models.json
@@ -1,115 +1,127 @@
-{
- "pageTitle": "我的模型",
- "filterModels.placeholder": "筛选模型...",
- "aggregate_one": "您有 {{count}} 个本地模型,占用了 {{size}} 的磁盘空间。",
- "aggregate_other": "您有 {{count}} 个本地模型,占用了 {{size}} 的磁盘空间。",
-
- "noModels.title": "您的本地 LLM 将显示在这里。",
- "noModels.discoverButtonText.prefix": "点击左侧边栏的",
- "noModels.discoverButtonText.suffix": "按钮来发现有趣的 LLM。",
- "noModels.discoverModelsPrompt": "去探索一些本地 LLM 吧!",
-
- "modelsTable.arch/label": "架构",
- "modelsTable.params/label": "参数量",
- "modelsTable.publisher/label": "发布者",
- "modelsTable.displayName/label": "名字",
- "modelsTable.modelKey/label": "模型密钥",
- "modelsTable.size/label": "尺寸",
- "modelsTable.dateModified/label": "修改日期",
- "modelsTable.actions/label": "操作",
-
- "modelsTable.quant/label": "量化规格",
- "modelsTable.llms/label": "语言模型",
- "modelsTable.embeddingModels/label": "嵌入模型",
-
- "action.model.delete": "删除",
- "action.model.delete.full": "删除模型",
- "action.model.delete.confirmation/title": "删除 {{name}}",
- "action.model.delete.confirmation/description": "您确定吗?这将永久删除与此模型相关的所有文件,此操作不可逆。",
- "action.model.delete.confirmation/confirm": "删除",
- "action.model.delete/error": "删除模型失败",
-
- "loader.model.bundled": "捆绑",
- "action.cancel": "取消",
- "indexingOngoing": "正在索引模型... 这可能需要几秒钟",
- "index/error_one": "索引以下文件夹失败:",
- "index/error_other": "索引以下文件夹失败:",
- "badModels/title_one": "索引以下模型失败:",
- "badModels/title_other": "索引以下模型失败:",
- "badModels.virtualModelIncorrectPlacement": "虚拟模型放置错误。预期位置为 {{expected}}。实际位置为 {{actual}}。",
- "badModels.virtualModelBadManifest": "无效的虚拟模型清单 (model.yaml):",
- "unresolvedVirtualModels/title_one": "解析以下虚拟模型失败:",
- "unresolvedVirtualModels/title_other": "解析以下虚拟模型失败:",
- "unresolvedVirtualModels.missingModel": "缺少依赖模型:{{missing}}。依赖路径:\n{{chain}}",
- "unresolvedVirtualModels.circular": "检测到循环依赖。",
- "unresolvedVirtualModels.fix": "修复",
- "unresolvedVirtualModels.revealInExplorer": "在文件资源管理器中显示",
- "unresolvedVirtualModels.revealInFinder": "在 Finder 中显示",
- "unresolvedVirtualModels.reveal/error": "显示失败",
-
- "modelsDirectory": "模型目录",
- "modelsDirectory.change": "更改...",
- "modelsDirectory.change/error": "修改模型路径失败",
- "modelsDirectory.reset": "重置为默认路径",
- "modelsDirectory.reveal.mac": "在 Finder 中显示",
- "modelsDirectory.reveal.nonMac": "在文件资源管理器中打开",
- "modelsDirectory.reveal.mac/error": "在 Finder 中显示失败",
- "modelsDirectory.reveal.nonMac/error": "在文件资源管理器中打开失败",
- "modelsDirectory.forceReindex": "刷新",
- "loadState/loaded": "已加载",
- "loadState/loading": "加载中",
- "loadState/unloaded": "未加载",
- "loadState/unloading": "卸载中",
- "loadState/idle": "空闲",
- "pinned": "此模型已被固定。右键点击取消固定。",
- "lastUsed": "最后使用的",
- "contextMenu/pin": "固定到顶部",
- "contextMenu/unpin": "取消固定",
- "contextMenu/copyAbsolutePath": "复制绝对路径",
- "contextMenu/copyModelName": "复制模型路径",
- "contextMenu/copyModelDefaultIdentifier": "复制默认标识符",
- "contextMenu/showRawMetadata": "查看原始元数据",
- "contextMenu/openOnHuggingFace": "在 Hugging Face 上打开",
- "contextMenu": {
- "showOnWeb": "在网页上显示",
- "pullLatest": {
- "label": "拉取最新版本",
- "checking": "检查更新中...",
- "upToDate": "已是最新版本",
- "error": "检查更新失败"
- }
- },
- "tooltip/moreActions": "更多操作",
- "tooltip/getInfo": "获取信息",
- "tooltip/editModelDefaultConfig": "编辑模型默认配置",
- "tooltip/editModelDefaultConfig/override": "编辑模型默认配置(*当前有覆盖)",
- "tooltip/visionBadge": "此模型支持图像输入",
- "tooltip/toolUseBadge": "此模型经过工具使用的训练",
-
- "visionBadge/label": "此模型支持图像输入",
- "toolUseBadge/label": "此模型经过工具使用的训练",
-
- "loader.action.load": "加载模型",
- "loader.action.clearChanges": "清除更改",
- "loader.action.cancel": "取消",
- "loader.info.clickOnModelToLoad": "点击模型以加载",
- "loader.info.configureLoadParameters": "配置模型加载参数",
- "loader.info.activeGeneratorWarning": "您正在使用带有自定义生成器的插件。当前加载的模型是否适用于该插件,取决于生成器的具体实现方式",
-
- "virtual": {
- "local": {
- "create": "创建虚拟模型",
- "title": "创建一个本地虚拟模型",
- "description": "通过将模型与一组配置捆绑来创建虚拟模型,模型的底层权重文件不会被复制。",
- "modelKey.label": "模型密钥",
- "modelKey.placeholder": "输入唯一的模型密钥",
- "modelKey.normalized": "您的模型密钥将被规范化为:{{normalized}}",
- "baseModel.label": "基础模型",
- "baseModel.placeholder": "选择基础模型",
- "baseModel.empty": "下载模型作为基础模型",
- "next": "下一步",
- "confirm": "创建",
- "error": "创建虚拟模型失败"
- }
- }
-}
+{
+ "pageTitle": "我的模型",
+ "filterModels.placeholder": "筛选模型...",
+ "aggregate_one": "您有 {{count}} 个本地模型,占用了 {{size}} 的磁盘空间。",
+ "aggregate_other": "您有 {{count}} 个本地模型,占用了 {{size}} 的磁盘空间。",
+
+ "noModels.title": "您的本地 LLM 将显示在这里。",
+ "noModels.discoverButtonText.prefix": "点击左侧边栏的",
+ "noModels.discoverButtonText.suffix": "按钮来发现有趣的 LLM。",
+ "noModels.discoverModelsPrompt": "去探索一些本地 LLM 吧!",
+
+ "modelsTable.arch/label": "架构",
+ "modelsTable.params/label": "参数量",
+ "modelsTable.publisher/label": "发布者",
+ "modelsTable.displayName/label": "名字",
+ "modelsTable.modelKey/label": "模型密钥",
+ "modelsTable.size/label": "尺寸",
+ "modelsTable.dateModified/label": "修改日期",
+ "modelsTable.actions/label": "操作",
+
+ "modelsTable.quant/label": "量化规格",
+ "modelsTable.llms/label": "语言模型",
+ "modelsTable.embeddingModels/label": "嵌入模型",
+
+ "action.model.delete": "删除",
+ "action.model.delete.full": "删除模型",
+ "action.model.delete.confirmation/title": "删除 {{name}}",
+ "action.model.delete.confirmation/description": "您确定吗?这将永久删除与此模型相关的所有文件,此操作不可逆。",
+ "action.model.delete.confirmation/confirm": "删除",
+ "action.model.delete/error": "删除模型失败",
+
+ "loader.model.bundled": "捆绑",
+ "action.cancel": "取消",
+ "indexingOngoing": "正在索引模型... 这可能需要几秒钟",
+ "index/error_one": "索引以下文件夹失败:",
+ "index/error_other": "索引以下文件夹失败:",
+ "badModels/title_one": "索引以下模型失败:",
+ "badModels/title_other": "索引以下模型失败:",
+ "badModels.virtualModelIncorrectPlacement": "虚拟模型放置错误。预期位置为 {{expected}}。实际位置为 {{actual}}。",
+ "badModels.virtualModelBadManifest": "无效的虚拟模型清单 (model.yaml):",
+ "unresolvedVirtualModels/title_one": "解析以下虚拟模型失败:",
+ "unresolvedVirtualModels/title_other": "解析以下虚拟模型失败:",
+ "unresolvedVirtualModels.missingModel": "缺少依赖模型:{{missing}}。依赖路径:\n{{chain}}",
+ "unresolvedVirtualModels.circular": "检测到循环依赖。",
+ "unresolvedVirtualModels.fix": "修复",
+ "unresolvedVirtualModels.revealInExplorer": "在文件资源管理器中显示",
+ "unresolvedVirtualModels.revealInFinder": "在 Finder 中显示",
+ "unresolvedVirtualModels.reveal/error": "显示失败",
+
+ "modelsDirectory": "模型目录",
+ "modelsDirectory.change": "更改...",
+ "modelsDirectory.change/error": "修改模型路径失败",
+ "modelsDirectory.reset": "重置为默认路径",
+ "modelsDirectory.reveal.mac": "在 Finder 中显示",
+ "modelsDirectory.reveal.nonMac": "在文件资源管理器中打开",
+ "modelsDirectory.reveal.mac/error": "在 Finder 中显示失败",
+ "modelsDirectory.reveal.nonMac/error": "在文件资源管理器中打开失败",
+ "modelsDirectory.forceReindex": "刷新",
+ "loadState/loaded": "已加载",
+ "loadState/loading": "加载中",
+ "loadState/unloaded": "未加载",
+ "loadState/unloading": "卸载中",
+ "loadState/idle": "空闲",
+ "pinned": "此模型已被固定。右键点击取消固定。",
+ "lastUsed": "最后使用的",
+ "contextMenu/pin": "固定到顶部",
+ "contextMenu/unpin": "取消固定",
+ "contextMenu/copyAbsolutePath": "复制绝对路径",
+ "contextMenu/copyModelName": "复制模型路径",
+ "contextMenu/copyModelDefaultIdentifier": "复制默认标识符",
+ "contextMenu/showRawMetadata": "查看原始元数据",
+ "contextMenu/openOnHuggingFace": "在 Hugging Face 上打开",
+ "contextMenu": {
+ "showOnWeb": "在网页上显示",
+ "pullLatest": {
+ "label": "拉取最新版本",
+ "checking": "检查更新中...",
+ "upToDate": "已是最新版本",
+ "error": "检查更新失败"
+ }
+ },
+ "tooltip/moreActions": "更多操作",
+ "tooltip/getInfo": "获取信息",
+ "tooltip/editModelDefaultConfig": "编辑模型默认配置",
+ "tooltip/editModelDefaultConfig/override": "编辑模型默认配置(*当前有覆盖)",
+ "tooltip/visionBadge": "此模型支持图像输入",
+ "tooltip/toolUseBadge": "此模型经过工具使用的训练",
+
+ "visionBadge/label": "此模型支持图像输入",
+ "toolUseBadge/label": "此模型经过工具使用的训练",
+
+ "loader.action.load": "加载模型",
+ "loader.action.clearChanges": "清除更改",
+ "loader.action.cancel": "取消",
+ "loader.info.clickOnModelToLoad": "点击模型以加载",
+ "loader.info.configureLoadParameters": "配置模型加载参数",
+ "loader.info.activeGeneratorWarning": "您正在使用带有自定义生成器的插件。当前加载的模型是否适用于该插件,取决于生成器的具体实现方式",
+
+ "virtual": {
+ "local": {
+ "create": "创建虚拟模型",
+ "title": "创建一个本地虚拟模型",
+ "description": "通过将模型与一组配置捆绑来创建虚拟模型,模型的底层权重文件不会被复制。",
+ "modelKey.label": "模型密钥",
+ "modelKey.placeholder": "输入唯一的模型密钥",
+ "modelKey.normalized": "您的模型密钥将被规范化为:{{normalized}}",
+ "baseModel.label": "基础模型",
+ "baseModel.placeholder": "选择基础模型",
+ "baseModel.empty": "下载模型作为基础模型",
+ "next": "下一步",
+ "confirm": "创建",
+ "error": "创建虚拟模型失败"
+ }
+ },
+
+ "indexingPageLoaderText": "正在索引模型...",
+ "loader.guardrails.notEnoughResources/options": "选项",
+ "loader.guardrails.notEnoughResources.moreInfoSection.appearsNotEnoughMemory": "您的系统似乎没有足够的内存来加载此模型。",
+ "loader.guardrails.unavailable": "此模型的内存估计不可用",
+ "loader.guardrails.total": "总计",
+ "loader.guardrails.notEnoughResources": "当前设置下没有足够的资源来加载模型",
+ "loader.guardrails.estimatedMemoryUsage": "估计内存使用量",
+ "loader.guardrails.notEnoughResources.alwaysAllowLoadAnyway": "(不推荐)始终允许“强制加载”而无需按住 Alt/Option",
+ "loader.guardrails.notEnoughResources.moreInfoSection.ifYouBelieveThisIsIncorrect": "您可以在设置中调整模型加载保护,或按住 来强制加载。",
+ "loader.guardrails.gpu": "GPU",
+ "loader.guardrails.notEnoughResources.moreInfoSection.warning": "加载过大的模型可能会使系统过载并导致死机。"
+}
diff --git a/zh-CN/settings.json b/zh-CN/settings.json
index 3c0cfbe3..9d098cfe 100644
--- a/zh-CN/settings.json
+++ b/zh-CN/settings.json
@@ -1,195 +1,214 @@
-{
- "settingsDialogTitle": "应用设置",
- "settingsDialogButtonTooltip": "应用设置",
- "accountDialogButtonTooltip": "账户",
-
- "settingsNewButtonPopover": {
- "primary": "应用设置现已移至右下角",
- "secondary": "点击⚙️按钮来打开",
- "tertiary": "或者按"
- },
- "appUpdate": "应用更新",
- "checkingAppUpdate": "正在检查更新...",
- "checkForUpdates": "检查更新",
- "failedCheckingAppUpdate": "检查更新失败",
- "newUpdateAvailable": "LM Studio 有新版本可用!🎉",
- "newBetaUpdateAvailable": "LM Studio 有新测试版可用!🛠️🎉",
- "downloadingInProgress": "正在下载更新...",
- "downloadUpdate": "更新至 LM Studio {{version}}",
- "downloadBetaUpdate": "更新至 LM Studio 测试版 {{version}} (版本号 {{build}})",
- "downloadCompleted": "下载完成!",
- "updateDownloadComplete": "更新下载成功!",
- "updateDownloadFailed": "更新失败!",
- "hasFinishedDownloading": "下载完毕",
- "yourCurrentVersion": "当前版本为:",
- "latestVersion": "最新版本为:",
- "downloadLabel": "立即更新",
- "downloadLabel/Linux": "下载更新",
- "cancelDownloadLabel": "取消",
- "downloadingUpdate": "正在下载 {{item}}...",
- "updateDownloaded": "新更新已成功下载。重启应用以应用更新",
- "restartAppToUpdate": "重新启动应用以应用更新",
- "appUpdatedToastTitle": "已更新至 {{title}}",
- "appUpdatedToastDescriptionPrefix": "查看",
- "AppUpdatedToastDescriptionReleaseNotes": "发行说明",
- "toolUseToastTitle": "测试新功能:工具调用与函数调用 API",
- "toolUseToastDescription": "支持 Llama 3.1/3.2、Mistral、Qwen 等部分模型,兼容 OpenAI 工具调用,快速上手。",
- "toolUseToastButtonText": "前往开发者页面体验",
- "doItLater": "稍后再说",
- "failedToUpdate": "应用更新失败。请检查您的网络连接或稍后再试。",
- "retryInBackground": "后台重试",
- "laterLabel": "稍后",
- "releaseNotesLabel": "发行说明",
- "remindMeLater": "稍后提醒我",
- "failedDownloadUpdate": "下载更新失败",
- "installAndRelaunch": "安装并重新启动",
- "uptodate": "您的应用已是最新版本!当前版本为 {{version}}",
- "preferences": "偏好设置",
- "general": "常规",
- "sideButtonLabels": "显示侧边按钮标签",
- "showModelFileNames": "在“我的模型”中始终显示完整模型文件名",
- "colorThemeLabel": "颜色主题",
- "complexityLevelLabel": "用户界面复杂度级别",
- "selectComplexityLevelPlaceholder": "选择默认的UI复杂度级别",
- "userComplexityLevelLabel": "普通用户",
- "powerUserComplexityLevelLabel": "高级用户",
- "developerComplexityLevelLabel": "开发者",
- "chatSettingsLabel": "聊天设置",
- "chat/alwaysShowPromptTemplate": "始终在聊天侧栏显示提示模板",
- "chat/highlightChatMessageOnHover": "鼠标悬停时高亮显示聊天消息",
- "chat/doubleClickMessageToEdit": "双击聊天消息以编辑",
-
- "chat/aiNaming/label": "AI命名聊天",
- "chat/aiNaming/mode/label": "AI生成的聊天名称",
- "chat/aiNaming/mode/value/never": "关闭",
- "chat/aiNaming/mode/value/never/subTitle": "不使用AI生成聊天名称",
- "chat/aiNaming/mode/value/auto": "自动",
- "chat/aiNaming/mode/value/auto/subTitle": "根据生成速度自动决定是否使用AI生成聊天名称",
- "chat/aiNaming/mode/value/always": "开启",
- "chat/aiNaming/mode/value/always/subTitle": "使用AI生成聊天名称",
- "chat/aiNaming/emoji": "在AI生成的聊天名称中使用表情符号",
- "chat/keyboardShortcuts/label": "键盘快捷键",
- "chat/keyboardShortcuts/verbPrefix": "使用",
- "chat/keyboardShortcuts/regenerate": "重新生成聊天中的最后一条消息",
- "chat/keyboardShortcuts/sendMessage": "发送消息",
-
- "onboarding/blockTitle": "新手引导",
- "onboarding/dismissedHints": "已关闭的新手引导",
- "onboarding/resetHintTooltip": "点击以重新启用新手引导",
- "onboarding/resetAllHints": "重置所有新手引导",
- "onboarding/noneDismissed": "没有已关闭的提示,目前所有提示项都会显示,直至下次关闭",
-
- "firstTimeExperienceLabel": "首次聊天体验",
- "firstTimeExperienceMarkCompletedLabel": "标记为已完成",
- "firstTimeExperienceResetLabel": "重置",
- "showPromptSuggestionsLabel": "创建新聊天时显示提示建议",
- "darkThemeLabel": "深色",
- "lightThemeLabel": "浅色",
- "systemThemeLabel": "自动",
- "sepiaThemeLabel": "护眼",
- "unloadPreviousModelLabel": "选择要加载的模型时,先卸载所有当前已加载的模型",
- "languageLabel": "语言",
- "changeLanguageLabel": "选择应用语言(仍在开发中)",
- "developerLabel": "开发者",
- "localServiceLabel": "本地 LLM 服务(无界面)",
- "showExperimentalFeaturesLabel": "显示实验性功能",
- "appFirstLoadLabel": "应用首次加载体验",
- "showDebugInfoBlocksInChatLabel": "在聊天中显示调试信息块",
- "autoLoadBundledLLMLabel": "启动时自动加载捆绑的大语言模型",
- "showReleaseNotes": "显示发行说明",
- "hideReleaseNotes": "隐藏发行说明",
-
- "backendDownloadNewUpdate": "有新的后端可用!",
- "backendDownloadNewUpdateAction": "前往开发者页面",
-
- "backendDownloadChannel.label": "LM Studio 扩展包下载频道",
- "backendDownloadChannel.value.stable": "稳定版",
- "backendDownloadChannel.value.beta": "测试版",
- "backendDownloadChannel.value.latest": "开发版",
- "backendDownloadChannel.shortLabel": "运行环境下载频道",
- "backendDownloadChannel.hint": "选择从哪个频道下载 LM Studio 扩展包。\"{{stableName}}\" 是推荐给大多数用户的通道。",
-
- "appUpdateChannel.label": "LM Studio 更新频道",
- "appUpdateChannel.value.stable": "稳定版",
- "appUpdateChannel.value.beta": "beta测试版",
- "appUpdateChannel.value.alpha": "alpha测试版",
- "appUpdateChannel.shortLabel": "应用更新频道",
- "appUpdateChannel.hint": "选择从哪个频道接收 LM Studio 应用更新。\"{{stableName}}\" 是推荐给大多数用户的通道。",
-
- "modelLoadingGuardrails.label": "模型加载保护",
- "modelLoadingGuardrails.description": "超出系统资源限制加载模型可能导致系统不稳定或卡死。保护措施可以防止意外过载。如果需要,可以在这里调整这些限制。但请注意,接近系统极限加载模型可能会降低稳定性。",
- "modelLoadingGuardrails.value.off": "关闭(不推荐)",
- "modelLoadingGuardrails.value.off/subTitle": "不对系统过载采取预防措施",
- "modelLoadingGuardrails.value.off/detail": "关闭详情",
- "modelLoadingGuardrails.value.low": "宽松",
- "modelLoadingGuardrails.value.low/subTitle": "轻微预防系统过载",
- "modelLoadingGuardrails.value.low/detail": "宽松详情",
- "modelLoadingGuardrails.value.medium": "平衡",
- "modelLoadingGuardrails.value.medium/subTitle": "适度预防系统过载",
- "modelLoadingGuardrails.value.medium/detail": "平衡详情",
- "modelLoadingGuardrails.value.high": "严格",
- "modelLoadingGuardrails.value.high/subTitle": "强烈预防系统过载",
- "modelLoadingGuardrails.value.high/detail": "严格详情",
- "modelLoadingGuardrails.value.custom": "自定义",
- "modelLoadingGuardrails.value.custom/subTitle": "设置最大可加载模型大小的自定义限制",
- "modelLoadingGuardrails.value.custom/detail": "自定义详情",
- "modelLoadingGuardrails.custom.label": "内存限制:",
- "modelLoadingGuardrails.custom.unitGB": "GB",
- "modelLoadingGuardrails.custom.description": "为模型加载设置自定义内存限制。如果加载的模型会超过此限制,则不会加载模型。",
-
- "experimentalLoadPresets": "在预设中启用模型加载配置支持",
- "experimentalLoadPresets.description": "是否允许预设包含模型加载配置。此功能尚处于试验阶段,我们欢迎反馈。",
-
- "unloadPreviousJITModelOnLoad": "模型自动卸载:始终仅允许一个JIT模型加载(加载新模型时卸载上一个)",
- "autoDeleteExtensionPacks": "自动删除最近最少使用的运行环境扩展包",
- "autoUpdateExtensionPacks": "自动更新选中运行环境扩展包",
- "useHFProxy.label": "使用 LM Studio 的 Hugging Face 代理",
- "useHFProxy.hint": "使用 LM Studio 提供的 Hugging Face 代理进行模型搜索和下载。适用于无法直接访问 Hugging Face 的用户。",
- "separateReasoningContentInResponses": "在API响应中区分 `reasoning_content` 和 `content`(如适用)",
- "separateReasoningContentInResponses/hint": "该设置仅适用于像 DeepSeek R1 及其蒸馏模型等输出带有 和 标记的推理模型。",
-
- "promptWhenCommittingUnsavedChangesWithNewFields": "提交新字段到预设时显示确认对话框",
- "promptWhenCommittingUnsavedChangesWithNewFields.description": "如果您想避免意外向预设添加新字段,这将非常有用",
-
- "enableLocalService": "启用本地 LLM 服务",
- "enableLocalService.subtitle": "使用 LM Studio 的 LLM 服务器,而无需保持 LM Studio 应用程序打开",
- "enableLocalService.description": "启用时,LM Studio 本地 LLM 服务将自动启动。关闭 LM Studio 时,本地 LLM 服务也将在系统托盘中继续运行。",
-
- "expandConfigsOnClick": "点击而非悬停时展开配置",
-
- "migrateChats": {
- "label": "迁移 0.3.0 之前的聊天记录",
- "hasBetterLabel": "重新迁移 0.3.0 之前的聊天记录",
- "action_one": "迁移 1 条聊天记录",
- "action_other": "迁移 {{count}} 条聊天记录",
- "inProgress": "正在迁移聊天记录...",
- "hint": {
- "primary": "我们对 v0.3.0+ 版本的聊天记录内部数据结构进行了改造,以支持多版本聊天消息等功能。为了让旧聊天记录出现在应用中,需要将其迁移到新格式。",
- "details": "迁移过程不会删除您的旧聊天记录,而是会创建一个新格式的副本。",
- "footer": "您仍然可以通过旧版本的 LM Studio 访问您的旧聊天记录。目前图片不会自动迁移。"
- },
- "hasBetterHint": {
- "primary": "自从您上次迁移旧聊天记录以来,我们已经改进了聊天记录迁移器。您想要再次运行它吗?",
- "details": "迁移过程将创建一个包含新迁移聊天记录的新文件夹。您的旧聊天记录将保持不变。",
- "footer": "您仍然可以通过旧版本的 LM Studio 访问您的旧聊天记录。目前图片不会自动迁移。"
- },
- "success": "聊天记录迁移成功!",
- "success_one": "1 条聊天记录迁移成功",
- "success_other": "{{count}} 条聊天记录迁移成功",
- "showInstructionsButton": "显示指南",
- "footerCardText": "来自 LM Studio 早期版本的聊天记录需要迁移才能在此版本中使用。",
- "hasBetterFooterCardText": "自从您上次迁移旧聊天记录以来,我们已经改进了聊天记录迁移器。您可以重新运行迁移。(我们将创建一个包含新迁移聊天记录的新文件夹。)",
- "dismissConfirm": "关闭",
- "dismissConfirmDescription": "您随时可以在设置中处理聊天记录迁移"
- },
- "toolConfirmation": {
- "label": "工具调用确认",
- "neverAsk": {
- "label": "运行工具前不再询问确认(不建议)",
- "hint": "禁用运行工具前的确认提示。不建议此操作。",
- "warnTitle": "确定吗?",
- "warnDescription": "禁用工具调用确认非常危险。如果您的插件中包含可能执行破坏性操作的工具(例如运行命令、删除文件、覆盖文件、上传文件等),模型将无需确认即可执行这些操作。您可以通过逐个工具或逐个插件的方式禁用确认提示。强烈不建议启用此选项。请谨慎操作。",
- "warnButton": "我了解风险"
- }
- }
-}
+{
+ "settingsDialogTitle": "应用设置",
+ "settingsDialogButtonTooltip": "应用设置",
+ "accountDialogButtonTooltip": "账户",
+
+ "settingsNewButtonPopover": {
+ "primary": "应用设置现已移至右下角",
+ "secondary": "点击⚙️按钮来打开",
+ "tertiary": "或者按"
+ },
+ "appUpdate": "应用更新",
+ "checkingAppUpdate": "正在检查更新...",
+ "checkForUpdates": "检查更新",
+ "failedCheckingAppUpdate": "检查更新失败",
+ "newUpdateAvailable": "LM Studio 有新版本可用!🎉",
+ "newBetaUpdateAvailable": "LM Studio 有新测试版可用!🛠️🎉",
+ "downloadingInProgress": "正在下载更新...",
+ "downloadUpdate": "更新至 LM Studio {{version}}",
+ "downloadBetaUpdate": "更新至 LM Studio 测试版 {{version}} (版本号 {{build}})",
+ "downloadCompleted": "下载完成!",
+ "updateDownloadComplete": "更新下载成功!",
+ "updateDownloadFailed": "更新失败!",
+ "hasFinishedDownloading": "下载完毕",
+ "yourCurrentVersion": "当前版本为:",
+ "latestVersion": "最新版本为:",
+ "downloadLabel": "立即更新",
+ "downloadLabel/Linux": "下载更新",
+ "cancelDownloadLabel": "取消",
+ "downloadingUpdate": "正在下载 {{item}}...",
+ "updateDownloaded": "新更新已成功下载。重启应用以应用更新",
+ "restartAppToUpdate": "重新启动应用以应用更新",
+ "appUpdatedToastTitle": "已更新至 {{title}}",
+ "appUpdatedToastDescriptionPrefix": "查看",
+ "AppUpdatedToastDescriptionReleaseNotes": "发行说明",
+ "toolUseToastTitle": "测试新功能:工具调用与函数调用 API",
+ "toolUseToastDescription": "支持 Llama 3.1/3.2、Mistral、Qwen 等部分模型,兼容 OpenAI 工具调用,快速上手。",
+ "toolUseToastButtonText": "前往开发者页面体验",
+ "doItLater": "稍后再说",
+ "failedToUpdate": "应用更新失败。请检查您的网络连接或稍后再试。",
+ "retryInBackground": "后台重试",
+ "laterLabel": "稍后",
+ "releaseNotesLabel": "发行说明",
+ "remindMeLater": "稍后提醒我",
+ "failedDownloadUpdate": "下载更新失败",
+ "installAndRelaunch": "安装并重新启动",
+ "uptodate": "您的应用已是最新版本!当前版本为 {{version}}",
+ "preferences": "偏好设置",
+ "general": "常规",
+ "sideButtonLabels": "显示侧边按钮标签",
+ "showModelFileNames": "在“我的模型”中始终显示完整模型文件名",
+ "colorThemeLabel": "颜色主题",
+ "complexityLevelLabel": "用户界面复杂度级别",
+ "selectComplexityLevelPlaceholder": "选择默认的UI复杂度级别",
+ "userComplexityLevelLabel": "普通用户",
+ "powerUserComplexityLevelLabel": "高级用户",
+ "developerComplexityLevelLabel": "开发者",
+ "chatSettingsLabel": "聊天设置",
+ "chat/alwaysShowPromptTemplate": "始终在聊天侧栏显示提示模板",
+ "chat/highlightChatMessageOnHover": "鼠标悬停时高亮显示聊天消息",
+ "chat/doubleClickMessageToEdit": "双击聊天消息以编辑",
+
+ "chat/aiNaming/label": "AI命名聊天",
+ "chat/aiNaming/mode/label": "AI生成的聊天名称",
+ "chat/aiNaming/mode/value/never": "关闭",
+ "chat/aiNaming/mode/value/never/subTitle": "不使用AI生成聊天名称",
+ "chat/aiNaming/mode/value/auto": "自动",
+ "chat/aiNaming/mode/value/auto/subTitle": "根据生成速度自动决定是否使用AI生成聊天名称",
+ "chat/aiNaming/mode/value/always": "开启",
+ "chat/aiNaming/mode/value/always/subTitle": "使用AI生成聊天名称",
+ "chat/aiNaming/emoji": "在AI生成的聊天名称中使用表情符号",
+ "chat/keyboardShortcuts/label": "键盘快捷键",
+ "chat/keyboardShortcuts/verbPrefix": "使用",
+ "chat/keyboardShortcuts/regenerate": "重新生成聊天中的最后一条消息",
+ "chat/keyboardShortcuts/sendMessage": "发送消息",
+
+ "onboarding/blockTitle": "新手引导",
+ "onboarding/dismissedHints": "已关闭的新手引导",
+ "onboarding/resetHintTooltip": "点击以重新启用新手引导",
+ "onboarding/resetAllHints": "重置所有新手引导",
+ "onboarding/noneDismissed": "没有已关闭的提示,目前所有提示项都会显示,直至下次关闭",
+
+ "firstTimeExperienceLabel": "首次聊天体验",
+ "firstTimeExperienceMarkCompletedLabel": "标记为已完成",
+ "firstTimeExperienceResetLabel": "重置",
+ "showPromptSuggestionsLabel": "创建新聊天时显示提示建议",
+ "darkThemeLabel": "深色",
+ "lightThemeLabel": "浅色",
+ "systemThemeLabel": "自动",
+ "sepiaThemeLabel": "护眼",
+ "unloadPreviousModelLabel": "选择要加载的模型时,先卸载所有当前已加载的模型",
+ "languageLabel": "语言",
+ "changeLanguageLabel": "选择应用语言(仍在开发中)",
+ "developerLabel": "开发者",
+ "localServiceLabel": "本地 LLM 服务(无界面)",
+ "showExperimentalFeaturesLabel": "显示实验性功能",
+ "appFirstLoadLabel": "应用首次加载体验",
+ "showDebugInfoBlocksInChatLabel": "在聊天中显示调试信息块",
+ "autoLoadBundledLLMLabel": "启动时自动加载捆绑的大语言模型",
+ "showReleaseNotes": "显示发行说明",
+ "hideReleaseNotes": "隐藏发行说明",
+
+ "backendDownloadNewUpdate": "有新的后端可用!",
+ "backendDownloadNewUpdateAction": "前往开发者页面",
+
+ "backendDownloadChannel.label": "LM Studio 扩展包下载频道",
+ "backendDownloadChannel.value.stable": "稳定版",
+ "backendDownloadChannel.value.beta": "测试版",
+ "backendDownloadChannel.value.latest": "开发版",
+ "backendDownloadChannel.shortLabel": "运行环境下载频道",
+ "backendDownloadChannel.hint": "选择从哪个频道下载 LM Studio 扩展包。\"{{stableName}}\" 是推荐给大多数用户的通道。",
+
+ "appUpdateChannel.label": "LM Studio 更新频道",
+ "appUpdateChannel.value.stable": "稳定版",
+ "appUpdateChannel.value.beta": "beta测试版",
+ "appUpdateChannel.value.alpha": "alpha测试版",
+ "appUpdateChannel.shortLabel": "应用更新频道",
+ "appUpdateChannel.hint": "选择从哪个频道接收 LM Studio 应用更新。\"{{stableName}}\" 是推荐给大多数用户的通道。",
+
+ "modelLoadingGuardrails.label": "模型加载保护",
+ "modelLoadingGuardrails.description": "超出系统资源限制加载模型可能导致系统不稳定或卡死。保护措施可以防止意外过载。如果需要,可以在这里调整这些限制。但请注意,接近系统极限加载模型可能会降低稳定性。",
+ "modelLoadingGuardrails.value.off": "关闭(不推荐)",
+ "modelLoadingGuardrails.value.off/subTitle": "不对系统过载采取预防措施",
+ "modelLoadingGuardrails.value.off/detail": "关闭详情",
+ "modelLoadingGuardrails.value.low": "宽松",
+ "modelLoadingGuardrails.value.low/subTitle": "轻微预防系统过载",
+ "modelLoadingGuardrails.value.low/detail": "宽松详情",
+ "modelLoadingGuardrails.value.medium": "平衡",
+ "modelLoadingGuardrails.value.medium/subTitle": "适度预防系统过载",
+ "modelLoadingGuardrails.value.medium/detail": "平衡详情",
+ "modelLoadingGuardrails.value.high": "严格",
+ "modelLoadingGuardrails.value.high/subTitle": "强烈预防系统过载",
+ "modelLoadingGuardrails.value.high/detail": "严格详情",
+ "modelLoadingGuardrails.value.custom": "自定义",
+ "modelLoadingGuardrails.value.custom/subTitle": "设置最大可加载模型大小的自定义限制",
+ "modelLoadingGuardrails.value.custom/detail": "自定义详情",
+ "modelLoadingGuardrails.custom.label": "内存限制:",
+ "modelLoadingGuardrails.custom.unitGB": "GB",
+ "modelLoadingGuardrails.custom.description": "为模型加载设置自定义内存限制。如果加载的模型会超过此限制,则不会加载模型。",
+
+ "experimentalLoadPresets": "在预设中启用模型加载配置支持",
+ "experimentalLoadPresets.description": "是否允许预设包含模型加载配置。此功能尚处于试验阶段,我们欢迎反馈。",
+
+ "unloadPreviousJITModelOnLoad": "模型自动卸载:始终仅允许一个JIT模型加载(加载新模型时卸载上一个)",
+ "autoDeleteExtensionPacks": "自动删除最近最少使用的运行环境扩展包",
+ "autoUpdateExtensionPacks": "自动更新选中运行环境扩展包",
+ "useHFProxy.label": "使用 LM Studio 的 Hugging Face 代理",
+ "useHFProxy.hint": "使用 LM Studio 提供的 Hugging Face 代理进行模型搜索和下载。适用于无法直接访问 Hugging Face 的用户。",
+ "separateReasoningContentInResponses": "在API响应中区分 `reasoning_content` 和 `content`(如适用)",
+ "separateReasoningContentInResponses/hint": "该设置仅适用于像 DeepSeek R1 及其蒸馏模型等输出带有 `<think>` 和 `</think>` 标记的推理模型。",
+
+ "promptWhenCommittingUnsavedChangesWithNewFields": "提交新字段到预设时显示确认对话框",
+ "promptWhenCommittingUnsavedChangesWithNewFields.description": "如果您想避免意外向预设添加新字段,这将非常有用",
+
+ "enableLocalService": "启用本地 LLM 服务",
+ "enableLocalService.subtitle": "使用 LM Studio 的 LLM 服务器,而无需保持 LM Studio 应用程序打开",
+ "enableLocalService.description": "启用时,LM Studio 本地 LLM 服务将自动启动。关闭 LM Studio 时,本地 LLM 服务也将在系统托盘中继续运行。",
+
+ "expandConfigsOnClick": "点击而非悬停时展开配置",
+
+ "migrateChats": {
+ "label": "迁移 0.3.0 之前的聊天记录",
+ "hasBetterLabel": "重新迁移 0.3.0 之前的聊天记录",
+ "action_one": "迁移 1 条聊天记录",
+ "action_other": "迁移 {{count}} 条聊天记录",
+ "inProgress": "正在迁移聊天记录...",
+ "hint": {
+ "primary": "我们对 v0.3.0+ 版本的聊天记录内部数据结构进行了改造,以支持多版本聊天消息等功能。为了让旧聊天记录出现在应用中,需要将其迁移到新格式。",
+ "details": "迁移过程不会删除您的旧聊天记录,而是会创建一个新格式的副本。",
+ "footer": "您仍然可以通过旧版本的 LM Studio 访问您的旧聊天记录。目前图片不会自动迁移。"
+ },
+ "hasBetterHint": {
+ "primary": "自从您上次迁移旧聊天记录以来,我们已经改进了聊天记录迁移器。您想要再次运行它吗?",
+ "details": "迁移过程将创建一个包含新迁移聊天记录的新文件夹。您的旧聊天记录将保持不变。",
+ "footer": "您仍然可以通过旧版本的 LM Studio 访问您的旧聊天记录。目前图片不会自动迁移。"
+ },
+ "success": "聊天记录迁移成功!",
+ "success_one": "1 条聊天记录迁移成功",
+ "success_other": "{{count}} 条聊天记录迁移成功",
+ "showInstructionsButton": "显示指南",
+ "footerCardText": "来自 LM Studio 早期版本的聊天记录需要迁移才能在此版本中使用。",
+ "hasBetterFooterCardText": "自从您上次迁移旧聊天记录以来,我们已经改进了聊天记录迁移器。您可以重新运行迁移。(我们将创建一个包含新迁移聊天记录的新文件夹。)",
+ "dismissConfirm": "关闭",
+ "dismissConfirmDescription": "您随时可以在设置中处理聊天记录迁移"
+ },
+ "toolConfirmation": {
+ "label": "工具调用确认",
+ "neverAsk": {
+ "label": "运行工具前不再询问确认(不建议)",
+ "hint": "禁用运行工具前的确认提示。不建议此操作。",
+ "warnTitle": "确定吗?",
+ "warnDescription": "禁用工具调用确认非常危险。如果您的插件中包含可能执行破坏性操作的工具(例如运行命令、删除文件、覆盖文件、上传文件等),模型将无需确认即可执行这些操作。您可以通过逐个工具或逐个插件的方式禁用确认提示。强烈不建议启用此选项。请谨慎操作。",
+ "warnButton": "我了解风险"
+ }
+ },
+
+ "modelLoadingGuardrails.alwaysAllowLoadAnyway": "(不推荐)始终允许“强制加载”而无需按住 Alt/Option",
+ "modelDefaultsLabel": "模型默认设置",
+ "appNavigationBarPositionLabel": "导航栏位置",
+ "appNavigationBarPositionLeft": "左侧",
+ "appNavigationBarPositionTop": "顶部",
+ "defaultContextLength": {
+ "label": "默认上下文长度",
+ "maxTitle": "模型最大值",
+ "customTitle": "自定义值",
+ "maxSubtitle": "使用每个模型支持的最大上下文长度。",
+ "customSubtitle": "设置加载新模型时的默认上下文长度。如果模型支持的最大上下文长度较低,则使用该值。",
+ "invalidNaNError": "无效的上下文长度值。使用 {{value}}",
+ "invalidRangeError": "无效的上下文长度值。应在 1 到 2^30 的范围内。使用 {{value}}",
+ "largeContextWarning": "上下文长度越高,模型占用的内存就越多。如果不确定,请不要更改默认值"
+ },
+ "jitTTL": {
+ "subtitle": "即时加载的模型在空闲指定时间后将自动卸载。"
+ }
+}
diff --git a/zh-CN/shared.json b/zh-CN/shared.json
index 24fc7431..c19259be 100644
--- a/zh-CN/shared.json
+++ b/zh-CN/shared.json
@@ -50,6 +50,7 @@
"pending": "待处理",
"doneWithExclamation": "完成!",
"done": "完成",
+ "beta": "测试版",
"complete": {
"completeWithEllipsis": "完成...",