Add {displayInfoOfProviderName(providerName).title}
- {(providerName === 'gemini' || providerName === 'openRouter') && (
+ {(providerName === 'gemini' || providerName === 'openRouter' || providerName === 'pollinations') && (
Details
@@ -614,7 +616,7 @@ const VoidOnboardingContent = () => {
const providerNamesOfWantToUseOption: { [wantToUseOption in WantToUseOption]: ProviderName[] } = {
smart: ['anthropic', 'openAI', 'gemini', 'openRouter'],
private: ['ollama', 'vLLM', 'openAICompatible', 'lmStudio'],
- cheap: ['gemini', 'deepseek', 'openRouter', 'ollama', 'vLLM'],
+ cheap: ['gemini', 'deepseek', 'openRouter', 'pollinations', 'ollama', 'vLLM'],
all: providerNames,
}
diff --git a/src/vs/workbench/contrib/cortexide/common/cortexideSettingsService.ts b/src/vs/workbench/contrib/cortexide/common/cortexideSettingsService.ts
index f42ff4ce21a..3b479118877 100644
--- a/src/vs/workbench/contrib/cortexide/common/cortexideSettingsService.ts
+++ b/src/vs/workbench/contrib/cortexide/common/cortexideSettingsService.ts
@@ -709,7 +709,7 @@ class VoidSettingsService extends Disposable implements ICortexideSettingsServic
}
// Try to find the first available configured model (prefer online models first, then local)
- const providerNames: ProviderName[] = ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM']
+ const providerNames: ProviderName[] = ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM', 'pollinations']
for (const providerName of providerNames) {
const providerSettings = this.state.settingsOfProvider[providerName]
diff --git a/src/vs/workbench/contrib/cortexide/common/cortexideSettingsTypes.ts b/src/vs/workbench/contrib/cortexide/common/cortexideSettingsTypes.ts
index b4f3a52e54f..f24858eb555 100644
--- a/src/vs/workbench/contrib/cortexide/common/cortexideSettingsTypes.ts
+++ b/src/vs/workbench/contrib/cortexide/common/cortexideSettingsTypes.ts
@@ -106,6 +106,9 @@ export const displayInfoOfProviderName = (providerName: ProviderName): DisplayIn
else if (providerName === 'awsBedrock') {
return { title: 'AWS Bedrock', }
}
+ else if (providerName === 'pollinations') {
+ return { title: 'Pollinations', }
+ }
throw new Error(`descOfProviderName: Unknown provider name: "${providerName}"`)
}
@@ -128,6 +131,7 @@ export const subTextMdOfProviderName = (providerName: ProviderName): string => {
if (providerName === 'vLLM') return 'Read more about custom [Endpoints here](https://docs.vllm.ai/en/latest/getting_started/quickstart.html#openai-compatible-server).'
if (providerName === 'lmStudio') return 'Read more about custom [Endpoints here](https://lmstudio.ai/docs/app/api/endpoints/openai).'
if (providerName === 'liteLLM') return 'Read more about endpoints [here](https://docs.litellm.ai/docs/providers/openai_compatible).'
+ if (providerName === 'pollinations') return 'Get your [API Key here](https://enter.pollinations.ai/). [API Docs](https://enter.pollinations.ai/api/docs).'
throw new Error(`subTextMdOfProviderName: Unknown provider name: "${providerName}"`)
}
@@ -156,7 +160,8 @@ export const displayInfoOfSettingName = (providerName: ProviderName, settingName
providerName === 'googleVertex' ? 'AIzaSy...' :
providerName === 'microsoftAzure' ? 'key-...' :
providerName === 'awsBedrock' ? 'key-...' :
- '',
+ providerName === 'pollinations' ? 'sk-... or pk-...' :
+ '',
isPasswordField: true,
}
@@ -352,6 +357,12 @@ export const defaultSettingsOfProvider: SettingsOfProvider = {
...modelInfoOfDefaultModelNames(defaultModelsOfProvider.awsBedrock),
_didFillInProviderSettings: undefined,
},
+ pollinations: {
+ ...defaultCustomSettings,
+ ...defaultProviderSettings.pollinations,
+ ...modelInfoOfDefaultModelNames(defaultModelsOfProvider.pollinations),
+ _didFillInProviderSettings: undefined,
+ },
}
diff --git a/src/vs/workbench/contrib/cortexide/common/errorDetectionService.ts b/src/vs/workbench/contrib/cortexide/common/errorDetectionService.ts
index ffe19128877..0bf4695b4a2 100644
--- a/src/vs/workbench/contrib/cortexide/common/errorDetectionService.ts
+++ b/src/vs/workbench/contrib/cortexide/common/errorDetectionService.ts
@@ -300,8 +300,8 @@ class ErrorDetectionService extends Disposable implements IErrorDetectionService
// Resolve auto model selection
if (modelSelection.providerName === 'auto' && modelSelection.modelName === 'auto') {
- const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM'> =
- ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM'];
+ const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM' | 'pollinations'> =
+ ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM', 'pollinations'];
for (const providerName of providerNames) {
const providerSettings = settings.settingsOfProvider[providerName];
diff --git a/src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts b/src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts
index 81589f37083..2409775994c 100644
--- a/src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts
+++ b/src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts
@@ -85,6 +85,9 @@ export const defaultProviderSettings = {
region: 'us-east-1', // add region setting
endpoint: '', // optionally allow overriding default
},
+ pollinations: {
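+ // API key from https://enter.pollinations.ai; requests themselves go to gen.pollinations.ai (see sendLLMMessage.impl.ts)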
+ apiKey: '',
+ },
} as const
@@ -278,6 +281,14 @@ export const defaultModelsOfProvider = {
microsoftAzure: [],
awsBedrock: [],
liteLLM: [],
+ pollinations: [ // https://enter.pollinations.ai/api/docs, https://pollinations.ai/llms.txt
+ 'openai',
+ 'gemini',
+ 'gemini-large',
+ 'claude',
+ 'deepseek',
+ 'qwen3-coder-30b',
+ ],
} as const satisfies Record<ProviderName, readonly string[]>
@@ -1701,6 +1712,22 @@ const liteLLMSettings: VoidStaticProviderInfo = { // https://docs.litellm.ai/doc
},
}
+// ---------------- POLLINATIONS ----------------
+const pollinationsSettings: VoidStaticProviderInfo = {
+ modelOptionsFallback: (modelName) => {
+ const fallback = extensiveModelOptionsFallback(modelName);
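+ // If the shared fallback recognizes the model but leaves the tool format unset, assume OpenAI-style tool calls, since Pollinations is driven through the OpenAI-compatible chat path.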
+ if (fallback && !fallback.specialToolFormat) {
+ fallback.specialToolFormat = 'openai-style';
+ }
+ return fallback;
+ },
+ modelOptions: {},
+ providerReasoningIOSettings: {
+ input: { includeInPayload: openAICompatIncludeInPayloadReasoning },
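+ // Assumes reasoning arrives in the DeepSeek-style 'reasoning_content' delta field, as with the other OpenAI-compatible providers above.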
+ output: { nameOfFieldInDelta: 'reasoning_content' },
+ },
+}
+
// ---------------- OPENROUTER ----------------
const openRouterModelOptions_assumingOpenAICompat = {
@@ -1929,6 +1956,8 @@ const modelSettingsOfProvider: { [providerName in ProviderName]: VoidStaticProvi
liteLLM: liteLLMSettings,
lmStudio: lmStudioSettings,
+ pollinations: pollinationsSettings,
+
googleVertex: googleVertexSettings,
microsoftAzure: microsoftAzureSettings,
awsBedrock: awsBedrockSettings,
diff --git a/src/vs/workbench/contrib/cortexide/common/nlShellParserService.ts b/src/vs/workbench/contrib/cortexide/common/nlShellParserService.ts
index 2aed5e7e90f..62b57c72b8f 100644
--- a/src/vs/workbench/contrib/cortexide/common/nlShellParserService.ts
+++ b/src/vs/workbench/contrib/cortexide/common/nlShellParserService.ts
@@ -69,8 +69,8 @@ class NLShellParserService implements INLShellParserService {
// If auto is selected, try to find a fallback model
if (modelSelection.providerName === 'auto' && modelSelection.modelName === 'auto') {
// Try to find the first available configured model (prefer online models first, then local)
- const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM'> =
- ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM'];
+ const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM' | 'pollinations'> =
+ ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM', 'pollinations'];
let fallbackModel: { providerName: string; modelName: string } | null = null;
for (const providerName of providerNames) {
diff --git a/src/vs/workbench/contrib/cortexide/electron-main/llmMessage/sendLLMMessage.impl.ts b/src/vs/workbench/contrib/cortexide/electron-main/llmMessage/sendLLMMessage.impl.ts
index ed1745d968d..e79ad9c9a25 100644
--- a/src/vs/workbench/contrib/cortexide/electron-main/llmMessage/sendLLMMessage.impl.ts
+++ b/src/vs/workbench/contrib/cortexide/electron-main/llmMessage/sendLLMMessage.impl.ts
@@ -22,7 +22,7 @@ import { availableTools, InternalToolInfo } from '../../common/prompt/prompts.js
import { generateUuid } from '../../../../../base/common/uuid.js';
const getGoogleApiKey = async () => {
- // module‑level singleton
+ // module-level singleton
const auth = new GoogleAuth({ scopes: `https://www.googleapis.com/auth/cloud-platform` });
const key = await auth.getAccessToken()
if (!key) throw new Error(`Google API failed to generate a key.`)
@@ -290,11 +290,11 @@ const newOpenAICompatibleSDK = async ({ settingsOfProvider, providerName, includ
*/
const { endpoint, apiKey } = settingsOfProvider.awsBedrock
- // ① use the user-supplied proxy if present
- // ② otherwise default to local LiteLLM
+ // 1) use the user-supplied proxy if present
+ // 2) otherwise default to local LiteLLM
let baseURL = endpoint || 'http://localhost:4000/v1'
- // Normalize: make sure we end with “/v1”
+ // Normalize: make sure we end with "/v1"
if (!baseURL.endsWith('/v1'))
baseURL = baseURL.replace(/\/+$/, '') + '/v1'
@@ -323,6 +323,11 @@ const newOpenAICompatibleSDK = async ({ settingsOfProvider, providerName, includ
const thisConfig = settingsOfProvider[providerName]
return new OpenAI({ baseURL: 'https://api.mistral.ai/v1', apiKey: thisConfig.apiKey, ...commonPayloadOpts })
}
+ else if (providerName === 'pollinations') {
+ // Inference is at gen.pollinations.ai; API keys are from enter.pollinations.ai
+ const thisConfig = settingsOfProvider[providerName]
+ return new OpenAI({ baseURL: 'https://gen.pollinations.ai/v1', apiKey: thisConfig.apiKey, ...commonPayloadOpts })
+ }
else throw new Error(`CortexIDE providerName was invalid: ${providerName}.`)
}
@@ -517,6 +522,36 @@ const rawToolCallObjOfAnthropicParams = (toolBlock: Anthropic.Messages.ToolUseBl
// ------------ OPENAI-COMPATIBLE ------------
+// Placeholder for empty message content; Vertex/Pollinations require "non-whitespace text", not just a space.
+const EMPTY_CONTENT_PLACEHOLDER = '(no content)'
+
+/**
+ * Sanitize messages for APIs (e.g. Vertex, Pollinations) that require non-empty, non-whitespace content
+ * in every message except the optional final assistant message.
+ * Only mutates messages that have a 'content' field (OpenAI/Anthropic style); Gemini-style (parts) are passed through.
+ */
+const sanitizeOpenAIMessagesForEmptyContent = (messages: LLMChatMessage[]): LLMChatMessage[] => {
+ if (!messages?.length) return messages
+ const lastIdx = messages.length - 1
+ const result = messages.map((msg, i) => {
+ if (!('content' in msg)) return msg
+ const content = (msg as { role: string; content: string | unknown[] }).content
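+ // The final assistant message may be an intentionally empty prefill, so it is left alone below.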
+ const isLastAndAssistant = i === lastIdx && msg.role === 'assistant'
+ if (typeof content === 'string') {
+ if (content.trim().length > 0) return msg
+ if (isLastAndAssistant) return msg
+ return { ...msg, content: EMPTY_CONTENT_PLACEHOLDER }
+ }
+ if (Array.isArray(content)) {
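+ // A parts array counts as non-empty if any text part has non-blank text or any image part has a URL.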
+ const hasNonEmptyPart = content.some((p: any) => (p.type === 'text' && p.text?.trim?.()) || (p.type === 'image_url' && p.image_url?.url))
+ if (hasNonEmptyPart || isLastAndAssistant) return msg
+ return { ...msg, content: [{ type: 'text', text: EMPTY_CONTENT_PLACEHOLDER }] }
+ }
+ return msg
+ })
+ return result as LLMChatMessage[]
+}
+
const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onError, settingsOfProvider, modelSelectionOptions, modelName: modelName_, _setAborter, providerName, chatMode, separateSystemMessage, overridesOfModel, mcpTools }: SendChatParams_Internal) => {
const {
modelName,
@@ -525,6 +560,9 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onE
additionalOpenAIPayload,
} = getModelCapabilities(providerName, modelName_, overridesOfModel)
+ // APIs like Vertex/Pollinations require non-empty content except for the optional final assistant message
+ const messagesToSend = sanitizeOpenAIMessagesForEmptyContent(messages)
+
const { providerReasoningIOSettings } = getProviderCapabilities(providerName)
// reasoning
@@ -764,7 +802,7 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onE
// Try streaming first
const options: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
model: modelName,
- messages: messages as any,
+ messages: messagesToSend as any,
stream: true,
...nativeToolsObj,
...additionalOpenAIPayload
@@ -824,7 +862,7 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onE
// Silently retry - don't show error notification for organization verification issues
const nonStreamingOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
model: modelName,
- messages: messages as any,
+ messages: messagesToSend as any,
stream: false,
...nativeToolsObj,
...additionalOpenAIPayload
@@ -879,7 +917,7 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onE
// CRITICAL: Retry immediately without delay for tool support errors (they're fast to detect)
const optionsWithoutTools: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
model: modelName,
- messages: messages as any,
+ messages: messagesToSend as any,
stream: true,
// Explicitly omit tools - don't include nativeToolsObj
...additionalOpenAIPayload
@@ -1551,6 +1589,11 @@ export const sendLLMMessageToProviderImplementation = {
sendFIM: null,
list: null,
},
+ pollinations: {
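+ // Chat reuses the shared OpenAI-compatible implementation; FIM and model listing are not wired up for Pollinations.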
+ sendChat: (params) => _sendOpenAICompatibleChat(params),
+ sendFIM: null,
+ list: null,
+ },
} satisfies CallFnOfProvider
@@ -1567,7 +1610,7 @@ codestral https://ollama.com/library/codestral/blobs/51707752a87c
[SUFFIX]{{ .Suffix }}[PREFIX] {{ .Prompt }}
deepseek-coder-v2 https://ollama.com/library/deepseek-coder-v2/blobs/22091531faf0
<|fim▁begin|>{{ .Prompt }}<|fim▁hole|>{{ .Suffix }}<|fim▁end|>
starcoder2 https://ollama.com/library/starcoder2/blobs/3b190e68fefe