3 changes: 2 additions & 1 deletion build/filters.js
@@ -109,6 +109,7 @@ module.exports.indentationFilter = [
 	'!build/win32/**',
 	'!build/checker/**',
 	'!src/vs/workbench/contrib/terminal/common/scripts/psreadline/**',
+	'!src/vs/workbench/contrib/cortexide/**',
 
 	// except multiple specific files
 	'!**/package.json',
@@ -187,7 +188,7 @@ module.exports.copyrightFilter = [
 	'!extensions/html-language-features/server/src/modes/typescript/*',
 	'!extensions/*/server/bin/*',
 	'!src/vs/workbench/contrib/terminal/common/scripts/psreadline/**',
-	'!src/vs/workbench/contrib/cortexide/browser/react/**',
+	'!src/vs/workbench/contrib/cortexide/**',
 ];
 
 module.exports.tsFormattingFilter = [
36 changes: 20 additions & 16 deletions src/vs/workbench/contrib/cortexide/browser/chatThreadService.ts
@@ -3465,23 +3465,27 @@ Output ONLY the JSON, no other text. Start with { and end with }.`
 			// Mark stream as complete with 0 tokens on error
 			chatLatencyAudit.markStreamComplete(finalRequestId, 0)
 
-			// Audit log: record error
-			// PERFORMANCE: Reuse cached auditEnabled check from earlier in function
-			if (auditEnabled && modelSelection) {
-				await this._auditLogService.append({
-					ts: Date.now(),
-					action: 'reply',
-					model: `${modelSelection.providerName}/${modelSelection.modelName}`,
-					ok: false,
-					meta: {
-						threadId,
-						requestId: finalRequestId,
-						error: error?.message,
-					},
-				});
-			}
+			// Clear stream state immediately so submit button becomes active (avoids stuck "Waiting for model response..." if audit or resolve fails)
+			this._setStreamState(threadId, { isRunning: undefined, error })
 
-			resMessageIsDonePromise({ type: 'llmError', error: error })
+			try {
+				// Audit log: record error
+				if (auditEnabled && modelSelection) {
+					await this._auditLogService.append({
+						ts: Date.now(),
+						action: 'reply',
+						model: `${modelSelection.providerName}/${modelSelection.modelName}`,
+						ok: false,
+						meta: {
+							threadId,
+							requestId: finalRequestId,
+							error: error?.message,
+						},
+					});
+				}
+			} finally {
+				resMessageIsDonePromise({ type: 'llmError', error: error })
+			}
 		},
 		onAbort: () => {
 			// stop the loop to free up the promise, but don't modify state (already handled by whatever stopped it)
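The behavioral fix in this hunk: stream state is released before the audit write, and the done-promise is resolved in a `finally`, so a throwing `append` can no longer strand the UI on "Waiting for model response...". A minimal sketch of the pattern, with `setStreamState`, `auditLog`, and the entry shape as illustrative stand-ins for the real services:

```ts
// Sketch of the hardened error path (stand-in names, not the actual service API).
type StreamState = { isRunning?: 'LLM' | 'preparing'; error?: Error };

declare function setStreamState(state: StreamState): void;
declare const auditLog: { append(entry: object): Promise<void> };

async function onLLMError(
	error: Error,
	resolveDone: (result: { type: 'llmError'; error: Error }) => void,
): Promise<void> {
	// 1. Clear stream state first so the submit button re-activates
	//    no matter what happens below.
	setStreamState({ isRunning: undefined, error });
	try {
		// 2. Best-effort audit write; a throw here must not block completion.
		await auditLog.append({ ts: Date.now(), action: 'reply', ok: false, meta: { error: error.message } });
	} finally {
		// 3. Always resolve the caller's promise, audit success or not.
		resolveDone({ type: 'llmError', error });
	}
}
```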
@@ -4526,42 +4526,52 @@ export const SidebarChat = () => {
 				{(isRunning === 'LLM' || isRunning === 'preparing') && !displayContentSoFar && !reasoningSoFar ? (
 					<ProseWrapper>
 						<div
-							className="flex items-center gap-2 text-sm opacity-70 loading-state-transition"
+							className="flex flex-col gap-1"
 							role="status"
 							aria-live="polite"
 							aria-atomic="true"
 						>
-							{isRunning === 'preparing' && currThreadStreamState?.llmInfo?.displayContentSoFar ? (
-								<>
-									<span className="text-void-fg-2" aria-hidden="false">{currThreadStreamState.llmInfo.displayContentSoFar}</span>
-									<IconLoading state="thinking" inline />
-								</>
-							) : isRunning === 'preparing' ? (
-								<>
-									<span className="text-void-fg-2" aria-hidden="false">Preparing request</span>
-									<IconLoading state="thinking" inline />
-								</>
-							) : (
-								<>
-									<span className="text-void-fg-2" aria-hidden="false">Generating response</span>
-									<IconLoading state="typing" inline />
-								</>
-							)}
+							<div className="flex items-center gap-2 text-sm opacity-70 loading-state-transition">
+								{isRunning === 'preparing' && currThreadStreamState?.llmInfo?.displayContentSoFar ? (
+									<>
+										<span className="text-void-fg-2" aria-hidden="false">{currThreadStreamState.llmInfo.displayContentSoFar}</span>
+										<IconLoading state="thinking" inline />
+									</>
+								) : isRunning === 'preparing' ? (
+									<>
+										<span className="text-void-fg-2" aria-hidden="false">Preparing request</span>
+										<IconLoading state="thinking" inline />
+									</>
+								) : (
+									<>
+										<span className="text-void-fg-2" aria-hidden="false">Generating response</span>
+										<IconLoading state="typing" inline />
+									</>
+								)}
+							</div>
+							<span className="text-xs text-void-fg-3 opacity-60">Press Escape to cancel</span>
 						</div>
 					</ProseWrapper>
 				) : null}
 
+				{/* Escape hint when streaming (e.g. "Waiting for model response...") */}
+				{(isRunning === 'LLM' || isRunning === 'preparing') && (displayContentSoFar || reasoningSoFar) ? (
+					<p className="text-xs text-void-fg-3 opacity-60 mt-1" role="status">Press Escape to cancel</p>
+				) : null}
 
-
 				{/* error message */}
 				{latestError === undefined ? null :
-					<div className='px-2 my-1 message-enter'>
+					<div className='px-2 my-1 message-enter space-y-2'>
 						<ErrorDisplay
 							message={latestError.message}
 							fullError={latestError.fullError}
 							onDismiss={() => { chatThreadsService.dismissStreamError(currentThread.id) }}
 							showDismiss={true}
 						/>
+
+						<p className="text-sm text-void-fg-3 px-1">
+							You can try again or open settings to change the model.
+						</p>
 						<WarningBox className='text-sm my-1 mx-3' onClick={() => { commandService.executeCommand(CORTEXIDE_OPEN_SETTINGS_ACTION_ID) }} text='Open settings' />
 					</div>
 				}
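The two "Press Escape to cancel" hints above are display-only; the PR assumes the Escape handling itself is wired elsewhere in the workbench. For illustration only, a React hook of the kind that could back the hint (`isStreaming` and `cancelStream` are hypothetical names, not the actual service API):

```ts
// Sketch: Escape-to-cancel listener. The real cancellation path presumably
// goes through the chat thread service, not raw window events.
import { useEffect } from 'react';

export function useEscapeToCancel(isStreaming: boolean, cancelStream: () => void): void {
	useEffect(() => {
		if (!isStreaming) return;
		const onKeyDown = (e: KeyboardEvent) => {
			if (e.key === 'Escape') cancelStream();
		};
		window.addEventListener('keydown', onKeyDown);
		// Detach when streaming ends or the component unmounts.
		return () => window.removeEventListener('keydown', onKeyDown);
	}, [isStreaming, cancelStream]);
}
```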
@@ -8,7 +8,7 @@ import { SettingsOfProvider, ModelSelection, ProviderName } from '../../../../co
 /**
  * Vision-capable providers that require API keys
  */
-const VISION_PROVIDERS: ProviderName[] = ['anthropic', 'openAI', 'gemini'];
+const VISION_PROVIDERS: ProviderName[] = ['anthropic', 'openAI', 'gemini', 'pollinations'];
 
 /**
  * Checks if user has any vision-capable API keys configured
@@ -125,9 +125,9 @@ const cloudProviders: ProviderName[] = ['googleVertex', 'liteLLM', 'microsoftAzu

 // Data structures for provider tabs
 const providerNamesOfTab: Record<TabName, ProviderName[]> = {
-	Free: ['gemini', 'openRouter'],
+	Free: ['gemini', 'openRouter', 'pollinations'],
 	Local: localProviderNames,
-	Paid: providerNames.filter(pn => !(['gemini', 'openRouter', ...localProviderNames, ...cloudProviders] as string[]).includes(pn)) as ProviderName[],
+	Paid: providerNames.filter(pn => !(['gemini', 'openRouter', 'pollinations', ...localProviderNames, ...cloudProviders] as string[]).includes(pn)) as ProviderName[],
 	'Cloud/Other': cloudProviders,
 };
 
@@ -237,14 +237,16 @@ const AddProvidersPage = ({ pageIndex, setPageIndex }: { pageIndex: number, setP
<div className="flex items-center justify-between mb-3">
<div className="text-xl font-medium text-void-fg-0 flex items-center gap-2">
Add {displayInfoOfProviderName(providerName).title}
{(providerName === 'gemini' || providerName === 'openRouter') && (
{(providerName === 'gemini' || providerName === 'openRouter' || providerName === 'pollinations') && (
<span
data-tooltip-id="void-tooltip-provider-info"
data-tooltip-place="right"
className="text-xs text-blue-400"
data-tooltip-content={providerName === 'gemini'
? 'Gemini 2.5 Pro offers 25 free chats daily, Flash offers ~500. Upgrade later if you exhaust credits.'
: 'OpenRouter grants 50 free chats a day (1000 with a $10 deposit) on models tagged :free.'}
: providerName === 'openRouter'
? 'OpenRouter grants 50 free chats a day (1000 with a $10 deposit) on models tagged :free.'
: 'Cheap API with many models (Pollen credits). Get your key at enter.pollinations.ai.'}
>
Details
</span>
@@ -614,7 +616,7 @@ const VoidOnboardingContent = () => {
 	const providerNamesOfWantToUseOption: { [wantToUseOption in WantToUseOption]: ProviderName[] } = {
 		smart: ['anthropic', 'openAI', 'gemini', 'openRouter'],
 		private: ['ollama', 'vLLM', 'openAICompatible', 'lmStudio'],
-		cheap: ['gemini', 'deepseek', 'openRouter', 'ollama', 'vLLM'],
+		cheap: ['gemini', 'deepseek', 'openRouter', 'pollinations', 'ollama', 'vLLM'],
 		all: providerNames,
 	}

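Note the invariant the two tab edits above maintain: the `Paid` tab is computed by exclusion, so a provider added to `Free` must also be added to the `Paid` filter's exclusion list, or it would appear under both tabs. A toy illustration of the pattern (provider lists abbreviated):

```ts
// Each provider should land in exactly one tab; Paid is the complement.
const free = ['gemini', 'openRouter', 'pollinations'];
const local = ['ollama', 'vLLM', 'lmStudio'];
const cloud = ['googleVertex', 'liteLLM', 'microsoftAzure'];
const all = [...free, ...local, ...cloud, 'anthropic', 'openAI', 'deepseek'];

// Forgetting to exclude a Free provider here would duplicate it across tabs.
const paid = all.filter(p => ![...free, ...local, ...cloud].includes(p));
console.log(paid); // ['anthropic', 'openAI', 'deepseek']
```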
@@ -709,7 +709,7 @@ class VoidSettingsService extends Disposable implements ICortexideSettingsServic
 		}
 
 		// Try to find the first available configured model (prefer online models first, then local)
-		const providerNames: ProviderName[] = ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM']
+		const providerNames: ProviderName[] = ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM', 'pollinations']
 
 		for (const providerName of providerNames) {
 			const providerSettings = this.state.settingsOfProvider[providerName]
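The same ordered scan recurs in three services touched by this PR (the settings service above, error detection and NL shell parsing below): walk providers from most to least preferred and take the first whose settings are filled in. A condensed sketch of the shared pattern; `_didFillInProviderSettings` mirrors the flag used in the settings state, the rest is illustrative:

```ts
// Pick the first configured provider in preference order (online before local).
type ProviderSettings = { _didFillInProviderSettings?: boolean };

function pickFallbackProvider(
	order: string[],
	settingsOfProvider: Record<string, ProviderSettings | undefined>,
): string | null {
	for (const name of order) {
		if (settingsOfProvider[name]?._didFillInProviderSettings) {
			return name; // first configured provider wins
		}
	}
	return null; // nothing configured; caller keeps 'auto' or surfaces an error
}
```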
@@ -106,6 +106,9 @@ export const displayInfoOfProviderName = (providerName: ProviderName): DisplayIn
 	else if (providerName === 'awsBedrock') {
 		return { title: 'AWS Bedrock', }
 	}
+	else if (providerName === 'pollinations') {
+		return { title: 'Pollinations', }
+	}
 
 	throw new Error(`descOfProviderName: Unknown provider name: "${providerName}"`)
 }
@@ -128,6 +131,7 @@ export const subTextMdOfProviderName = (providerName: ProviderName): string => {
 	if (providerName === 'vLLM') return 'Read more about custom [Endpoints here](https://docs.vllm.ai/en/latest/getting_started/quickstart.html#openai-compatible-server).'
 	if (providerName === 'lmStudio') return 'Read more about custom [Endpoints here](https://lmstudio.ai/docs/app/api/endpoints/openai).'
 	if (providerName === 'liteLLM') return 'Read more about endpoints [here](https://docs.litellm.ai/docs/providers/openai_compatible).'
+	if (providerName === 'pollinations') return 'Get your [API Key here](https://enter.pollinations.ai/). [API Docs](https://enter.pollinations.ai/api/docs).'
 
 	throw new Error(`subTextMdOfProviderName: Unknown provider name: "${providerName}"`)
 }
@@ -156,7 +160,8 @@ export const displayInfoOfSettingName = (providerName: ProviderName, settingName
 					providerName === 'googleVertex' ? 'AIzaSy...' :
 						providerName === 'microsoftAzure' ? 'key-...' :
 							providerName === 'awsBedrock' ? 'key-...' :
-								'',
+								providerName === 'pollinations' ? 'sk-... or pk-...' :
+									'',
 
 			isPasswordField: true,
 		}
@@ -352,6 +357,12 @@ export const defaultSettingsOfProvider: SettingsOfProvider = {
 		...modelInfoOfDefaultModelNames(defaultModelsOfProvider.awsBedrock),
 		_didFillInProviderSettings: undefined,
 	},
+	pollinations: {
+		...defaultCustomSettings,
+		...defaultProviderSettings.pollinations,
+		...modelInfoOfDefaultModelNames(defaultModelsOfProvider.pollinations),
+		_didFillInProviderSettings: undefined,
+	},
 }


@@ -300,8 +300,8 @@ class ErrorDetectionService extends Disposable implements IErrorDetectionService

 		// Resolve auto model selection
 		if (modelSelection.providerName === 'auto' && modelSelection.modelName === 'auto') {
-			const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM'> =
-				['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM'];
+			const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM' | 'pollinations'> =
+				['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM', 'pollinations'];
 
 			for (const providerName of providerNames) {
 				const providerSettings = settings.settingsOfProvider[providerName];
29 changes: 29 additions & 0 deletions src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts
@@ -85,6 +85,9 @@ export const defaultProviderSettings = {
 		region: 'us-east-1', // add region setting
 		endpoint: '', // optionally allow overriding default
 	},
+	pollinations: {
+		apiKey: '',
+	},
 
 } as const

@@ -278,6 +281,14 @@ export const defaultModelsOfProvider = {
 	microsoftAzure: [],
 	awsBedrock: [],
 	liteLLM: [],
+	pollinations: [ // https://enter.pollinations.ai/api/docs, https://pollinations.ai/llms.txt
+		'openai',
+		'gemini',
+		'gemini-large',
+		'claude',
+		'deepseek',
+		'qwen3-coder-30b',
+	],
 
 
 } as const satisfies Record<ProviderName, string[]>
@@ -1701,6 +1712,22 @@ const liteLLMSettings: VoidStaticProviderInfo = { // https://docs.litellm.ai/doc
 	},
 }
 
+// ---------------- POLLINATIONS ----------------
+const pollinationsSettings: VoidStaticProviderInfo = {
+	modelOptionsFallback: (modelName) => {
+		const fallback = extensiveModelOptionsFallback(modelName);
+		if (fallback && !fallback.specialToolFormat) {
+			fallback.specialToolFormat = 'openai-style';
+		}
+		return fallback;
+	},
+	modelOptions: {},
+	providerReasoningIOSettings: {
+		input: { includeInPayload: openAICompatIncludeInPayloadReasoning },
+		output: { nameOfFieldInDelta: 'reasoning_content' },
+	},
+}
+
 
 // ---------------- OPENROUTER ----------------
 const openRouterModelOptions_assumingOpenAICompat = {
@@ -1929,6 +1956,8 @@ const modelSettingsOfProvider: { [providerName in ProviderName]: VoidStaticProvi
 	liteLLM: liteLLMSettings,
 	lmStudio: lmStudioSettings,
 
+	pollinations: pollinationsSettings,
+
 	googleVertex: googleVertexSettings,
 	microsoftAzure: microsoftAzureSettings,
 	awsBedrock: awsBedrockSettings,
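`pollinationsSettings` treats the provider as OpenAI-compatible: model options come from the generic `extensiveModelOptionsFallback`, patched to default tool calls to `openai-style`, and interleaved reasoning is read from a `reasoning_content` field in each stream delta. A sketch of what that delta mapping implies for the stream parser (the delta shape is an assumption based on OpenAI-compatible streaming APIs, not taken from this codebase):

```ts
// Split one streamed delta into visible text and reasoning tokens,
// per `output: { nameOfFieldInDelta: 'reasoning_content' }` above.
type Delta = { content?: string; reasoning_content?: string };

function splitDelta(delta: Delta): { text: string; reasoning: string } {
	return {
		text: delta.content ?? '',
		reasoning: delta.reasoning_content ?? '',
	};
}

console.log(splitDelta({ content: 'Hello', reasoning_content: 'thinking...' }));
// -> { text: 'Hello', reasoning: 'thinking...' }
```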
@@ -69,8 +69,8 @@ class NLShellParserService implements INLShellParserService {
 		// If auto is selected, try to find a fallback model
 		if (modelSelection.providerName === 'auto' && modelSelection.modelName === 'auto') {
 			// Try to find the first available configured model (prefer online models first, then local)
-			const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM'> =
-				['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM'];
+			const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM' | 'pollinations'> =
+				['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM', 'pollinations'];
 			let fallbackModel: { providerName: string; modelName: string } | null = null;
 
 			for (const providerName of providerNames) {