forked from vava-nessa/free-coding-models
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpatch-openclaw-models.js
More file actions
111 lines (96 loc) · 3.01 KB
/
patch-openclaw-models.js
File metadata and controls
111 lines (96 loc) · 3.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
#!/usr/bin/env node
/**
* @file patch-openclaw-models.js
* @description Helper function to patch OpenClaw's models.json with all NVIDIA models
*
* This is imported by bin/free-coding-models.js and called automatically
* when setting a model in OpenClaw mode.
*/
import { readFileSync, writeFileSync, existsSync, copyFileSync } from 'fs'
import { homedir } from 'os'
import { join } from 'path'
import { nvidiaNim } from './sources.js'
// Absolute path to OpenClaw's model registry:
// ~/.openclaw/agents/main/agent/models.json
const MODELS_JSON = join(homedir(), '.openclaw', 'agents', 'main', 'agent', 'models.json')
/**
 * Patch models.json to add all NVIDIA models from sources.js.
 *
 * Reads OpenClaw's models.json, ensures an `nvidia` provider entry (and its
 * `models` array) exists, appends every model from `nvidiaNim` that is not
 * already registered, and — only when something was added — rewrites the file
 * after taking a timestamped backup of the original.
 *
 * @returns {Object} result
 * @returns {number}  result.added      - models appended in this run
 * @returns {number}  result.total      - nvidia models after patching (0 on error)
 * @returns {boolean} result.wasPatched - true only when the file was rewritten
 * @returns {string}  [result.error]    - set when the file is missing or unparsable
 * @returns {string}  [result.backup]   - backup file path (only when wasPatched)
 */
export function patchOpenClawModelsJson() {
  if (!existsSync(MODELS_JSON)) {
    return { added: 0, total: 0, wasPatched: false, error: 'models.json not found' }
  }

  // Read existing config; report parse failures instead of throwing.
  let modelsConfig
  try {
    modelsConfig = JSON.parse(readFileSync(MODELS_JSON, 'utf8'))
  } catch (err) {
    return { added: 0, total: 0, wasPatched: false, error: err.message }
  }

  // Ensure the nvidia provider exists
  if (!modelsConfig.providers) modelsConfig.providers = {}
  if (!modelsConfig.providers.nvidia) {
    modelsConfig.providers.nvidia = {
      baseUrl: 'https://integrate.api.nvidia.com/v1',
      api: 'openai-completions',
      models: []
    }
  }
  // Guard: a pre-existing nvidia provider may lack a models array, which
  // would otherwise crash the .map() below with a TypeError.
  if (!Array.isArray(modelsConfig.providers.nvidia.models)) {
    modelsConfig.providers.nvidia.models = []
  }

  const nvidiaModels = modelsConfig.providers.nvidia.models
  const existingModelIds = new Set(nvidiaModels.map(m => m.id))

  // Map a quality tier to context-window / output-token limits.
  // Unknown tiers fall back to the most conservative limits.
  function getModelConfig(tier) {
    if (tier === 'S+' || tier === 'S') {
      return { contextWindow: 128000, maxTokens: 8192 }
    }
    if (tier === 'A+' || tier === 'A' || tier === 'A-') {
      return { contextWindow: 131072, maxTokens: 4096 }
    }
    return { contextWindow: 32768, maxTokens: 2048 }
  }

  // Append every model from sources.js that is not already registered.
  let addedCount = 0
  for (const [modelId, label, tier] of nvidiaNim) {
    if (existingModelIds.has(modelId)) {
      continue // Skip already existing models
    }
    const { contextWindow, maxTokens } = getModelConfig(tier)
    nvidiaModels.push({
      id: modelId,
      name: label,
      contextWindow,
      maxTokens,
      // Model ids containing 'thinking' are flagged as reasoning models.
      reasoning: modelId.includes('thinking'),
      input: ['text'],
      cost: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0
      }
    })
    addedCount++
  }

  // Nothing new: leave the file untouched.
  if (addedCount === 0) {
    return {
      added: 0,
      total: nvidiaModels.length,
      wasPatched: false
    }
  }

  // Back up the original before rewriting it in place.
  const backupPath = `${MODELS_JSON}.backup-${Date.now()}`
  copyFileSync(MODELS_JSON, backupPath)
  writeFileSync(MODELS_JSON, JSON.stringify(modelsConfig, null, 2))
  return {
    added: addedCount,
    total: nvidiaModels.length,
    wasPatched: true,
    backup: backupPath
  }
}