Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/comprehensive_tech_spec.md
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ export interface IntentRouterOutput {
```

### **Step 2: Define "Intent Router" System Prompt**
This prompt will be used by a `gemini-2.5-flash` model to process and classify all incoming data.
This prompt will be used by a `gemini-1.5-flash` model to process and classify all incoming data.

```text
# System Prompt: Constellation Engine Intent Router
Expand Down
4 changes: 2 additions & 2 deletions docs/implementation_plan.md
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ This phase implements the logic for getting new data into the Unified Lake.

**2.1. Technical Specification: Intent Router**

* **Objective:** Create a system prompt for `gemini-2.5-flash` to classify and structure incoming data.
* **Objective:** Create a system prompt for `gemini-1.5-flash` to classify and structure incoming data.
* **Content:**
```text
# System Prompt: Constellation Engine Intent Router
Expand Down Expand Up @@ -276,7 +276,7 @@ This phase focuses on migrating existing data from the GitHub repository into th
* Uses the Intent Router prompt to process raw text content.
*/
async function generateUnifiedMetadata(content: string): Promise<IntentRouterOutput> {
const model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" }); // Or your fine-tuned model
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); // Or your fine-tuned model
const prompt = `# System Prompt: Constellation Engine Intent Router
... The Intent Router System Prompt ...\n\nINPUT:\n${content}`; // Replace with actual prompt from phase 2

Expand Down
2 changes: 1 addition & 1 deletion scripts/migrateLegacy.ts
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ function extractDateFromMetadata(filePath: string, content: string): string | nu
}

async function generateUnifiedMetadata(content: string): Promise<ExtendedIntentRouterOutput> {
const model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const result = await model.generateContent(`${INTENT_ROUTER_PROMPT}\n\nINPUT:\n${content}`);
const jsonText = result.response.text().replace(/```json\n?|\n?```/g, '').trim();
return JSON.parse(jsonText) as ExtendedIntentRouterOutput;
Expand Down
2 changes: 1 addition & 1 deletion src/biographerAsync.ts
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ export async function handler(event: AsyncPayload) {
- Do not use markdown code blocks (\`\`\`markdown).
`;

const generativeModel = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const generativeModel = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const result = await generativeModel.generateContent(systemPrompt);
const newLifeLogContent = sanitizeMarkdown(result.response.text());

Expand Down
2 changes: 1 addition & 1 deletion src/dreams.ts
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ export async function handler(event: APIGatewayProxyEvent): Promise<APIGatewayPr
- Output RAW markdown only. Do not wrap the output in markdown code blocks.
`;

const generativeModel = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const generativeModel = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const result = await generativeModel.generateContent(systemPrompt);
let newAnalysis = result.response.text();

Expand Down
2 changes: 1 addition & 1 deletion src/fiction.ts
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@ export async function handler(event: APIGatewayProxyEventV2): Promise<APIGateway
`;
}

const generativeModel = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const generativeModel = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const result = await generativeModel.generateContent(systemPrompt);
let newBibleContent = result.response.text();

Expand Down
4 changes: 2 additions & 2 deletions src/ingest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ export async function handler(event: APIGatewayProxyEvent): Promise<APIGatewayPr
}

// 2. Intent Router (Classification & Extraction)
const model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" }); // Fast model for routing
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); // Fast model for routing
const result = await model.generateContent(`${INTENT_ROUTER_SYSTEM_PROMPT}\n\nINPUT:\n${rawInput}`);
const responseText = result.response.text().replace(/```json\n?|\n?```/g, '').trim();
const routerOutput = JSON.parse(responseText) as IntentRouterOutput;
Expand Down Expand Up @@ -127,7 +127,7 @@ export async function handler(event: APIGatewayProxyEvent): Promise<APIGatewayPr
const contextText = contextEntries.join("\n\n");

// 4. Synthesize Answer with Gemini
const ragModel = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const ragModel = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const ragPrompt = `${RAG_SYSTEM_PROMPT}\n\nUSER QUESTION:\n${routerOutput.content}\n\nRETRIEVED CONTEXT:\n${contextText || "No relevant context found."}`;

const ragResult = await ragModel.generateContent(ragPrompt);
Expand Down
4 changes: 2 additions & 2 deletions src/librarian/dreamer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ export const handler = async (event?: APIGatewayProxyEventV2): Promise<APIGatewa
- Output RAW markdown only. Do not wrap the output in markdown code blocks.
`;

const generativeModel = genAI.getGenerativeModel({ model: "gemini-2.5-flash" }); // Using 2.0 Flash for consistency
const generativeModel = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); // Using 1.5 Flash for consistency
const result = await generativeModel.generateContent(systemPrompt);
let newAnalysis = result.response.text();

Expand Down Expand Up @@ -221,7 +221,7 @@ export const handler = async (event?: APIGatewayProxyEventV2): Promise<APIGatewa
}

// 3. Synthesize Connection (The Spark)
const model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const prompt = `
You are a serendipity engine. I will present two seemingly disparate entries from the user's second brain.
Your task is to find a creative, insightful, or surprising connection between them.
Expand Down
2 changes: 1 addition & 1 deletion src/librarian/logBook.ts
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ export async function updateReadingList(newLog: string) {
- Do not use markdown code blocks.
`;

const model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const result = await model.generateContent(systemPrompt);
const newContent = sanitizeMarkdown(result.response.text());

Expand Down
2 changes: 1 addition & 1 deletion src/librarian/retrieveAndCurate.ts
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ export const handler = async (event: HandlerInput): Promise<Book | null> => {
}

// 2. Curate & Dedupe: Call Gemini
const model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const prompt = `You are a curator. Review this list of 20 books. Your task is to select the single best book that fits the provided rationale.

**Rationale:** ${rationale}
Expand Down
2 changes: 1 addition & 1 deletion src/librarian/strategicAnalysis.ts
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ export const handler = async (event: { recentEntries: ConstellationRecord[] }):
}).join("\n\n---\n\n");

const model = genAI.getGenerativeModel({
model: "gemini-2.5-flash",
model: "gemini-1.5-flash",
systemInstruction: `Analyze the user's recent entries (notes, saved articles, thoughts). Identify the core topics and interests.
Pay attention to the metadata (tags, media types).

Expand Down
2 changes: 1 addition & 1 deletion src/librarian/synthesizeInsights.ts
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ export const handler = async (event: HandlerInput): Promise<string> => {
return `## No new recommendations were found in this run.`;
}

const model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });

// Format entries for the prompt
const entriesContext = recentEntries.map(entry => {
Expand Down
2 changes: 1 addition & 1 deletion src/lyrics.ts
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ INSTRUCTIONS:
CONSTRAINT: Do not alter the raw text of the lyrics. Only group and arrange them.
IMPORTANT: Output RAW markdown only. Do not wrap the output in markdown code blocks. Do not include any conversational text.`;

const generativeModel = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const generativeModel = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const result = await generativeModel.generateContent(systemPrompt);
let newSongSeeds = result.response.text();

Expand Down
2 changes: 1 addition & 1 deletion src/philosopher.ts
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ ${contextEntries}
- Do not use markdown code blocks (
`;

const generativeModel = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
const generativeModel = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const result = await generativeModel.generateContent(systemPrompt);
const newGardenContent = sanitizeMarkdown(result.response.text());

Expand Down
2 changes: 1 addition & 1 deletion src/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ const GITHUB_REPO = Resource.GITHUB_REPO.value;
* @param model The model to use for embedding.
* @returns The embedding vector.
*/
export async function getEmbedding(content: string, model = "text-embedding-004"): Promise<number[]> {
export async function getEmbedding(content: string, model = "embedding-001"): Promise<number[]> {
const embeddingModel = genAI.getGenerativeModel({ model });
const embeddingResult = await embeddingModel.embedContent(content);
return embeddingResult.embedding.values;
Expand Down
4 changes: 2 additions & 2 deletions sst.config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -232,7 +232,7 @@ export default $config({

api.route("POST /dream", {
handler: "src/librarian/dreamer.handler",
link: [GEMINI_API_KEY, PINECONE_API_KEY, PINECONE_INDEX_HOST, table, auth],
link: [GEMINI_API_KEY, PINECONE_API_KEY, PINECONE_INDEX_HOST, table, auth, GITHUB_TOKEN, GITHUB_OWNER, GITHUB_REPO],
timeout: "90 seconds",
}, {
auth: {
Expand Down Expand Up @@ -332,7 +332,7 @@ export default $config({
schedule: "rate(1 day)",
job: {
handler: "src/librarian/dreamer.handler",
link: [GEMINI_API_KEY, PINECONE_API_KEY, PINECONE_INDEX_HOST, table],
link: [GEMINI_API_KEY, PINECONE_API_KEY, PINECONE_INDEX_HOST, table, GITHUB_TOKEN, GITHUB_OWNER, GITHUB_REPO],
timeout: "90 seconds",
}
});
Expand Down