Skip to content
4 changes: 1 addition & 3 deletions packages/2-mongo-family/9-family/src/exports/pack.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
import type { FamilyPackRef } from '@prisma-next/framework-components/components';

const mongoFamilyPack = {
kind: 'family',
id: 'mongo',
familyId: 'mongo',
version: '0.0.1',
} as const;

export default mongoFamilyPack as typeof mongoFamilyPack & FamilyPackRef<'mongo'>;
export default mongoFamilyPack;
41 changes: 16 additions & 25 deletions packages/2-sql/9-family/src/core/authoring-field-presets.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,21 @@
import type { AuthoringFieldNamespace } from '@prisma-next/framework-components/authoring';

/**
* Family-level SQL authoring field presets.
*
* Only presets whose codec IDs align with the ID generator metadata live here
* (see `@prisma-next/ids`). These presets are target-agnostic because the
* generator metadata fixes their codec/native-type to `sql/char@1`
* (`character`) regardless of target, and the PSL interpreter lets the
* generator override the scalar descriptor.
*
* Scalar presets that map to target-specific codecs (e.g. `text`, `int`,
* `boolean`, `dateTime`) are contributed by the target pack (see
* `postgresAuthoringFieldPresets` in `@prisma-next/target-postgres`) so the
* TS callback surface and the PSL scalar surface lower to byte-identical
* contracts for the active target.
*/

const CHARACTER_CODEC_ID = 'sql/char@1';
const CHARACTER_NATIVE_TYPE = 'character';

Expand All @@ -18,31 +34,6 @@ const nanoidOptionsArgument = {
} as const;

export const sqlFamilyAuthoringFieldPresets = {
text: {
kind: 'fieldPreset',
output: {
codecId: 'sql/text@1',
nativeType: 'text',
},
},
timestamp: {
kind: 'fieldPreset',
output: {
codecId: 'sql/timestamp@1',
nativeType: 'timestamp',
},
},
createdAt: {
kind: 'fieldPreset',
output: {
codecId: 'sql/timestamp@1',
nativeType: 'timestamp',
default: {
kind: 'function',
expression: 'CURRENT_TIMESTAMP',
},
},
},
uuid: {
kind: 'fieldPreset',
output: {
Expand Down
8 changes: 3 additions & 5 deletions packages/3-extensions/pgvector/src/exports/pack.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
import type { ExtensionPackRef } from '@prisma-next/framework-components/components';
import { pgvectorPackMeta } from '../core/descriptor-meta';
import type { CodecTypes } from '../types/codec-types';

const pgvectorPack = pgvectorPackMeta;

export default pgvectorPack as typeof pgvectorPackMeta &
ExtensionPackRef<'sql', 'postgres'> & {
readonly __codecTypes?: CodecTypes;
};
export default pgvectorPack as typeof pgvectorPackMeta & {
readonly __codecTypes?: CodecTypes;
};
8 changes: 3 additions & 5 deletions packages/3-mongo-target/1-mongo-target/src/exports/pack.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
import type { TargetPackRef } from '@prisma-next/framework-components/components';
import { mongoTargetDescriptorMeta } from '../core/descriptor-meta';
import type { CodecTypes } from './codec-types';

const mongoTargetPack = mongoTargetDescriptorMeta;

export default mongoTargetPack as typeof mongoTargetPack &
TargetPackRef<'mongo', 'mongo'> & {
readonly __codecTypes?: CodecTypes;
};
export default mongoTargetPack as typeof mongoTargetPack & {
readonly __codecTypes?: CodecTypes;
};
90 changes: 89 additions & 1 deletion packages/3-targets/3-targets/postgres/src/core/authoring.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
import type { AuthoringTypeNamespace } from '@prisma-next/framework-components/authoring';
import type {
AuthoringFieldNamespace,
AuthoringTypeNamespace,
} from '@prisma-next/framework-components/authoring';

export const postgresAuthoringTypes = {
enum: {
Expand All @@ -13,3 +16,88 @@ export const postgresAuthoringTypes = {
},
},
} as const satisfies AuthoringTypeNamespace;

/**
* Field presets contributed by the Postgres target pack.
*
* These mirror the PSL scalar-to-codec mapping used by the Postgres adapter
* (see `createPostgresPslScalarTypeDescriptors`), so that authoring a field
* via the TS callback surface (e.g. `field.int()`) and via the PSL scalar
* surface (e.g. `Int`) lowers to byte-identical contracts.
*/
export const postgresAuthoringFieldPresets = {
text: {
kind: 'fieldPreset',
output: {
codecId: 'pg/text@1',
nativeType: 'text',
},
},
int: {
kind: 'fieldPreset',
output: {
codecId: 'pg/int4@1',
nativeType: 'int4',
},
},
bigint: {
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

F06 — 4 of 10 presets lack parity fixture coverage (deferred)

bigint, decimal, bytes, and dateTime are defined here but not exercised by any parity fixture. If the PSL mapping or codec IDs change for these scalars, the misalignment would go undetected.

I understand this is a deliberate scope decision (April stop condition: terseness, not vocabulary coverage). Noting it for visibility — please track the follow-up.

kind: 'fieldPreset',
output: {
codecId: 'pg/int8@1',
nativeType: 'int8',
},
},
float: {
kind: 'fieldPreset',
output: {
codecId: 'pg/float8@1',
nativeType: 'float8',
},
},
decimal: {
kind: 'fieldPreset',
output: {
codecId: 'pg/numeric@1',
nativeType: 'numeric',
},
},
boolean: {
kind: 'fieldPreset',
output: {
codecId: 'pg/bool@1',
nativeType: 'bool',
},
},
json: {
kind: 'fieldPreset',
output: {
codecId: 'pg/jsonb@1',
nativeType: 'jsonb',
},
},
bytes: {
kind: 'fieldPreset',
output: {
codecId: 'pg/bytea@1',
nativeType: 'bytea',
},
},
dateTime: {
kind: 'fieldPreset',
output: {
codecId: 'pg/timestamptz@1',
nativeType: 'timestamptz',
},
},
createdAt: {
kind: 'fieldPreset',
output: {
codecId: 'pg/timestamptz@1',
nativeType: 'timestamptz',
default: {
kind: 'function',
expression: 'now()',
},
},
},
} as const satisfies AuthoringFieldNamespace;
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import { postgresAuthoringTypes } from './authoring';
import { postgresAuthoringFieldPresets, postgresAuthoringTypes } from './authoring';

export const postgresTargetDescriptorMeta = {
kind: 'target',
Expand All @@ -9,5 +9,6 @@ export const postgresTargetDescriptorMeta = {
capabilities: {},
authoring: {
type: postgresAuthoringTypes,
field: postgresAuthoringFieldPresets,
},
} as const;
8 changes: 3 additions & 5 deletions packages/3-targets/3-targets/postgres/src/exports/pack.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
import type { CodecTypes } from '@prisma-next/adapter-postgres/codec-types';
import type { TargetPackRef } from '@prisma-next/framework-components/components';
import { postgresTargetDescriptorMeta } from '../core/descriptor-meta';

const postgresPack = postgresTargetDescriptorMeta;

export default postgresPack as typeof postgresTargetDescriptorMeta &
TargetPackRef<'sql', 'postgres'> & {
readonly __codecTypes?: CodecTypes;
};
export default postgresPack as typeof postgresTargetDescriptorMeta & {
readonly __codecTypes?: CodecTypes;
};
Comment on lines +6 to +8
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

F04 — __codecTypes phantom bolted on via cast at the export site

Same pattern in all pack exports (sqlite, pgvector, mongo-target). Now that the PackRef brands are gone, this phantom is the only reason the cast exists. It could live on the descriptor meta declaration instead (e.g. as const & { readonly __codecTypes?: CodecTypes } in descriptor-meta.ts), making pack.ts a plain re-export with no cast.

This PR touched every one of these files and had the opportunity to clean this up.

3 changes: 1 addition & 2 deletions packages/3-targets/3-targets/sqlite/src/exports/pack.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,8 @@
import type { CodecTypes } from '@prisma-next/adapter-sqlite/codec-types';
import type { TargetPackRef } from '@prisma-next/framework-components/components';
import { sqliteTargetDescriptorMeta } from '../core/descriptor-meta';

const sqlitePack = sqliteTargetDescriptorMeta;

export default sqlitePack as TargetPackRef<'sql', 'sqlite'> & {
export default sqlitePack as typeof sqliteTargetDescriptorMeta & {
readonly __codecTypes?: CodecTypes;
};
52 changes: 52 additions & 0 deletions test/integration/test/authoring/callback-mode-terseness.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import { readFileSync } from 'node:fs';
import { dirname, join } from 'node:path';
import { fileURLToPath } from 'node:url';
import { describe, expect, it } from 'vitest';

const __dirname = dirname(fileURLToPath(import.meta.url));
const fixtureDir = join(__dirname, 'parity', 'callback-mode-scalars');

/**
* Counts semantic lines in a source file — non-blank lines that are not
* comments. Matches the heuristic used by the contract-psl ts-psl-parity
* test so results are comparable across parity tests.
*/
function countSemanticLines(source: string): number {
return source
.split('\n')
.map((line) => line.trim())
.filter((line) => line.length > 0 && !line.startsWith('//')).length;
Comment on lines +14 to +18
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

F02 — countSemanticLines duplicated across test files

The JSDoc says this matches the heuristic in the contract-psl ts-psl-parity test. If that copy drifts, the two tests would measure terseness differently. Consider extracting to a shared test utility (e.g. alongside authoring-parity-test-helpers.ts) and importing from both locations.

}

describe('VP2: TS callback-mode authoring terseness parity', () => {
  // Terseness ratio for one parity fixture directory: semantic TS contract
  // lines per semantic PSL schema line.
  function terseness(dir: string): number {
    const pslCount = countSemanticLines(readFileSync(join(dir, 'schema.prisma'), 'utf-8'));
    const tsCount = countSemanticLines(readFileSync(join(dir, 'contract.ts'), 'utf-8'));
    return tsCount / pslCount;
  }

  const ratio = terseness(fixtureDir);

  it('keeps the callback-mode TS contract in the ~1.5–2.1x PSL ballpark', () => {
    // VP2 stop condition: the TS rendition of a representative contract must
    // stay "in the same ballpark" of length as its PSL counterpart. The April
    // structural-authoring baseline sat around 3–5x; callback-mode field
    // presets (from @prisma-next/target-postgres/pack) should collapse each
    // scalar field to one line, landing well under that baseline. The 2.1x
    // ceiling is deliberate: drift past it should force a re-review of the
    // preset vocabulary rather than a quiet widening of the window.
    expect(ratio).toBeLessThanOrEqual(2.1);
    expect(ratio).toBeGreaterThan(0);
  });

  it('is measurably tighter than the structural core-surface baseline', () => {
    const baseline = terseness(join(__dirname, 'parity', 'core-surface'));
    expect(ratio).toBeLessThan(baseline);
  });
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
import pgvector from '@prisma-next/extension-pgvector/pack';
import sqlFamily from '@prisma-next/family-sql/pack';
import { defineContract, rel } from '@prisma-next/sql-contract-ts/contract-builder';
import postgresPack from '@prisma-next/target-postgres/pack';

// Parity fixture: a representative contract authored through the TS callback
// surface. Its semantic line count is compared against the sibling
// schema.prisma (presumably by the callback-mode terseness test — confirm the
// fixture path). Only `//` comments are safe to add here: the counting
// heuristic excludes `//` lines, but `/* */` lines and blank-line changes to
// code would skew the measured ratio.
export const contract = defineContract(
  // Pack wiring: SQL family + Postgres target + pgvector extension. The
  // target pack contributes the one-line scalar field presets used below.
  { family: sqlFamily, target: postgresPack, extensionPacks: { pgvector } },
  ({ field, model, type }) => {
    // Extension-contributed named type (presumably a 1536-dimension pgvector
    // embedding column — confirm against the pgvector pack).
    const types = {
      Embedding: type.pgvector.Vector(1536),
    } as const;
    const User = model('User', {
      fields: {
        id: field.int().defaultSql('autoincrement()').id(),
        email: field.text().unique(),
        age: field.int(),
        isActive: field.boolean().default(true),
        score: field.float().optional(),
        profile: field.json().optional(),
        embedding: field.namedType(types.Embedding).optional(),
        // Preset carrying a server-side default (see target pack presets).
        createdAt: field.createdAt(),
      },
    }).sql({ table: 'user' });
    const Post = model('Post', {
      fields: {
        id: field.int().defaultSql('autoincrement()').id(),
        userId: field.int(),
        title: field.text(),
        rating: field.float().optional(),
      },
      relations: {
        // FK relation with cascade semantics on both delete and update.
        user: rel
          .belongsTo(User, { from: 'userId', to: 'id' })
          .sql({ fk: { onDelete: 'cascade', onUpdate: 'cascade' } }),
      },
    }).sql({ table: 'post' });
    return { types, models: { User, Post } };
  },
);
Loading
Loading