diff --git a/src/blockchain/adapters/cascade-port.ts b/src/blockchain/adapters/cascade-port.ts index 2accce2..ca0b68e 100644 --- a/src/blockchain/adapters/cascade-port.ts +++ b/src/blockchain/adapters/cascade-port.ts @@ -154,8 +154,14 @@ export class BlockchainActionAdapter implements CascadeChainPort { ); } + // LEP-5 SVC params — zero means use defaults + const svcChallengeCount = parseInt(params.svc_challenge_count, 10) || 0; + const svcMinChunksForChallenge = parseInt(params.svc_min_chunks_for_challenge, 10) || 0; + return { max_raptor_q_symbols: maxRaptorQSymbols, + svc_challenge_count: svcChallengeCount, + svc_min_chunks_for_challenge: svcMinChunksForChallenge, }; } @@ -212,6 +218,7 @@ export class BlockchainActionAdapter implements CascadeChainPort { rq_ids_ic: metadata.rq_ids_ic, signatures: metadata.signatures, public: metadata.public, + ...(metadata.availability_commitment ? { availability_commitment: metadata.availability_commitment } : {}), }), price: priceAmount+"ulume", expirationTime: input.expirationTime, diff --git a/src/blockchain/client.ts b/src/blockchain/client.ts index f25529e..f511836 100644 --- a/src/blockchain/client.ts +++ b/src/blockchain/client.ts @@ -45,6 +45,8 @@ class RpcActionQuery implements ActionQuery { fee_base: params?.baseActionFee?.amount ?? "0", fee_per_kb: params?.feePerKbyte?.amount ?? "0", max_raptor_q_symbols: params?.maxRaptorQSymbols?.toString() ?? "0", + svc_challenge_count: params?.svcChallengeCount?.toString() ?? "0", + svc_min_chunks_for_challenge: params?.svcMinChunksForChallenge?.toString() ?? 
"0", }; } diff --git a/src/blockchain/interfaces.ts b/src/blockchain/interfaces.ts index 4ae87d3..26f511c 100644 --- a/src/blockchain/interfaces.ts +++ b/src/blockchain/interfaces.ts @@ -131,6 +131,10 @@ export interface ActionParams { fee_per_kb: string; /** Maximum number of RaptorQ symbols allowed */ max_raptor_q_symbols: string; + /** LEP-5: Number of chunks to challenge during SVC (0 = default 8) */ + svc_challenge_count: string; + /** LEP-5: Minimum chunks for SVC (0 = default 4) */ + svc_min_chunks_for_challenge: string; } /** diff --git a/src/cascade/client.ts b/src/cascade/client.ts index 1d2b1b1..c63ae00 100644 --- a/src/cascade/client.ts +++ b/src/cascade/client.ts @@ -229,14 +229,31 @@ export class SNApiClient { * ``` */ async getTaskStatus(taskId: string): Promise { - // Prefer the versioned path; fall back to legacy/non-versioned path on 404 + // `/status` is SSE on sn-api-server and can block when polled via fetch text. + // Use `/history` for polling and return the latest status entry. try { - return await this.http.get(`/api/v1/actions/cascade/tasks/${taskId}/status`); + const history = await this.http.get>>(`/api/v1/actions/cascade/tasks/${taskId}/history`); + if (Array.isArray(history) && history.length > 0) { + return history[history.length - 1] as unknown as TaskStatus; + } } catch (err) { - if (err instanceof HttpError && err.statusCode === 404) { + if (!(err instanceof HttpError && err.statusCode === 404)) { + throw err; + } + const legacyHistory = await this.http.get>>(`/api/actions/cascade/tasks/${taskId}/history`); + if (Array.isArray(legacyHistory) && legacyHistory.length > 0) { + return legacyHistory[legacyHistory.length - 1] as unknown as TaskStatus; + } + } + + // Fallback to explicit status endpoint for deployments where it is plain JSON. 
+ try { + return await this.http.get<TaskStatus>(`/api/v1/actions/cascade/tasks/${taskId}/status`); + } catch (statusErr) { + if (statusErr instanceof HttpError && statusErr.statusCode === 404) { return this.http.get<TaskStatus>(`/api/actions/cascade/tasks/${taskId}/status`); } - throw err; + throw statusErr; } } diff --git a/src/cascade/commitment.ts b/src/cascade/commitment.ts new file mode 100644 index 0000000..9cbcbf4 --- /dev/null +++ b/src/cascade/commitment.ts @@ -0,0 +1,207 @@ +/** + * LEP-5 Availability Commitment - Merkle tree construction and challenge index derivation. + * + * This module implements the client-side commitment logic for the Storage + * Verification Challenge (SVC). It builds a BLAKE3 Merkle tree over file chunks, + * derives deterministic challenge indices from the root, and produces an + * AvailabilityCommitment that gets submitted on-chain during cascade registration. + * + * Must produce identical commitments to the Go implementation in + * supernode/pkg/cascadekit/commitment.go. + * + * @module cascade/commitment + */ + +import { blake3HashBytes } from '../internal/hash'; +import type { AvailabilityCommitment } from '../codegen/lumera/action/v1/metadata'; +import { HashAlgo } from '../codegen/lumera/action/v1/metadata'; + +/** Default chunk size: 256 KiB */ +export const DEFAULT_CHUNK_SIZE = 262144; + +/** Minimum number of bytes for commitment (below this, skip SVC) */ +export const MIN_TOTAL_SIZE = 4; + +/** Commitment type string matching the Go constant */ +export const COMMITMENT_TYPE = "lep5/chunk-merkle/v1"; + +/** Default SVC challenge count (matches chain default) */ +export const DEFAULT_SVC_CHALLENGE_COUNT = 8; + +/** Default minimum chunks for challenge (matches chain default) */ +export const DEFAULT_SVC_MIN_CHUNKS_FOR_CHALLENGE = 4; + +/** + * Select the chunk size for a given file. + * Starts at DEFAULT_CHUNK_SIZE and halves until there are at least minChunks chunks. 
+ */ +export function selectChunkSize(fileSize: number, minChunks: number): number { + let chunkSize = DEFAULT_CHUNK_SIZE; + while (chunkSize > 1 && Math.ceil(fileSize / chunkSize) < minChunks) { + chunkSize = Math.floor(chunkSize / 2); + } + return chunkSize; +} + +/** + * Split file bytes into chunks of the given size. + */ +export function chunkBytes(data: Uint8Array, chunkSize: number): Uint8Array[] { + const chunks: Uint8Array[] = []; + for (let offset = 0; offset < data.length; offset += chunkSize) { + chunks.push(data.subarray(offset, Math.min(offset + chunkSize, data.length))); + } + return chunks; +} + +/** + * Hash a leaf node: BLAKE3(0x00 || index_be32 || data) + * Must match lumera/x/action/v1/merkle.HashLeaf + */ +export async function hashLeaf(index: number, data: Uint8Array): Promise<Uint8Array> { + const buf = new Uint8Array(1 + 4 + data.length); + buf[0] = 0x00; // leaf domain separator + buf[1] = (index >>> 24) & 0xff; + buf[2] = (index >>> 16) & 0xff; + buf[3] = (index >>> 8) & 0xff; + buf[4] = index & 0xff; + buf.set(data, 5); + return blake3HashBytes(buf); +} + +/** + * Hash an internal node: BLAKE3(0x01 || left || right) + * Must match lumera/x/action/v1/merkle.HashNode + */ +export async function hashNode(left: Uint8Array, right: Uint8Array): Promise<Uint8Array> { + const buf = new Uint8Array(1 + left.length + right.length); + buf[0] = 0x01; // internal node domain separator + buf.set(left, 1); + buf.set(right, 1 + left.length); + return blake3HashBytes(buf); +} + +/** + * Build a Merkle tree from leaf hashes. + * Returns all levels: tree[0] = leaves, tree[last] = [root]. 
+ */ +export async function buildTree(leafHashes: Uint8Array[]): Promise<Uint8Array[][]> { + if (leafHashes.length === 0) { + throw new Error("cannot build tree from zero leaves"); + } + + const levels: Uint8Array[][] = [leafHashes]; + let current = leafHashes; + + while (current.length > 1) { + const next: Uint8Array[] = []; + for (let i = 0; i < current.length; i += 2) { + if (i + 1 < current.length) { + next.push(await hashNode(current[i], current[i + 1])); + } else { + // Odd node: promote to next level + next.push(current[i]); + } + } + levels.push(next); + current = next; + } + + return levels; +} + +/** + * Derive deterministic challenge indices from the Merkle root. + * Uses BLAKE3(root || uint32be(counter)) mod numChunks. + * Must match supernode/pkg/cascadekit/commitment.go:deriveSimpleIndices + */ +export async function deriveIndices( + root: Uint8Array, + numChunks: number, + challengeCount: number +): Promise<number[]> { + const indices: number[] = []; + const seen = new Set<number>(); + let counter = 0; + + while (indices.length < challengeCount && indices.length < numChunks) { + // BLAKE3(root || uint32be(counter)) + const buf = new Uint8Array(root.length + 4); + buf.set(root, 0); + buf[root.length] = (counter >>> 24) & 0xff; + buf[root.length + 1] = (counter >>> 16) & 0xff; + buf[root.length + 2] = (counter >>> 8) & 0xff; + buf[root.length + 3] = counter & 0xff; + + const h = await blake3HashBytes(buf); + + // Use first 8 bytes as uint64 mod numChunks + // DataView for big-endian reading + const view = new DataView(h.buffer, h.byteOffset, h.byteLength); + const hi32 = view.getUint32(0, false); // big-endian + const lo32 = view.getUint32(4, false); + // Compute (hi32 * 2^32 + lo32) mod numChunks using BigInt for precision + const val = (BigInt(hi32) << 32n) | BigInt(lo32); + const idx = Number(val % BigInt(numChunks)); + + if (!seen.has(idx)) { + seen.add(idx); + indices.push(idx); + } + counter++; + + // Safety: avoid infinite loop if numChunks < challengeCount + if (counter > 
challengeCount * 100) { + break; + } + } + + return indices; +} + +/** + * Build an AvailabilityCommitment from file bytes. + * + * @param fileBytes - Raw file content + * @param challengeCount - Number of challenge indices (from chain params, default 8) + * @param minChunks - Minimum chunks for SVC (from chain params, default 4) + * @returns The commitment (or undefined if file is too small) and the Merkle tree levels + */ +export async function buildCommitment( + fileBytes: Uint8Array, + challengeCount: number = DEFAULT_SVC_CHALLENGE_COUNT, + minChunks: number = DEFAULT_SVC_MIN_CHUNKS_FOR_CHALLENGE, +): Promise<{ commitment: AvailabilityCommitment; tree: Uint8Array[][] } | undefined> { + if (fileBytes.length < MIN_TOTAL_SIZE) { + return undefined; + } + + const chunkSize = selectChunkSize(fileBytes.length, minChunks); + const chunks = chunkBytes(fileBytes, chunkSize); + const numChunks = chunks.length; + + // Hash all leaves + const leafHashes: Uint8Array[] = []; + for (let i = 0; i < chunks.length; i++) { + leafHashes.push(await hashLeaf(i, chunks[i])); + } + + // Build tree + const tree = await buildTree(leafHashes); + const root = tree[tree.length - 1][0]; + + // Derive challenge indices + const challengeIndices = await deriveIndices(root, numChunks, challengeCount); + + const commitment: AvailabilityCommitment = { + commitmentType: COMMITMENT_TYPE, + hashAlgo: HashAlgo.HASH_ALGO_BLAKE3, + chunkSize, + totalSize: BigInt(fileBytes.length), + numChunks, + root, + challengeIndices, + }; + + return { commitment, tree }; +} diff --git a/src/cascade/ports.ts b/src/cascade/ports.ts index 756c437..95ef3b5 100644 --- a/src/cascade/ports.ts +++ b/src/cascade/ports.ts @@ -37,6 +37,18 @@ export interface CascadeActionParams { * Used for layout ID derivation in LEP-1. */ max_raptor_q_symbols: number; + + /** + * LEP-5: Number of chunks to challenge during SVC. + * Zero means use default (8). 
+ */ + svc_challenge_count: number; + + /** + * LEP-5: Minimum chunks required for SVC. + * Zero means use default (4). + */ + svc_min_chunks_for_challenge: number; } /** diff --git a/src/cascade/uploader.ts b/src/cascade/uploader.ts index c43c879..a5c14f7 100644 --- a/src/cascade/uploader.ts +++ b/src/cascade/uploader.ts @@ -33,6 +33,8 @@ import { toBase64, toCanonicalJsonBytes } from '../internal/encoding'; import { createSingleBlockLayout, generateIds, buildIndexFile } from '../wasm/lep1'; import type { UniversalSigner, ArbitrarySignResponse } from '../wallets/signer'; import { createDefaultSignaturePrompter } from '../wallets/prompter'; +import { buildCommitment, DEFAULT_SVC_CHALLENGE_COUNT, DEFAULT_SVC_MIN_CHUNKS_FOR_CHALLENGE } from './commitment'; +import type { AvailabilityCommitment } from '../codegen/lumera/action/v1/metadata'; export type CascadeSignatureKind = "layout" | "index" | "auth"; @@ -237,7 +239,9 @@ export class CascadeUploader { // Step 1: Get action params from blockchain const actionParams = await this.chainPort.getActionParams(); const rq_ids_max = actionParams.max_raptor_q_symbols; - console.debug('CascadeUploader.registerAction actionParams', { actionParams }); + const svcChallengeCount = actionParams.svc_challenge_count || DEFAULT_SVC_CHALLENGE_COUNT; + const svcMinChunks = actionParams.svc_min_chunks_for_challenge || DEFAULT_SVC_MIN_CHUNKS_FOR_CHALLENGE; + console.debug('CascadeUploader.registerAction actionParams', { actionParams, svcChallengeCount, svcMinChunks }); // Step 2: Generate random initial counter for layout ID derivation const rq_ids_ic = Math.floor(Math.random() * rq_ids_max); @@ -302,6 +306,18 @@ export class CascadeUploader { const indexWithSignature = `${indexFileB64}.${indexSignatureResponse.signature}`; console.debug('CascadeUploader.registerAction indexWithSignature', { indexWithSignature }); + // Step 7b: Build LEP-5 availability commitment (Merkle tree over file chunks) + let availabilityCommitment: 
AvailabilityCommitment | undefined; + const commitmentResult = await buildCommitment(fileBytes, svcChallengeCount, svcMinChunks); + if (commitmentResult) { + availabilityCommitment = commitmentResult.commitment; + console.debug('CascadeUploader.registerAction built availability commitment', { + chunkSize: availabilityCommitment.chunkSize, + numChunks: availabilityCommitment.numChunks, + challengeIndices: availabilityCommitment.challengeIndices, + }); + } + // Step 8: Prepare auth_signature for upload const authSignatureResponse = await this.requestSignature( "auth", @@ -313,14 +329,35 @@ console.debug('CascadeUploader.registerAction authSignature', { authSignature }); // Step 9: Register the action on-chain + const msg: Record<string, unknown> = { + data_hash: dataHash, + file_name: params.fileName, + rq_ids_ic, + signatures: indexWithSignature, + public: params.isPublic, + }; + if (availabilityCommitment) { + msg.availability_commitment = { + commitment_type: availabilityCommitment.commitmentType, + hash_algo: availabilityCommitment.hashAlgo, + chunk_size: availabilityCommitment.chunkSize, + total_size: (() => { + const v = availabilityCommitment.totalSize; + if (typeof v === "bigint") { + if (v > BigInt(Number.MAX_SAFE_INTEGER)) { + throw new Error(`availability_commitment.total_size exceeds Number.MAX_SAFE_INTEGER: ${v.toString()}`); + } + return Number(v); + } + return v; + })(), + num_chunks: availabilityCommitment.numChunks, + root: Array.from(availabilityCommitment.root), + challenge_indices: availabilityCommitment.challengeIndices, + }; + } const txOutcome = await this.chainPort.requestActionTx({ - msg: { - data_hash: dataHash, - file_name: params.fileName, - rq_ids_ic, - signatures: indexWithSignature, - public: params.isPublic, - }, + msg, expirationTime: params.expirationTime, txPrompter: params.txPrompter, }, fileBytes.length); diff --git a/src/codegen/lumera/action/v1/metadata.ts b/src/codegen/lumera/action/v1/metadata.ts index 
57811bd..e3682a7 100644 --- a/src/codegen/lumera/action/v1/metadata.ts +++ b/src/codegen/lumera/action/v1/metadata.ts @@ -1,7 +1,223 @@ // @ts-nocheck /* eslint-disable */ import { BinaryReader, BinaryWriter } from "../../../binary"; +import { GlobalDecoderRegistry } from "../../../registry"; import { DeepPartial } from "../../../helpers"; + +/** + * HashAlgo enumerates hash algorithms used for LEP-5 availability commitments. + */ +export enum HashAlgo { + HASH_ALGO_UNSPECIFIED = 0, + HASH_ALGO_BLAKE3 = 1, +} + +/** + * AvailabilityCommitment is the LEP-5 on-chain file commitment included + * during Cascade registration. + */ +export interface AvailabilityCommitment { + commitmentType: string; + hashAlgo: HashAlgo; + chunkSize: number; + totalSize: bigint; + numChunks: number; + root: Uint8Array; + challengeIndices: number[]; +} +export interface AvailabilityCommitmentAmino { + commitment_type: string; + hash_algo: number; + chunk_size: number; + total_size: string; + num_chunks: number; + root: Uint8Array; + challenge_indices: number[]; +} + +/** + * ChunkProof is a Merkle inclusion proof for one challenged chunk. 
+ */ +export interface ChunkProof { + chunkIndex: number; + leafHash: Uint8Array; + pathHashes: Uint8Array[]; + pathDirections: boolean[]; +} +export interface ChunkProofAmino { + chunk_index: number; + leaf_hash: Uint8Array; + path_hashes: Uint8Array[]; + path_directions: boolean[]; +} + +function createBaseAvailabilityCommitment(): AvailabilityCommitment { + return { + commitmentType: "", + hashAlgo: HashAlgo.HASH_ALGO_UNSPECIFIED, + chunkSize: 0, + totalSize: BigInt(0), + numChunks: 0, + root: new Uint8Array(), + challengeIndices: [] + }; +} + +export const AvailabilityCommitment = { + typeUrl: "/lumera.action.v1.AvailabilityCommitment", + encode(message: AvailabilityCommitment, writer: BinaryWriter = BinaryWriter.create()): BinaryWriter { + if (message.commitmentType !== "") { + writer.uint32(10).string(message.commitmentType); + } + if (message.hashAlgo !== HashAlgo.HASH_ALGO_UNSPECIFIED) { + writer.uint32(16).int32(message.hashAlgo); + } + if (message.chunkSize !== 0) { + writer.uint32(24).uint32(message.chunkSize); + } + if (message.totalSize !== BigInt(0)) { + writer.uint32(32).uint64(message.totalSize); + } + if (message.numChunks !== 0) { + writer.uint32(40).uint32(message.numChunks); + } + if (message.root.length !== 0) { + writer.uint32(50).bytes(message.root); + } + writer.uint32(58).fork(); + for (const v of message.challengeIndices) { + writer.uint32(v); + } + writer.ldelim(); + return writer; + }, + decode(input: BinaryReader | Uint8Array, length?: number): AvailabilityCommitment { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseAvailabilityCommitment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.commitmentType = reader.string(); + break; + case 2: + message.hashAlgo = reader.int32() as HashAlgo; + break; + case 3: + message.chunkSize = reader.uint32(); + break; + case 4: + message.totalSize = reader.uint64(); + break; + case 5: + message.numChunks = reader.uint32(); + break; + case 6: + message.root = reader.bytes(); + break; + case 7: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.challengeIndices.push(reader.uint32()); + } + } else { + message.challengeIndices.push(reader.uint32()); + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + fromPartial(object: DeepPartial): AvailabilityCommitment { + const message = createBaseAvailabilityCommitment(); + message.commitmentType = object.commitmentType ?? ""; + message.hashAlgo = object.hashAlgo ?? HashAlgo.HASH_ALGO_UNSPECIFIED; + message.chunkSize = object.chunkSize ?? 0; + message.totalSize = object.totalSize !== undefined && object.totalSize !== null ? BigInt(object.totalSize.toString()) : BigInt(0); + message.numChunks = object.numChunks ?? 0; + message.root = object.root ?? 
new Uint8Array(); + message.challengeIndices = object.challengeIndices?.map(e => e) || []; + return message; + }, + registerTypeUrl() {} +}; + +function createBaseChunkProof(): ChunkProof { + return { + chunkIndex: 0, + leafHash: new Uint8Array(), + pathHashes: [], + pathDirections: [] + }; +} + +export const ChunkProof = { + typeUrl: "/lumera.action.v1.ChunkProof", + encode(message: ChunkProof, writer: BinaryWriter = BinaryWriter.create()): BinaryWriter { + if (message.chunkIndex !== 0) { + writer.uint32(8).uint32(message.chunkIndex); + } + if (message.leafHash.length !== 0) { + writer.uint32(18).bytes(message.leafHash); + } + for (const v of message.pathHashes) { + writer.uint32(26).bytes(v); + } + writer.uint32(34).fork(); + for (const v of message.pathDirections) { + writer.bool(v); + } + writer.ldelim(); + return writer; + }, + decode(input: BinaryReader | Uint8Array, length?: number): ChunkProof { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseChunkProof(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.chunkIndex = reader.uint32(); + break; + case 2: + message.leafHash = reader.bytes(); + break; + case 3: + message.pathHashes.push(reader.bytes()); + break; + case 4: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.pathDirections.push(reader.bool()); + } + } else { + message.pathDirections.push(reader.bool()); + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + fromPartial(object: DeepPartial): ChunkProof { + const message = createBaseChunkProof(); + message.chunkIndex = object.chunkIndex ?? 0; + message.leafHash = object.leafHash ?? 
new Uint8Array(); + message.pathHashes = object.pathHashes?.map(e => e) || []; + message.pathDirections = object.pathDirections?.map(e => e) || []; + return message; + }, + registerTypeUrl() {} +}; /** * SenseMetadata contains information for Sense actions. * This metadata is directly embedded in the Action.metadata field. @@ -118,6 +334,14 @@ export interface CascadeMetadata { * or restricted actions. */ public: boolean; + /** + * LEP-5: Availability commitment (Merkle root + challenge indices) + */ + availabilityCommitment?: AvailabilityCommitment; + /** + * LEP-5: Chunk proofs submitted during finalization + */ + chunkProofs: ChunkProof[]; } export interface CascadeMetadataProtoMsg { typeUrl: "/lumera.action.v1.CascadeMetadata"; @@ -161,6 +385,8 @@ export interface CascadeMetadataAmino { * or restricted actions. */ public: boolean; + availability_commitment?: AvailabilityCommitmentAmino; + chunk_proofs: ChunkProofAmino[]; } export interface CascadeMetadataAminoMsg { type: "/lumera.action.v1.CascadeMetadata"; @@ -332,7 +558,9 @@ function createBaseCascadeMetadata(): CascadeMetadata { rqIdsMax: BigInt(0), rqIdsIds: [], signatures: "", - public: false + public: false, + availabilityCommitment: undefined, + chunkProofs: [] }; } /** @@ -378,6 +606,12 @@ export const CascadeMetadata = { if (message.public === true) { writer.uint32(56).bool(message.public); } + if (message.availabilityCommitment !== undefined) { + AvailabilityCommitment.encode(message.availabilityCommitment, writer.uint32(66).fork()).ldelim(); + } + for (const v of message.chunkProofs) { + ChunkProof.encode(v!, writer.uint32(74).fork()).ldelim(); + } return writer; }, decode(input: BinaryReader | Uint8Array, length?: number): CascadeMetadata { @@ -408,6 +642,12 @@ export const CascadeMetadata = { case 7: message.public = reader.bool(); break; + case 8: + message.availabilityCommitment = AvailabilityCommitment.decode(reader, reader.uint32()); + break; + case 9: + 
message.chunkProofs.push(ChunkProof.decode(reader, reader.uint32())); + break; default: reader.skipType(tag & 7); break; @@ -424,6 +664,8 @@ export const CascadeMetadata = { message.rqIdsIds = object.rqIdsIds?.map(e => e) || []; message.signatures = object.signatures ?? ""; message.public = object.public ?? false; + message.availabilityCommitment = object.availabilityCommitment !== undefined && object.availabilityCommitment !== null ? AvailabilityCommitment.fromPartial(object.availabilityCommitment) : undefined; + message.chunkProofs = object.chunkProofs?.map(e => ChunkProof.fromPartial(e)) || []; return message; }, fromAmino(object: CascadeMetadataAmino): CascadeMetadata { @@ -447,6 +689,10 @@ export const CascadeMetadata = { if (object.public !== undefined && object.public !== null) { message.public = object.public; } + if (object.availability_commitment !== undefined && object.availability_commitment !== null) { + message.availabilityCommitment = AvailabilityCommitment.fromPartial(object.availability_commitment as any); + } + message.chunkProofs = object.chunk_proofs?.map(e => ChunkProof.fromPartial(e as any)) || []; return message; }, toAmino(message: CascadeMetadata): CascadeMetadataAmino { @@ -462,6 +708,25 @@ export const CascadeMetadata = { } obj.signatures = message.signatures === "" ? undefined : message.signatures; obj.public = message.public === false ? undefined : message.public; + obj.availability_commitment = message.availabilityCommitment ? 
{ + commitment_type: message.availabilityCommitment.commitmentType, + hash_algo: message.availabilityCommitment.hashAlgo, + chunk_size: message.availabilityCommitment.chunkSize, + total_size: message.availabilityCommitment.totalSize.toString(), + num_chunks: message.availabilityCommitment.numChunks, + root: message.availabilityCommitment.root, + challenge_indices: message.availabilityCommitment.challengeIndices, + } : undefined; + if (message.chunkProofs) { + obj.chunk_proofs = message.chunkProofs.map(e => ({ + chunk_index: e.chunkIndex, + leaf_hash: e.leafHash, + path_hashes: e.pathHashes, + path_directions: e.pathDirections, + })); + } else { + obj.chunk_proofs = []; + } return obj; }, fromAminoMsg(object: CascadeMetadataAminoMsg): CascadeMetadata { diff --git a/src/codegen/lumera/action/v1/params.ts b/src/codegen/lumera/action/v1/params.ts index fd8902c..3766d2f 100644 --- a/src/codegen/lumera/action/v1/params.ts +++ b/src/codegen/lumera/action/v1/params.ts @@ -35,6 +35,14 @@ export interface Params { */ superNodeFeeShare: string; foundationFeeShare: string; + /** + * LEP-5: Number of chunks to challenge during SVC (default: 8) + */ + svcChallengeCount: number; + /** + * LEP-5: Minimum chunks required for SVC (default: 4) + */ + svcMinChunksForChallenge: number; } export interface ParamsProtoMsg { typeUrl: "/lumera.action.v1.Params"; @@ -70,6 +78,8 @@ export interface ParamsAmino { */ super_node_fee_share: string; foundation_fee_share: string; + svc_challenge_count: number; + svc_min_chunks_for_challenge: number; } export interface ParamsAminoMsg { type: "/lumera.action.v1.Params"; @@ -87,7 +97,9 @@ function createBaseParams(): Params { minProcessingTime: Duration.fromPartial({}), maxProcessingTime: Duration.fromPartial({}), superNodeFeeShare: "", - foundationFeeShare: "" + foundationFeeShare: "", + svcChallengeCount: 0, + svcMinChunksForChallenge: 0 }; } /** @@ -138,6 +150,12 @@ export const Params = { if (message.foundationFeeShare !== "") { 
writer.uint32(90).string(message.foundationFeeShare); } + if (message.svcChallengeCount !== 0) { + writer.uint32(96).uint32(message.svcChallengeCount); + } + if (message.svcMinChunksForChallenge !== 0) { + writer.uint32(104).uint32(message.svcMinChunksForChallenge); + } return writer; }, decode(input: BinaryReader | Uint8Array, length?: number): Params { @@ -180,6 +198,12 @@ export const Params = { case 11: message.foundationFeeShare = reader.string(); break; + case 12: + message.svcChallengeCount = reader.uint32(); + break; + case 13: + message.svcMinChunksForChallenge = reader.uint32(); + break; default: reader.skipType(tag & 7); break; @@ -200,6 +224,8 @@ export const Params = { message.maxProcessingTime = object.maxProcessingTime !== undefined && object.maxProcessingTime !== null ? Duration.fromPartial(object.maxProcessingTime) : undefined; message.superNodeFeeShare = object.superNodeFeeShare ?? ""; message.foundationFeeShare = object.foundationFeeShare ?? ""; + message.svcChallengeCount = object.svcChallengeCount ?? 0; + message.svcMinChunksForChallenge = object.svcMinChunksForChallenge ?? 0; return message; }, fromAmino(object: ParamsAmino): Params { @@ -237,6 +263,12 @@ export const Params = { if (object.foundation_fee_share !== undefined && object.foundation_fee_share !== null) { message.foundationFeeShare = object.foundation_fee_share; } + if (object.svc_challenge_count !== undefined && object.svc_challenge_count !== null) { + message.svcChallengeCount = object.svc_challenge_count; + } + if (object.svc_min_chunks_for_challenge !== undefined && object.svc_min_chunks_for_challenge !== null) { + message.svcMinChunksForChallenge = object.svc_min_chunks_for_challenge; + } return message; }, toAmino(message: Params): ParamsAmino { @@ -252,6 +284,8 @@ export const Params = { obj.max_processing_time = message.maxProcessingTime ? 
Duration.toAmino(message.maxProcessingTime) : Duration.toAmino(Duration.fromPartial({})); obj.super_node_fee_share = message.superNodeFeeShare === "" ? undefined : message.superNodeFeeShare; obj.foundation_fee_share = message.foundationFeeShare === "" ? undefined : message.foundationFeeShare; + obj.svc_challenge_count = message.svcChallengeCount === 0 ? undefined : message.svcChallengeCount; + obj.svc_min_chunks_for_challenge = message.svcMinChunksForChallenge === 0 ? undefined : message.svcMinChunksForChallenge; return obj; }, fromAminoMsg(object: ParamsAminoMsg): Params { diff --git a/src/index.ts b/src/index.ts index f2c2fb0..0a71b09 100644 --- a/src/index.ts +++ b/src/index.ts @@ -56,6 +56,28 @@ export type { TxOutcome, } from "./cascade/ports"; +// Storage layer - LEP-5 availability commitment +export { + buildCommitment, + selectChunkSize, + deriveIndices, + hashLeaf, + hashNode, + buildTree, + DEFAULT_CHUNK_SIZE, + MIN_TOTAL_SIZE, + COMMITMENT_TYPE, + DEFAULT_SVC_CHALLENGE_COUNT, + DEFAULT_SVC_MIN_CHUNKS_FOR_CHALLENGE, +} from "./cascade/commitment"; + +// Codegen - LEP-5 types +export { + AvailabilityCommitment, + ChunkProof, + HashAlgo, +} from "./codegen/lumera/action/v1/metadata"; + // Blockchain adapter for Cascade port export { BlockchainActionAdapter } from "./blockchain/adapters/cascade-port"; export type { BlockchainActionAdapterOptions } from "./blockchain/adapters/cascade-port"; diff --git a/src/internal/http.ts b/src/internal/http.ts index 98004ef..a986878 100644 --- a/src/internal/http.ts +++ b/src/internal/http.ts @@ -600,7 +600,14 @@ export class HttpClient { * @returns Full URL */ private buildUrl(path: string, params?: Record): string { - const url = new URL(path, this.config.baseUrl); + const base = new URL(this.config.baseUrl); + const baseHasPath = base.pathname !== '' && base.pathname !== '/'; + + // When baseUrl contains a path prefix (e.g. /proxy/snapi), absolute request + // paths like "/api/v1/..." would otherwise drop that prefix. 
Preserve it.
+    const resolvedPath = baseHasPath && path.startsWith('/') ? path.slice(1) : path;
+    const baseHref = this.config.baseUrl.endsWith('/') ? this.config.baseUrl : `${this.config.baseUrl}/`;
+    const url = new URL(resolvedPath, baseHref);

     if (params) {
       Object.entries(params).forEach(([key, value]) => {
diff --git a/tests/cascade/commitment.test.ts b/tests/cascade/commitment.test.ts
new file mode 100644
index 0000000..28daba4
--- /dev/null
+++ b/tests/cascade/commitment.test.ts
@@ -0,0 +1,208 @@
+import { describe, it, expect } from 'vitest';
+import {
+  selectChunkSize,
+  chunkBytes,
+  hashLeaf,
+  hashNode,
+  buildTree,
+  deriveIndices,
+  buildCommitment,
+  DEFAULT_CHUNK_SIZE,
+  MIN_TOTAL_SIZE,
+  COMMITMENT_TYPE,
+  DEFAULT_SVC_CHALLENGE_COUNT,
+  DEFAULT_SVC_MIN_CHUNKS_FOR_CHALLENGE,
+} from '../../src/cascade/commitment';
+import { HashAlgo } from '../../src/codegen/lumera/action/v1/metadata';
+
+describe('selectChunkSize', () => {
+  it('returns DEFAULT_CHUNK_SIZE for large files', () => {
+    // 2MB file with 4 min chunks → 256KiB chunks = 8 chunks ≥ 4
+    expect(selectChunkSize(2 * 1024 * 1024, 4)).toBe(DEFAULT_CHUNK_SIZE);
+  });
+
+  it('halves chunk size for small files', () => {
+    // 5KB file: 256KiB → only 1 chunk. Need to halve until ≥ 4 chunks.
+    // 5120 / 1024 = 5 → chunkSize = 1024
+    const chunkSize = selectChunkSize(5120, 4);
+    expect(Math.ceil(5120 / chunkSize)).toBeGreaterThanOrEqual(4);
+  });
+
+  it('returns 1 for very small files', () => {
+    // 4 bytes, need 4 chunks → chunkSize = 1
+    expect(selectChunkSize(4, 4)).toBe(1);
+  });
+});
+
+describe('chunkBytes', () => {
+  it('splits bytes into chunks of specified size', () => {
+    const data = new Uint8Array([1, 2, 3, 4, 5, 6, 7]);
+    const chunks = chunkBytes(data, 3);
+    expect(chunks).toHaveLength(3);
+    expect(Array.from(chunks[0])).toEqual([1, 2, 3]);
+    expect(Array.from(chunks[1])).toEqual([4, 5, 6]);
+    expect(Array.from(chunks[2])).toEqual([7]);
+  });
+
+  it('handles exact division', () => {
+    const data = new Uint8Array([1, 2, 3, 4]);
+    const chunks = chunkBytes(data, 2);
+    expect(chunks).toHaveLength(2);
+  });
+});
+
+describe('hashLeaf', () => {
+  it('produces 32-byte hash', async () => {
+    const data = new Uint8Array([1, 2, 3]);
+    const hash = await hashLeaf(0, data);
+    expect(hash).toBeInstanceOf(Uint8Array);
+    expect(hash.length).toBe(32);
+  });
+
+  it('different indices produce different hashes', async () => {
+    const data = new Uint8Array([1, 2, 3]);
+    const h0 = await hashLeaf(0, data);
+    const h1 = await hashLeaf(1, data);
+    expect(h0).not.toEqual(h1);
+  });
+});
+
+describe('hashNode', () => {
+  it('produces 32-byte hash', async () => {
+    const left = new Uint8Array(32).fill(1);
+    const right = new Uint8Array(32).fill(2);
+    const hash = await hashNode(left, right);
+    expect(hash).toBeInstanceOf(Uint8Array);
+    expect(hash.length).toBe(32);
+  });
+
+  it('is order-dependent', async () => {
+    const a = new Uint8Array(32).fill(1);
+    const b = new Uint8Array(32).fill(2);
+    const h1 = await hashNode(a, b);
+    const h2 = await hashNode(b, a);
+    expect(h1).not.toEqual(h2);
+  });
+});
+
+describe('buildTree', () => {
+  it('builds tree with single leaf', async () => {
+    const leaf = new Uint8Array(32).fill(42);
+    const tree = await buildTree([leaf]);
+    expect(tree).toHaveLength(1);
+    expect(tree[0]).toHaveLength(1);
+    expect(tree[0][0]).toEqual(leaf);
+  });
+
+  it('builds tree with two leaves', async () => {
+    const l0 = new Uint8Array(32).fill(1);
+    const l1 = new Uint8Array(32).fill(2);
+    const tree = await buildTree([l0, l1]);
+    expect(tree).toHaveLength(2);
+    expect(tree[0]).toHaveLength(2); // leaves
+    expect(tree[1]).toHaveLength(1); // root
+  });
+
+  it('builds tree with odd number of leaves', async () => {
+    const leaves = [
+      new Uint8Array(32).fill(1),
+      new Uint8Array(32).fill(2),
+      new Uint8Array(32).fill(3),
+    ];
+    const tree = await buildTree(leaves);
+    // Level 0: 3 leaves, Level 1: 2 nodes, Level 2: 1 root
+    expect(tree).toHaveLength(3);
+    expect(tree[tree.length - 1]).toHaveLength(1); // root
+  });
+});
+
+describe('deriveIndices', () => {
+  it('produces correct number of indices', async () => {
+    const root = new Uint8Array(32).fill(0xAB);
+    const indices = await deriveIndices(root, 100, 8);
+    expect(indices).toHaveLength(8);
+  });
+
+  it('all indices are unique', async () => {
+    const root = new Uint8Array(32).fill(0xCD);
+    const indices = await deriveIndices(root, 100, 8);
+    const unique = new Set(indices);
+    expect(unique.size).toBe(indices.length);
+  });
+
+  it('all indices are within range', async () => {
+    const root = new Uint8Array(32).fill(0xEF);
+    const indices = await deriveIndices(root, 10, 8);
+    for (const idx of indices) {
+      expect(idx).toBeGreaterThanOrEqual(0);
+      expect(idx).toBeLessThan(10);
+    }
+  });
+
+  it('caps at numChunks if fewer than challengeCount', async () => {
+    const root = new Uint8Array(32).fill(0x11);
+    const indices = await deriveIndices(root, 3, 8);
+    expect(indices).toHaveLength(3); // can't have more than numChunks unique indices
+  });
+
+  it('is deterministic', async () => {
+    const root = new Uint8Array(32).fill(0x22);
+    const i1 = await deriveIndices(root, 50, 8);
+    const i2 = await deriveIndices(root, 50, 8);
+    expect(i1).toEqual(i2);
+  });
+});
+
+describe('buildCommitment', () => {
+  it('returns undefined for tiny files', async () => {
+    const data = new Uint8Array([1, 2, 3]); // 3 bytes < MIN_TOTAL_SIZE
+    const result = await buildCommitment(data);
+    expect(result).toBeUndefined();
+  });
+
+  it('builds commitment for normal file', async () => {
+    // 2KB file
+    const data = new Uint8Array(2048);
+    for (let i = 0; i < data.length; i++) data[i] = i % 256;
+
+    const result = await buildCommitment(data, 8, 4);
+    expect(result).toBeDefined();
+    const { commitment, tree } = result!;
+
+    expect(commitment.commitmentType).toBe(COMMITMENT_TYPE);
+    expect(commitment.hashAlgo).toBe(HashAlgo.HASH_ALGO_BLAKE3);
+    expect(commitment.totalSize).toBe(BigInt(2048));
+    expect(commitment.root.length).toBe(32);
+    expect(commitment.challengeIndices.length).toBeGreaterThan(0);
+    expect(commitment.challengeIndices.length).toBeLessThanOrEqual(8);
+
+    // All indices should be unique and in range
+    const unique = new Set(commitment.challengeIndices);
+    expect(unique.size).toBe(commitment.challengeIndices.length);
+    for (const idx of commitment.challengeIndices) {
+      expect(idx).toBeGreaterThanOrEqual(0);
+      expect(idx).toBeLessThan(commitment.numChunks);
+    }
+
+    // Tree root should match commitment root
+    expect(tree[tree.length - 1][0]).toEqual(commitment.root);
+  });
+
+  it('is deterministic', async () => {
+    const data = new Uint8Array(1024);
+    for (let i = 0; i < data.length; i++) data[i] = i % 256;
+
+    const r1 = await buildCommitment(data, 4, 4);
+    const r2 = await buildCommitment(data, 4, 4);
+    expect(r1!.commitment.root).toEqual(r2!.commitment.root);
+    expect(r1!.commitment.challengeIndices).toEqual(r2!.commitment.challengeIndices);
+  });
+
+  it('handles edge case: exactly MIN_TOTAL_SIZE bytes', async () => {
+    const data = new Uint8Array([0xDE, 0xAD, 0xBE, 0xEF]); // exactly 4 bytes
+    const result = await buildCommitment(data, 8, 4);
+    expect(result).toBeDefined();
+    expect(result!.commitment.numChunks).toBe(4); // 4 bytes, chunkSize=1 → 4 chunks
+    expect(result!.commitment.chunkSize).toBe(1);
+  });
+});