Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion p2p/kademlia/version_gate.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
package kademlia

import "strings"
import (
"os"
"strings"
)

var requiredVer string

Expand All @@ -18,6 +21,11 @@ func requiredVersion() string {
// Policy: required and peer must both be non-empty and exactly equal.
func versionMismatch(peerVersion string) (required string, mismatch bool) {
required = requiredVersion()
// Bypass strict gating during integration tests.
// Tests set os.Setenv("INTEGRATION_TEST", "true").
if os.Getenv("INTEGRATION_TEST") == "true" {
return required, false
}
peer := strings.TrimSpace(peerVersion)
if required == "" || peer == "" || peer != required {
return required, true
Expand Down
66 changes: 66 additions & 0 deletions pkg/cascadekit/cascadekit_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
package cascadekit

import (
"encoding/base64"
"testing"

"github.com/LumeraProtocol/supernode/v2/pkg/codec"
"github.com/klauspost/compress/zstd"
)

// TestExtractIndexAndCreatorSig_Strict verifies the strict two-segment
// contract of ExtractIndexAndCreatorSig: anything other than exactly
// "index_b64.creator_sig_b64" is rejected.
func TestExtractIndexAndCreatorSig_Strict(t *testing.T) {
	// One segment only: must be rejected.
	_, _, err := ExtractIndexAndCreatorSig("abc")
	if err == nil {
		t.Fatalf("expected error for single segment")
	}
	// Three segments: must be rejected.
	_, _, err = ExtractIndexAndCreatorSig("a.b.c")
	if err == nil {
		t.Fatalf("expected error for three segments")
	}
	// Exactly two segments: accepted and split verbatim.
	idxPart, sigPart, err := ExtractIndexAndCreatorSig("a.b")
	if err != nil || idxPart != "a" || sigPart != "b" {
		t.Fatalf("unexpected result: a=%q b=%q err=%v", idxPart, sigPart, err)
	}
}

// TestParseCompressedIndexFile_Strict exercises ParseCompressedIndexFile with
// a well-formed three-segment payload and with malformed segment counts.
func TestParseCompressedIndexFile_Strict(t *testing.T) {
	// Reuse one zstd encoder for every payload built by this test.
	enc, _ := zstd.NewWriter(nil)
	defer enc.Close()
	compress := func(b []byte) []byte { return enc.EncodeAll(b, nil) }

	idx := IndexFile{LayoutIDs: []string{"L1", "L2"}, LayoutSignature: base64.StdEncoding.EncodeToString([]byte("sig"))}
	idxB64, err := EncodeIndexB64(idx)
	if err != nil {
		t.Fatalf("encode index: %v", err)
	}
	creatorSig := base64.StdEncoding.EncodeToString([]byte("sig2"))
	// Well-formed payload: index_b64.creator_sig_b64.counter
	wellFormed := compress([]byte(idxB64 + "." + creatorSig + ".0"))

	got, err := ParseCompressedIndexFile(wellFormed)
	if err != nil {
		t.Fatalf("parse compressed index: %v", err)
	}
	if got.LayoutSignature != idx.LayoutSignature || len(got.LayoutIDs) != 2 {
		t.Fatalf("unexpected index decoded: %+v", got)
	}

	// Malformed: only two dot-separated segments.
	if _, err := ParseCompressedIndexFile(compress([]byte("a.b"))); err == nil {
		t.Fatalf("expected error for two segments")
	}
	// Malformed: four dot-separated segments.
	if _, err := ParseCompressedIndexFile(compress([]byte("a.b.c.d"))); err == nil {
		t.Fatalf("expected error for four segments")
	}
}

// TestVerifySingleBlock confirms VerifySingleBlock accepts a layout with
// exactly one block and rejects layouts with more than one.
func TestVerifySingleBlock(t *testing.T) {
	single := codec.Layout{Blocks: []codec.Block{{}}}
	if err := VerifySingleBlock(single); err != nil {
		t.Fatalf("unexpected error for single block: %v", err)
	}
	multi := codec.Layout{Blocks: []codec.Block{{}, {}}}
	if err := VerifySingleBlock(multi); err == nil {
		t.Fatalf("expected error for multi-block layout")
	}
}
2 changes: 1 addition & 1 deletion pkg/cascadekit/doc.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
// Scope:
// - Build and sign layout metadata (RaptorQ layout) and index files
// - Generate redundant metadata files and index files + their IDs
// - Extract and decode index payloads from the on-chain signatures string
// - Extract and decode index payloads from the on-chain index signature format string
// - Compute data hashes for request metadata
// - Verify single-block layout consistency (explicit error if more than 1 block)
//
Expand Down
15 changes: 2 additions & 13 deletions pkg/cascadekit/hash.go
Original file line number Diff line number Diff line change
@@ -1,26 +1,15 @@
package cascadekit

import (
"bytes"
"encoding/base64"
"io"

"lukechampine.com/blake3"
"github.com/LumeraProtocol/supernode/v2/pkg/utils"
)

// ComputeBlake3Hash computes a 32-byte Blake3 hash of the given data.
func ComputeBlake3Hash(msg []byte) ([]byte, error) {
hasher := blake3.New(32, nil)
if _, err := io.Copy(hasher, bytes.NewReader(msg)); err != nil {
return nil, err
}
return hasher.Sum(nil), nil
}

// ComputeBlake3DataHashB64 computes a Blake3 hash of the input and
// returns it as a base64-encoded string.
func ComputeBlake3DataHashB64(data []byte) (string, error) {
h, err := ComputeBlake3Hash(data)
h, err := utils.Blake3Hash(data)
if err != nil {
return "", err
}
Expand Down
124 changes: 63 additions & 61 deletions pkg/cascadekit/ids.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,100 +2,102 @@ package cascadekit

import (
"bytes"
"fmt"
"strconv"

"github.com/LumeraProtocol/supernode/v2/pkg/errors"
"github.com/LumeraProtocol/supernode/v2/pkg/utils"
"github.com/cosmos/btcutil/base58"
"github.com/klauspost/compress/zstd"
)

// GenerateLayoutIDs computes IDs for redundant layout files (not the final index IDs).
// The ID is base58(blake3(zstd(layout_b64.layout_sig_b64.counter))).
func GenerateLayoutIDs(layoutB64, layoutSigB64 string, ic, max uint32) []string {
layoutWithSig := fmt.Sprintf("%s.%s", layoutB64, layoutSigB64)
layoutIDs := make([]string, max)

var buffer bytes.Buffer
buffer.Grow(len(layoutWithSig) + 10)

for i := uint32(0); i < max; i++ {
buffer.Reset()
buffer.WriteString(layoutWithSig)
buffer.WriteByte('.')
buffer.WriteString(fmt.Sprintf("%d", ic+i))

compressedData, err := utils.ZstdCompress(buffer.Bytes())
if err != nil {
continue
}

hash, err := utils.Blake3Hash(compressedData)
if err != nil {
continue
}

layoutIDs[i] = base58.Encode(hash)
}

return layoutIDs
// GenerateLayoutIDs computes content-addressed IDs for the redundant layout
// files (not the final index IDs), delegating the per-counter work to generateIDs.
// The ID is base58(blake3(zstd(layout_signature_format.counter))).
// layoutSignatureFormat must be: base64(JSON(layout)).layout_signature_base64
// Counters run from ic to ic+max-1, one ID per counter.
func GenerateLayoutIDs(layoutSignatureFormat string, ic, max uint32) ([]string, error) {
return generateIDs([]byte(layoutSignatureFormat), ic, max)
}

// GenerateIndexIDs computes IDs for index files from the full signatures string.
func GenerateIndexIDs(signatures string, ic, max uint32) []string {
indexFileIDs := make([]string, max)

var buffer bytes.Buffer
buffer.Grow(len(signatures) + 10)

for i := uint32(0); i < max; i++ {
buffer.Reset()
buffer.WriteString(signatures)
buffer.WriteByte('.')
buffer.WriteString(fmt.Sprintf("%d", ic+i))

compressedData, err := utils.ZstdCompress(buffer.Bytes())
if err != nil {
continue
}
hash, err := utils.Blake3Hash(compressedData)
if err != nil {
continue
}
indexFileIDs[i] = base58.Encode(hash)
}
return indexFileIDs
// GenerateIndexIDs computes IDs for index files from the full index signature format string.
// indexSignatureFormat is expected to be: base64(JSON(index)).creator_signature_base64.
// Each ID is base58(blake3(zstd(indexSignatureFormat.counter))) for counters
// ic..ic+max-1; the per-counter derivation is delegated to generateIDs.
func GenerateIndexIDs(indexSignatureFormat string, ic, max uint32) ([]string, error) {
return generateIDs([]byte(indexSignatureFormat), ic, max)
}

// getIDFiles generates ID files by appending a '.' and counter, compressing,
// and returning both IDs and compressed payloads.
func getIDFiles(file []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) {
// generateIDFiles builds compressed ID files from a base payload and returns
// both their content-addressed IDs and the compressed files themselves.
// For each counter in [ic..ic+max-1], the payload is:
//
// base + '.' + counter
//
// then zstd-compressed; the ID is base58(blake3(compressed)).
func generateIDFiles(base []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) {
idFiles := make([][]byte, 0, max)
ids = make([]string, 0, max)
var buffer bytes.Buffer

// Reuse a single zstd encoder across iterations
enc, zerr := zstd.NewWriter(nil)
if zerr != nil {
return ids, idFiles, errors.Errorf("compress identifiers file: %w", zerr)
}
defer enc.Close()

for i := uint32(0); i < max; i++ {
buffer.Reset()
counter := ic + i

buffer.Write(file)
buffer.Write(base)
buffer.WriteByte(SeparatorByte)
buffer.WriteString(strconv.Itoa(int(counter)))
// Append counter efficiently without intermediate string
var tmp [20]byte
cnt := strconv.AppendUint(tmp[:0], uint64(counter), 10)
buffer.Write(cnt)

compressedData, err := utils.ZstdCompress(buffer.Bytes())
if err != nil {
return ids, idFiles, errors.Errorf("compress identifiers file: %w", err)
}
compressedData := enc.EncodeAll(buffer.Bytes(), nil)

idFiles = append(idFiles, compressedData)

hash, err := utils.Blake3Hash(compressedData)
if err != nil {
return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err)
return ids, idFiles, errors.Errorf("blake3 hash error getting an id file: %w", err)
}

ids = append(ids, base58.Encode(hash))
}

return ids, idFiles, nil
}

// generateIDs computes base58(blake3(zstd(base + '.' + counter))) for counters ic..ic+max-1.
// A single zstd encoder and a single scratch buffer are reused across
// iterations so the loop does no avoidable per-iteration allocation.
func generateIDs(base []byte, ic, max uint32) ([]string, error) {
	out := make([]string, max)

	encoder, err := zstd.NewWriter(nil)
	if err != nil {
		return nil, errors.Errorf("zstd encoder init: %w", err)
	}
	defer encoder.Close()

	var payload bytes.Buffer
	// Room for base, the separator byte, and up to 10 decimal digits.
	payload.Grow(len(base) + 12)

	// Scratch space for the decimal rendering of the counter.
	var digits [20]byte
	for i := uint32(0); i < max; i++ {
		payload.Reset()
		payload.Write(base)
		payload.WriteByte(SeparatorByte)
		payload.Write(strconv.AppendUint(digits[:0], uint64(ic+i), 10))

		blob := encoder.EncodeAll(payload.Bytes(), nil)
		sum, herr := utils.Blake3Hash(blob)
		if herr != nil {
			return nil, errors.Errorf("blake3 hash (i=%d): %w", i, herr)
		}
		out[i] = base58.Encode(sum)
	}
	return out, nil
}
25 changes: 10 additions & 15 deletions pkg/cascadekit/index.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,13 +24,13 @@ func BuildIndex(layoutIDs []string, layoutSigB64 string) IndexFile {
return IndexFile{LayoutIDs: layoutIDs, LayoutSignature: layoutSigB64}
}

// EncodeIndexB64 marshals an index file and returns both the raw JSON and base64.
func EncodeIndexB64(idx IndexFile) (b64 string, raw []byte, err error) {
raw, err = json.Marshal(idx)
// EncodeIndexB64 marshals an index file and returns its base64-encoded JSON.
func EncodeIndexB64(idx IndexFile) (string, error) {
raw, err := json.Marshal(idx)
if err != nil {
return "", nil, errors.Errorf("marshal index file: %w", err)
return "", errors.Errorf("marshal index file: %w", err)
}
return base64.StdEncoding.EncodeToString(raw), raw, nil
return base64.StdEncoding.EncodeToString(raw), nil
}

// DecodeIndexB64 decodes base64(JSON(IndexFile)).
Expand All @@ -46,17 +46,12 @@ func DecodeIndexB64(data string) (IndexFile, error) {
return indexFile, nil
}

// ExtractIndexAndCreatorSig splits a signatures string formatted as:
// ExtractIndexAndCreatorSig splits a signature-format string formatted as:
// Base64(index_json).Base64(creator_signature)
func ExtractIndexAndCreatorSig(signatures string) (indexB64 string, creatorSigB64 string, err error) {
parts := strings.Split(signatures, ".")
if len(parts) < 2 {
return "", "", errors.New("invalid signatures format")
func ExtractIndexAndCreatorSig(indexSignatureFormat string) (indexB64 string, creatorSigB64 string, err error) {
parts := strings.Split(indexSignatureFormat, ".")
if len(parts) != 2 {
return "", "", errors.New("invalid index signature format: expected 2 segments (index_b64.creator_sig_b64)")
}
return parts[0], parts[1], nil
}

// MakeSignatureFormat composes the final signatures string.
func MakeSignatureFormat(indexB64, creatorSigB64 string) string {
return indexB64 + "." + creatorSigB64
}
4 changes: 2 additions & 2 deletions pkg/cascadekit/index_parse.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@ func ParseCompressedIndexFile(data []byte) (IndexFile, error) {
return IndexFile{}, errors.Errorf("decompress index file: %w", err)
}
parts := bytes.Split(decompressed, []byte{SeparatorByte})
if len(parts) < 2 {
return IndexFile{}, errors.New("invalid index file format")
if len(parts) != 3 {
return IndexFile{}, errors.New("invalid index file format: expected 3 parts (index_b64.creator_sig_b64.counter)")
}
return DecodeIndexB64(string(parts[0]))
}
14 changes: 14 additions & 0 deletions pkg/cascadekit/keyring_signatures.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
package cascadekit

import (
"github.com/LumeraProtocol/supernode/v2/pkg/codec"
keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring"
cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring"
)

// CreateSignaturesWithKeyring signs layout and index using a Cosmos keyring.
// It adapts the keyring into a plain signing callback and defers to
// CreateSignatures for the actual signature construction, keeping all
// keyring-backed signing in one place.
func CreateSignaturesWithKeyring(layout codec.Layout, kr cosmoskeyring.Keyring, keyName string, ic, max uint32) (string, []string, error) {
	sign := func(msg []byte) ([]byte, error) {
		return keyringpkg.SignBytes(kr, keyName, msg)
	}
	return CreateSignatures(layout, sign, ic, max)
}
4 changes: 2 additions & 2 deletions pkg/cascadekit/metadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,12 @@ import (

// NewCascadeMetadata creates a types.CascadeMetadata for RequestAction.
// The keeper will populate rq_ids_max; rq_ids_ids is for FinalizeAction only.
func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, signatures string, public bool) actiontypes.CascadeMetadata {
func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, indexSignatureFormat string, public bool) actiontypes.CascadeMetadata {
return actiontypes.CascadeMetadata{
DataHash: dataHashB64,
FileName: fileName,
RqIdsIc: rqIdsIc,
Signatures: signatures,
Signatures: indexSignatureFormat,
Public: public,
}
}
2 changes: 1 addition & 1 deletion pkg/cascadekit/parsers.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@ package cascadekit

import (
"bytes"
"encoding/json"

"github.com/LumeraProtocol/supernode/v2/pkg/codec"
"github.com/LumeraProtocol/supernode/v2/pkg/errors"
"github.com/LumeraProtocol/supernode/v2/pkg/utils"
json "github.com/json-iterator/go"
)

// ParseRQMetadataFile parses a compressed rq metadata file into layout, signature and counter.
Expand Down
Loading