From 19c4e6294095f908e951c9197996756d1b564f55 Mon Sep 17 00:00:00 2001 From: Xav Date: Tue, 3 Mar 2026 10:53:07 -0500 Subject: [PATCH 01/16] feat(ffi): wip initial implementation --- archivist/archivist.nim | 2 +- library/Makefile | 80 +++ library/alloc.nim | 101 +++ library/archivist_context.nim | 187 ++++++ .../archivist_thread_request.nim | 117 ++++ .../requests/node_debug_request.nim | 106 +++ .../requests/node_download_request.nim | 306 +++++++++ .../requests/node_info_request.nim | 69 ++ .../requests/node_lifecycle_request.nim | 192 ++++++ .../requests/node_p2p_request.nim | 187 ++++++ .../requests/node_storage_request.nim | 185 ++++++ .../requests/node_upload_request.nim | 303 +++++++++ library/config.nims | 1 + library/ffi_types.nim | 65 ++ library/libarchivist.h | 610 +++++++++++++++++ library/libarchivist.nim | 620 ++++++++++++++++++ library/test_ffi.c | 452 +++++++++++++ 17 files changed, 3582 insertions(+), 1 deletion(-) create mode 100644 library/Makefile create mode 100644 library/alloc.nim create mode 100644 library/archivist_context.nim create mode 100644 library/archivist_thread_requests/archivist_thread_request.nim create mode 100644 library/archivist_thread_requests/requests/node_debug_request.nim create mode 100644 library/archivist_thread_requests/requests/node_download_request.nim create mode 100644 library/archivist_thread_requests/requests/node_info_request.nim create mode 100644 library/archivist_thread_requests/requests/node_lifecycle_request.nim create mode 100644 library/archivist_thread_requests/requests/node_p2p_request.nim create mode 100644 library/archivist_thread_requests/requests/node_storage_request.nim create mode 100644 library/archivist_thread_requests/requests/node_upload_request.nim create mode 100644 library/config.nims create mode 100644 library/ffi_types.nim create mode 100644 library/libarchivist.h create mode 100644 library/libarchivist.nim create mode 100644 library/test_ffi.c diff --git a/archivist/archivist.nim 
b/archivist/archivist.nim index 96dfb2b4..a490f792 100644 --- a/archivist/archivist.nim +++ b/archivist/archivist.nim @@ -46,7 +46,7 @@ type NodeServer* = ref object config: NodeConf restServer: RestServerRef - archivistNode: ArchivistNodeRef + archivistNode*: ArchivistNodeRef repoStore: RepoStore maintenance: BlockMaintainer taskpool: Taskpool diff --git a/library/Makefile b/library/Makefile new file mode 100644 index 00000000..42edcec6 --- /dev/null +++ b/library/Makefile @@ -0,0 +1,80 @@ +# Makefile for building and testing libarchivist + +CC = gcc +CFLAGS = -Wall -Wextra -O2 -fPIC +LDFLAGS = -Lbuild/lib -larchivist -lpthread + +NIM = nim +NIM_FLAGS = --app:lib --opt:speed -d:release --mm:arc --nimMainPrefix:libarchivist + +BUILD_DIR = build +LIB_DIR = $(BUILD_DIR)/lib +BIN_DIR = $(BUILD_DIR)/bin + +LIB_NAME = libarchivist +LIB_SO = $(LIB_DIR)/$(LIB_NAME).so +LIB_A = $(LIB_DIR)/$(LIB_NAME).a +TEST_BIN = $(BIN_DIR)/test_ffi + +NIM_SOURCES = libarchivist.nim +C_TEST_SOURCES = test_ffi.c + +.PHONY: all +all: $(LIB_SO) $(TEST_BIN) + +$(BUILD_DIR): + mkdir -p $(BUILD_DIR) + +$(LIB_DIR): + mkdir -p $(LIB_DIR) + +$(BIN_DIR): + mkdir -p $(BIN_DIR) + +$(LIB_SO): $(NIM_SOURCES) | $(LIB_DIR) + cd .. && $(NIM) c $(NIM_FLAGS) --out:library/$(LIB_SO) library/$(NIM_SOURCES) + +$(LIB_A): $(NIM_SOURCES) | $(LIB_DIR) + $(NIM) c $(NIM_FLAGS) --out:$(LIB_A) $(NIM_SOURCES) + +$(TEST_BIN): $(C_TEST_SOURCES) $(LIB_SO) | $(BIN_DIR) + $(CC) $(CFLAGS) -o $(TEST_BIN) $(C_TEST_SOURCES) $(LDFLAGS) -I. + +.PHONY: test +test: $(TEST_BIN) + @echo "Running FFI tests..." 
+ @LD_LIBRARY_PATH=$(LIB_DIR) $(TEST_BIN) + +.PHONY: clean +clean: + rm -rf $(BUILD_DIR) + rm -f *.so *.a + +.PHONY: install +install: $(LIB_SO) + install -d /usr/local/lib + install -m 644 $(LIB_SO) /usr/local/lib/ + install -d /usr/local/include + install -m 644 libarchivist.h /usr/local/include/ + ldconfig + +.PHONY: uninstall +uninstall: + rm -f /usr/local/lib/$(LIB_NAME).so + rm -f /usr/local/include/libarchivist.h + ldconfig + +.PHONY: check +check: + $(NIM) check libarchivist.nim + +.PHONY: help +help: + @echo "Available targets:" + @echo " all - Build library and test program (default)" + @echo " test - Run FFI tests" + @echo " clean - Remove build artifacts" + @echo " install - Install library to /usr/local" + @echo " uninstall - Remove library from /usr/local" + @echo " check - Check Nim code for errors" + @echo " help - Show this help message" diff --git a/library/alloc.nim b/library/alloc.nim new file mode 100644 index 00000000..81140f1e --- /dev/null +++ b/library/alloc.nim @@ -0,0 +1,101 @@ +## Memory allocation helpers for FFI +## +## This file provides memory allocation utilities for the library's FFI layer. +## These helpers are designed for thread-safe shared memory allocation. 
+ +{.pragma: exported, exportc, cdecl, raises: [].} + +################################################################################ +### SharedSeq type for thread-safe sequences + +type SharedSeq*[T] = tuple[data: ptr UncheckedArray[T], len: int] + +################################################################################ +### String allocation helpers (shared memory for thread safety) + +proc alloc*(str: cstring): cstring = + if str.isNil: + var ret = cast[cstring](allocShared(1)) + ret[0] = '\0' + return ret + + let ret = cast[cstring](allocShared(len(str) + 1)) + copyMem(ret, str, len(str) + 1) + return ret + +proc alloc*(str: string): cstring = + if str.len == 0: + var ret = cast[cstring](allocShared(1)) + ret[0] = '\0' + return ret + + var ret = cast[cstring](allocShared(str.len + 1)) + let s = cast[seq[char]](str) + for i in 0 ..< str.len: + ret[i] = s[i] + ret[str.len] = '\0' + return ret + +proc allocCString*(s: string): cstring = + return alloc(s) + +proc deallocCString*(s: cstring) = + if not s.isNil: + deallocShared(s) + +################################################################################ +### Buffer allocation helpers + +proc allocBuffer*(size: csize_t): pointer = + if size == 0: + return nil + result = allocShared0(size) + +proc deallocBuffer*(p: pointer) = + ## Free a buffer allocated by allocBuffer. + if not p.isNil: + deallocShared(p) + +################################################################################ +### SharedSeq helpers + +proc allocSharedSeq*[T](s: seq[T]): SharedSeq[T] = + let data = allocShared(sizeof(T) * s.len) + if s.len != 0: + copyMem(data, unsafeAddr s[0], sizeof(T) * s.len) + return (cast[ptr UncheckedArray[T]](data), s.len) + +proc deallocSharedSeq*[T](s: var SharedSeq[T]) = + ## Free a SharedSeq. 
+ deallocShared(s.data) + s.len = 0 + +proc toSeq*[T](s: SharedSeq[T]): seq[T] = + var ret = newSeq[T]() + for i in 0 ..< s.len: + ret.add(s.data[i]) + return ret + +################################################################################ +### Safe string copy + +proc copyToBuffer*(dest: pointer, src: string, maxSize: csize_t): csize_t = + if dest.isNil or maxSize == 0: + return 0 + let copyLen = min(src.len, maxSize.int) + copyMem(dest, unsafeAddr src[0], copyLen) + return copyLen.csize_t + +################################################################################ +### Shared object allocation + +proc createShared*[T](): ptr T = + result = cast[ptr T](allocShared0(sizeof(T))) + +proc createShared*[T](val: T): ptr T = + result = cast[ptr T](allocShared0(sizeof(T))) + result[] = val + +proc destroyShared*[T](p: ptr T) = + if not p.isNil: + deallocShared(p) diff --git a/library/archivist_context.nim b/library/archivist_context.nim new file mode 100644 index 00000000..c6bac000 --- /dev/null +++ b/library/archivist_context.nim @@ -0,0 +1,187 @@ +## Archivist Context and Thread Management +## +## This file defines the Archivist context and its thread flow: +## 1. Client enqueues a request and signals the Archivist thread. +## 2. The Archivist thread dequeues the request and sends an ack (reqReceivedSignal). +## 3. The Archivist thread executes the request asynchronously. +## 4. On completion, the Archivist thread invokes the client callback with the result and userData. 
+ +{.pragma: exported, exportc, cdecl, raises: [].} +{.pragma: callback, cdecl, raises: [], gcsafe.} +{.passc: "-fPIC".} + +import std/[options, locks, atomics] +import chronicles +import chronos +import chronos/threadsync +import taskpools/channels_spsc_single +import ./ffi_types +import ./archivist_thread_requests/[archivist_thread_request] + +from ../archivist/archivist import NodeServer + +logScope: + topics = "libarchivist" + +type ArchivistContext* = object + thread: Thread[(ptr ArchivistContext)] + + # TODO: Should probably use a MP Channel insted of SP to process requests concurrently + lock: Lock + + reqChannel: ChannelSPSCSingle[ptr ArchivistThreadRequest] + + reqSignal: ThreadSignalPtr + + reqReceivedSignal: ThreadSignalPtr + + userData*: pointer + + eventCallback*: pointer + + eventUserData*: pointer + + running: Atomic[bool] + +template callEventCallback(ctx: ptr ArchivistContext, eventName: string, body: untyped) = + if isNil(ctx[].eventCallback): + error eventName & " - eventCallback is nil" + return + + foreignThreadGc: + try: + let event = body + cast[ArchivistCallback](ctx[].eventCallback)( + RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData + ) + except CatchableError: + let msg = + "Exception " & eventName & " when calling 'eventCallBack': " & + getCurrentExceptionMsg() + cast[ArchivistCallback](ctx[].eventCallback)( + RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData + ) + +proc sendRequestToArchivistThread*( + ctx: ptr ArchivistContext, + reqType: RequestType, + reqContent: pointer, + callback: ArchivistCallback, + userData: pointer, + timeout = InfiniteDuration, +): Result[void, string] = + ctx.lock.acquire() + + defer: + ctx.lock.release() + + let req = ArchivistThreadRequest.createShared(reqType, reqContent, callback, userData) + + let sentOk = ctx.reqChannel.trySend(req) + if not sentOk: + deallocShared(req) + return err("Failed to send request to the Archivist thread: " & $req[]) + + let 
fireSyncRes = ctx.reqSignal.fireSync() + if fireSyncRes.isErr(): + deallocShared(req) + return err( + "Failed to send request to the Archivist thread: unable to fireSync: " & + $fireSyncRes.error + ) + + if fireSyncRes.get() == false: + deallocShared(req) + return + err("Failed to send request to the Archivist thread: fireSync timed out.") + + let res = ctx.reqReceivedSignal.waitSync(timeout) + if res.isErr(): + deallocShared(req) + return err( + "Failed to send request to the Archivist thread: unable to receive reqReceivedSignal signal." + ) + + ok() + +proc runArchivist(ctx: ptr ArchivistContext) {.async: (raises: []).} = + var archivist: NodeServer + + while true: + try: + await ctx.reqSignal.wait() + except Exception as e: + error "Failure in run Archivist thread while waiting for reqSignal.", + error = e.msg + continue + + if ctx.running.load == false: + break + + var request: ptr ArchivistThreadRequest + + let recvOk = ctx.reqChannel.tryRecv(request) + if not recvOk: + error "Failure in run Archivist: unable to receive request in Archivist thread." + continue + + asyncSpawn ( + proc() {.async.} = + await sleepAsync(0) + await ArchivistThreadRequest.process(request, addr archivist) + )() + + let fireRes = ctx.reqReceivedSignal.fireSync() + if fireRes.isErr(): + error "Failure in run Archivist: unable to fire back to requester thread.", + error = fireRes.error + +proc run(ctx: ptr ArchivistContext) {.thread.} = + waitFor runArchivist(ctx) + +proc createArchivistContext*(): Result[ptr ArchivistContext, string] = + var ctx = createShared(ArchivistContext, 1) + + ctx.reqSignal = ThreadSignalPtr.new().valueOr: + return + err("Failed to create a context: unable to create reqSignal ThreadSignalPtr.") + + ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr: + return err( + "Failed to create Archivist context: unable to create reqReceivedSignal ThreadSignalPtr." 
+ ) + + ctx.lock.initLock() + + ctx.running.store(true) + + try: + createThread(ctx.thread, run, ctx) + except ValueError, ResourceExhaustedError: + freeShared(ctx) + return err( + "Failed to create Archivist context: unable to create thread: " & + getCurrentExceptionMsg() + ) + + return ok(ctx) + +proc destroyArchivistContext*(ctx: ptr ArchivistContext): Result[void, string] = + ctx.running.store(false) + + let signaledOnTime = ctx.reqSignal.fireSync().valueOr: + return err("Failed to destroy Archivist context: " & $error) + + if not signaledOnTime: + return err( + "Failed to destroy Archivist context: unable to get signal reqSignal on time in destroyArchivistContext." + ) + + joinThread(ctx.thread) + + ctx.lock.deinitLock() + ?ctx.reqSignal.close() + ?ctx.reqReceivedSignal.close() + freeShared(ctx) + + return ok() diff --git a/library/archivist_thread_requests/archivist_thread_request.nim b/library/archivist_thread_requests/archivist_thread_request.nim new file mode 100644 index 00000000..efef1893 --- /dev/null +++ b/library/archivist_thread_requests/archivist_thread_request.nim @@ -0,0 +1,117 @@ +## This file contains the base message request type that will be handled. +## The requests are created by the main thread and processed by +## the Archivist Thread. 
+ +import std/json +import results +import chronos +import ../ffi_types +import ./requests/node_lifecycle_request +import ./requests/node_info_request +import ./requests/node_debug_request +import ./requests/node_p2p_request +import ./requests/node_upload_request +import ./requests/node_download_request +import ./requests/node_storage_request + +from ../../archivist/archivist import NodeServer + +type RequestType* {.pure.} = enum + LIFECYCLE + INFO + DEBUG + P2P + UPLOAD + DOWNLOAD + STORAGE + +type ArchivistThreadRequest* = object + reqType: RequestType + + reqContent: pointer + + callback: ArchivistCallback + + userData: pointer + +proc createShared*( + T: type ArchivistThreadRequest, + reqType: RequestType, + reqContent: pointer, + callback: ArchivistCallback, + userData: pointer, +): ptr type T = + var ret = createShared(T) + ret[].reqType = reqType + ret[].reqContent = reqContent + ret[].callback = callback + ret[].userData = userData + return ret + +# TODO: Look into how to improve callback handling (thread pool/mp channel) +proc handleRes[T: string | void | seq[byte]]( + res: Result[T, string], request: ptr ArchivistThreadRequest +) = + defer: + deallocShared(request) + + if res.isErr(): + foreignThreadGc: + let msg = $res.error + if msg == "": + request[].callback(RET_ERR, nil, cast[csize_t](0), request[].userData) + else: + request[].callback( + RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData + ) + return + + foreignThreadGc: + var msg: cstring = "" + when T is string: + msg = res.get().cstring() + request[].callback( + RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData + ) + return + +proc process*( + T: type ArchivistThreadRequest, + request: ptr ArchivistThreadRequest, + archivist: ptr NodeServer, +) {.async: (raises: []).} = + let retFut = + case request[].reqType + of LIFECYCLE: + cast[ptr NodeLifecycleRequest](request[].reqContent).process(archivist) + of INFO: + cast[ptr 
NodeInfoRequest](request[].reqContent).process(archivist) + of RequestType.DEBUG: + cast[ptr NodeDebugRequest](request[].reqContent).process(archivist) + of P2P: + cast[ptr NodeP2PRequest](request[].reqContent).process(archivist) + of STORAGE: + cast[ptr NodeStorageRequest](request[].reqContent).process(archivist) + of DOWNLOAD: + let onChunk = proc(bytes: seq[byte]) = + if bytes.len > 0: + request[].callback( + RET_PROGRESS, + cast[ptr cchar](unsafeAddr bytes[0]), + cast[csize_t](bytes.len), + request[].userData, + ) + + cast[ptr NodeDownloadRequest](request[].reqContent).process(archivist, onChunk) + of UPLOAD: + let onBlockReceived = proc(bytes: int) = + request[].callback(RET_PROGRESS, nil, cast[csize_t](bytes), request[].userData) + + cast[ptr NodeUploadRequest](request[].reqContent).process( + archivist, onBlockReceived + ) + + handleRes(await retFut, request) + +proc `$`*(self: ArchivistThreadRequest): string = + return $self.reqType diff --git a/library/archivist_thread_requests/requests/node_debug_request.nim b/library/archivist_thread_requests/requests/node_debug_request.nim new file mode 100644 index 00000000..d1534ac2 --- /dev/null +++ b/library/archivist_thread_requests/requests/node_debug_request.nim @@ -0,0 +1,106 @@ +## This file contains the debug request types that will be handled. +## DEBUG: get debug information about the node. +## LOG_LEVEL: set the log level at runtime. +## CONNECTED_PEERS: get the number of connected peers. +## CONNECTED_PEER_IDS: get the list of connected peer IDs. +## FIND_PEER: find a peer by ID using DHT discovery. +## PEER_INFO: get information about a specific peer. +## DISCONNECT: disconnect from a specific peer. 
+ +# TODO: Debug requests processing still need to be implemented + +{.push raises: [].} + +import std/json +import std/strutils +import chronos +import chronicles +import results +import libp2p +import ../../alloc +import ../../ffi_types + +from "../../../archivist/archivist" import NodeServer +from ../../../archivist/node import switch, discovery + +logScope: + topics = "libarchivist libarchivistdebug" + +type NodeDebugMsgType* = enum + DEBUG + LOG_LEVEL + CONNECTED_PEERS + CONNECTED_PEER_IDS + FIND_PEER + PEER_INFO + DISCONNECT + +type NodeDebugRequest* = object + operation: NodeDebugMsgType + data: cstring + +proc createShared*( + T: type NodeDebugRequest, op: NodeDebugMsgType, data: cstring = "" +): ptr type T = + var ret = createShared(T) + ret[].operation = op + ret[].data = data.alloc() + return ret + +proc destroyShared(self: ptr NodeDebugRequest) = + deallocShared(self[].data) + deallocShared(self) + +proc process*( + self: ptr NodeDebugRequest, archivist: ptr NodeServer +): Future[Result[string, string]] {.async: (raises: []).} = + defer: + destroyShared(self) + + case self.operation + of DEBUG: + var debugInfo = %*{ + "version": "0.1.0", + "status": "running" + } + return ok($debugInfo) + + of LOG_LEVEL: + let level = $self.data + info "Log level change requested", level = level + return ok("ok") + + of CONNECTED_PEERS: + if archivist[].isNil: + return err("Archivist node is not initialized") + return ok("0") + + of CONNECTED_PEER_IDS: + if archivist[].isNil: + return err("Archivist node is not initialized") + let json = %*{"peerIds": []} + return ok($json) + + of FIND_PEER: + if archivist[].isNil: + return err("Archivist node is not initialized") + let peerIdStr = $self.data + if peerIdStr == "": + return err("Peer ID is required") + return ok("{}") + + of PEER_INFO: + if archivist[].isNil: + return err("Archivist node is not initialized") + let peerIdStr = $self.data + if peerIdStr == "": + return err("Peer ID is required") + return ok("{}") + + of 
DISCONNECT: + if archivist[].isNil: + return err("Archivist node is not initialized") + let peerIdStr = $self.data + if peerIdStr == "": + return err("Peer ID is required") + return ok("disconnected") diff --git a/library/archivist_thread_requests/requests/node_download_request.nim b/library/archivist_thread_requests/requests/node_download_request.nim new file mode 100644 index 00000000..5008da4c --- /dev/null +++ b/library/archivist_thread_requests/requests/node_download_request.nim @@ -0,0 +1,306 @@ +## This file contains the download request. +## A session is created for each download identified by the CID, +## allowing to resume, pause and cancel the download (using chunks). +## +## There are two ways to download a file: +## 1. Via chunks: the cid parameter is the CID of the file to download. Steps are: +## - INIT: initializes the download session +## - CHUNK: downloads the next chunk of the file +## - CANCEL: cancels the download session +## 2. Via stream. +## - INIT: initializes the download session +## - STREAM: downloads the file in a streaming manner, calling +## the onChunk handler for each chunk and / or writing to a file if filepath is set. 
+## - CANCEL: cancels the download session + +{.push raises: [].} + +import std/[options, streams, tables] +import chronos +import chronicles +import libp2p/stream/[lpstream] +import serde/json as serde +import ../../alloc +import ../../../archivist/units +import ../../../archivist/archivisttypes +import ../../../archivist/manifest/manifest + +from "../../../archivist/archivist" import NodeServer +from "../../../archivist/node" import retrieve, fetchManifest +from libp2p import Cid, init, `$` + +logScope: + topics = "libarchivist libarchivistdownload" + +type NodeDownloadMsgType* = enum + INIT + CHUNK + STREAM + CANCEL + MANIFEST + +type OnChunkHandler = proc(bytes: seq[byte]): void {.gcsafe, raises: [].} + +type NodeDownloadRequest* = object + operation: NodeDownloadMsgType + cid: cstring + chunkSize: csize_t + local: bool + filepath: cstring + +type + DownloadSessionId* = string + DownloadSessionCount* = int + DownloadSession* = object + stream: LPStream + chunkSize: int + +var downloadSessions {.threadvar.}: Table[DownloadSessionId, DownloadSession] + +proc createShared*( + T: type NodeDownloadRequest, + op: NodeDownloadMsgType, + cid: string = "", + chunkSize: int = 0, + local: bool = false, + filepath: string = "" +): ptr type T = + var ret = createShared(T) + ret[].operation = op + ret[].cid = cid.alloc() + ret[].chunkSize = csize_t(chunkSize) + ret[].local = local + ret[].filepath = filepath.alloc() + + return ret + +proc destroyShared(self: ptr NodeDownloadRequest) = + deallocShared(self[].cid) + deallocShared(self[].filepath) + deallocShared(self) + +proc init( + archivist: ptr NodeServer, cCid: cstring = "", chunkSize: csize_t = 0, local: bool +): Future[Result[string, string]] {.async: (raises: []).} = + let cid = Cid.init($cCid) + if cid.isErr: + return err("Failed to download: cannot parse cid: " & $cCid) + + if downloadSessions.contains($cid): + return ok("Download session already exists.") + + var stream: LPStream + + try: + let res = await 
archivist[].archivistNode.retrieve(cid.get(), local) + if res.isErr(): + return err("Failed to init the download: " & res.error.msg) + stream = res.get() + except CancelledError: + downloadSessions.del($cid) + return err("Failed to init the download: download cancelled.") + + let blockSize = if chunkSize.int > 0: chunkSize.int else: DefaultBlockSize.int + downloadSessions[$cid] = DownloadSession(stream: stream, chunkSize: blockSize) + + return ok("") + +proc chunk( + archivist: ptr NodeServer, cCid: cstring = "", onChunk: OnChunkHandler +): Future[Result[string, string]] {.async: (raises: []).} = + let cid = Cid.init($cCid) + if cid.isErr: + return err("Failed to download chunk: cannot parse cid: " & $cCid) + + if not downloadSessions.contains($cid): + return err("Failed to download chunk: no session for cid " & $cid) + + var session: DownloadSession + try: + session = downloadSessions[$cid] + except KeyError: + return err("Failed to download chunk: no session for cid " & $cid) + + let stream = session.stream + if stream.atEof: + return ok("") + + let chunkSize = session.chunkSize + var buf = newSeq[byte](chunkSize) + + try: + let read = await stream.readOnce(addr buf[0], buf.len) + buf.setLen(read) + except LPStreamError as e: + await stream.close() + downloadSessions.del($cid) + return err("Failed to download chunk: " & $e.msg) + except CancelledError: + await stream.close() + downloadSessions.del($cid) + return err("Failed to download chunk: download cancelled.") + + if buf.len <= 0: + return err("Failed to download chunk: no data") + + onChunk(buf) + + return ok("") + +proc streamData( + archivist: ptr NodeServer, + stream: LPStream, + onChunk: OnChunkHandler, + chunkSize: csize_t, + filepath: cstring, +): Future[Result[string, string]] {. 
+ async: (raises: [CancelledError, LPStreamError, IOError]) +.} = + let blockSize = if chunkSize.int > 0: chunkSize.int else: DefaultBlockSize.int + var buf = newSeq[byte](blockSize) + var read = 0 + var outputStream: OutputStreamHandle + var filedest: string = $filepath + + try: + if filepath != "": + outputStream = filedest.fileOutput() + + while not stream.atEof: + await sleepAsync(0) + + let read = await stream.readOnce(addr buf[0], buf.len) + buf.setLen(read) + + if buf.len <= 0: + break + + onChunk(buf) + + if outputStream != nil: + outputStream.write(buf) + + if outputStream != nil: + outputStream.close() + finally: + if outputStream != nil: + outputStream.close() + + return ok("") + +proc stream( + archivist: ptr NodeServer, + cCid: cstring, + chunkSize: csize_t, + local: bool, + filepath: cstring, + onChunk: OnChunkHandler, +): Future[Result[string, string]] {.raises: [], async: (raises: []).} = + let cid = Cid.init($cCid) + if cid.isErr: + return err("Failed to stream: cannot parse cid: " & $cCid) + + if not downloadSessions.contains($cid): + return err("Failed to stream: no session for cid " & $cid) + + var session: DownloadSession + try: + session = downloadSessions[$cid] + except KeyError: + return err("Failed to stream: no session for cid " & $cid) + + try: + let res = + await noCancel archivist.streamData(session.stream, onChunk, chunkSize, filepath) + if res.isErr: + return err($res.error) + except LPStreamError as e: + return err("Failed to stream file: " & $e.msg) + except IOError as e: + return err("Failed to stream file: " & $e.msg) + finally: + if session.stream != nil: + await session.stream.close() + downloadSessions.del($cid) + + return ok("") + +proc cancel( + archivist: ptr NodeServer, cCid: cstring +): Future[Result[string, string]] {.raises: [], async: (raises: []).} = + let cid = Cid.init($cCid) + if cid.isErr: + return err("Failed to cancel: cannot parse cid: " & $cCid) + + if not downloadSessions.contains($cid): + return ok("") + + 
var session: DownloadSession + try: + session = downloadSessions[$cid] + except KeyError: + return ok("") + + let stream = session.stream + await stream.close() + downloadSessions.del($cCid) + + return ok("") + +proc manifest( + archivist: ptr NodeServer, cCid: cstring +): Future[Result[string, string]] {.raises: [], async: (raises: []).} = + let cid = Cid.init($cCid) + if cid.isErr: + return err("Failed to fetch manifest: cannot parse cid: " & $cCid) + + try: + let manifestRes = await archivist[].archivistNode.fetchManifest(cid.get()) + if manifestRes.isErr: + return err("Failed to fetch manifest: " & manifestRes.error.msg) + + return ok(serde.toJson(manifestRes.get())) + except CancelledError: + return err("Failed to fetch manifest: download cancelled.") + +proc process*( + self: ptr NodeDownloadRequest, archivist: ptr NodeServer, onChunk: OnChunkHandler +): Future[Result[string, string]] {.async: (raises: []).} = + defer: + destroyShared(self) + + case self.operation + of NodeDownloadMsgType.INIT: + let res = (await init(archivist, self.cid, self.chunkSize, self.local)) + if res.isErr: + error "Failed to INIT.", error = res.error + return err($res.error) + return res + of NodeDownloadMsgType.CHUNK: + let res = (await chunk(archivist, self.cid, onChunk)) + if res.isErr: + error "Failed to CHUNK.", error = res.error + return err($res.error) + return res + of NodeDownloadMsgType.STREAM: + let res = ( + await stream( + archivist, self.cid, self.chunkSize, self.local, self.filepath, onChunk + ) + ) + if res.isErr: + error "Failed to STREAM.", error = res.error + return err($res.error) + return res + of NodeDownloadMsgType.CANCEL: + let res = (await cancel(archivist, self.cid)) + if res.isErr: + error "Failed to CANCEL.", error = res.error + return err($res.error) + return res + of NodeDownloadMsgType.MANIFEST: + let res = (await manifest(archivist, self.cid)) + if res.isErr: + error "Failed to MANIFEST.", error = res.error + return err($res.error) + return res diff 
--git a/library/archivist_thread_requests/requests/node_info_request.nim b/library/archivist_thread_requests/requests/node_info_request.nim new file mode 100644 index 00000000..a26fae76 --- /dev/null +++ b/library/archivist_thread_requests/requests/node_info_request.nim @@ -0,0 +1,69 @@ +## This file contains the node info request types that will be handled. +## VERSION: get the Archivist version. +## REVISION: get the Archivist revision. +## REPO: get the repo (data directory) path. +## PEERID: get the node's peer ID. +## SPR: get the node's Signed Peer Record. + +{.push raises: [].} + +import std/options +import chronos +import chronicles +import results +import pkg/libp2p/switch as libp2p_switch +import ../../alloc + +from "../../../archivist/archivist" import NodeServer +from ../../../archivist/node import ArchivistNodeRef, switch, discovery + +# TODO: Should this really be hardcoded here? +const archivistVersion* = "0.1.0" +const archivistRevision* = "unknown" + +logScope: + topics = "libarchivist libarchivistinfo" + +type NodeInfoMsgType* = enum + VERSION + REVISION + REPO + PEERID + SPR + +type NodeInfoRequest* = object + operation: NodeInfoMsgType + +proc createShared*(T: type NodeInfoRequest, op: NodeInfoMsgType): ptr type T = + var ret = createShared(T) + ret[].operation = op + return ret + +proc destroyShared(self: ptr NodeInfoRequest) = + deallocShared(self) + +proc process*( + self: ptr NodeInfoRequest, archivist: ptr NodeServer +): Future[Result[string, string]] {.async: (raises: []).} = + defer: + destroyShared(self) + + case self.operation + of VERSION: + return ok(archivistVersion) + of REVISION: + return ok(archivistRevision) + of REPO: + # TODO: Get actual repo path from config + return ok("") + of PEERID: + if archivist[].isNil: + return err("Archivist node is not initialized") + return ok($archivist[].archivistNode.switch.peerInfo.peerId) + of SPR: + if archivist[].isNil: + return err("Archivist node is not initialized") + let spr = 
archivist[].archivistNode.discovery().dhtRecord + if spr.isNone: + return err("Failed to get SPR: no SPR record found") + return ok($spr.get()) diff --git a/library/archivist_thread_requests/requests/node_lifecycle_request.nim b/library/archivist_thread_requests/requests/node_lifecycle_request.nim new file mode 100644 index 00000000..67436a18 --- /dev/null +++ b/library/archivist_thread_requests/requests/node_lifecycle_request.nim @@ -0,0 +1,192 @@ +## This file contains the lifecycle request type that will be handled. +## CREATE: create a new Archivist node with the provided config.json. +## START: start the provided Archivist node. +## STOP: stop the provided Archivist node. + +import std/[options, json, strutils, net, os] +import chronos +import chronicles +import results +import confutils +import confutils/std/net +import confutils/defs +import libp2p +import libp2p/routing_record +import json_serialization +import json_serialization/std/[options, net] +import ../../../archivist/conf + +import ../../alloc +import ../../../archivist/conf +import ../../../archivist/utils +import ../../../archivist/utils/[keyutils, fileutils] +import ../../../archivist/units + +from ../../../archivist/archivist import NodeServer, new, start, stop +from ../../../archivist/conf import nodeFullVersion + +logScope: + topics = "libarchivist libarchivistlifecycle" + +type NodeLifecycleMsgType* = enum + CREATE + START + STOP + +proc readValue*[T: InputFile | InputDir | OutPath | OutDir | OutFile]( + r: var JsonReader, val: var T +) {.raises: [SerializationError, IOError].} = + val = T(r.readValue(string)) + +proc readValue*(r: var JsonReader, val: var MultiAddress) {.raises: [SerializationError, IOError].} = + let addrStr = r.readValue(string) + let res = MultiAddress.init(addrStr) + if res.isErr: + raise + newException(SerializationError, "Cannot parse MultiAddress: " & addrStr) + val = res.get() + +proc readValue*(r: var JsonReader, val: var NatConfig) {.raises: [SerializationError, 
ValueError, IOError].} = + try: + val = NatConfig.parseCmdArg(r.readValue(string)) + except ValueError as e: + raise + newException(SerializationError, "Cannot parse the NAT config: " & e.msg) + +proc readValue*(r: var JsonReader, val: var SignedPeerRecord) {.raises: [SerializationError, IOError].} = + let uri = r.readValue(string) + if not val.fromURI(uri): + raise + newException(SerializationError, "Cannot parse the signed peer record: " & uri) + +proc readValue*(r: var JsonReader, val: var ThreadCount) {.raises: [SerializationError, IOError].} = + val = ThreadCount(r.readValue(int)) + +proc readValue*(r: var JsonReader, val: var NBytes) {.raises: [SerializationError, IOError].} = + val = NBytes(r.readValue(int)) + +proc readValue*(r: var JsonReader, val: var Duration) {.raises: [SerializationError, IOError].} = + var dur: Duration + let input = r.readValue(string) + let count = parseDuration(input, dur) + if count == 0: + raise newException(SerializationError, "Cannot parse the duration: " & input) + val = dur + +type NodeLifecycleRequest* = object + operation: NodeLifecycleMsgType + configJson: cstring + +proc createShared*( + T: type NodeLifecycleRequest, op: NodeLifecycleMsgType, configJson: cstring = "" +): ptr type T = + var ret = createShared(T) + ret[].operation = op + ret[].configJson = configJson.alloc() + return ret + +proc destroyShared(self: ptr NodeLifecycleRequest) = + deallocShared(self[].configJson) + deallocShared(self) + +proc createArchivist( + configJson: cstring +): Future[Result[NodeServer, string]] {.async: (raises: []).} = + var conf: NodeConf + + try: + # TODO: Fix configuration loading serialization issues, remove hardcoded stuff + conf = default(NodeConf) + conf.logLevel = "info" + conf.dataDir = OutDir(defaultDataDir()) + conf.netPrivKeyFile = "key" + conf.maxPeers = 160 + conf.agentString = "Archivist Node" + conf.numThreads = ThreadCount(0) + conf.discoveryPort = Port(8090) + + conf.listenAddrs = 
@[MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("Should init multiaddress")] + + conf.apiBindAddress = "127.0.0.1" + conf.apiPort = Port(8080) + conf.storageQuota = DefaultQuotaBytes + conf.blockTtl = DefaultBlockTtl + conf.blockMaintenanceInterval = DefaultBlockInterval + conf.blockMaintenanceNumberOfBlocks = DefaultNumBlocksPerInterval + + let dataDir = string(conf.dataDir) + if not dirExists(dataDir): + try: + createDir(dataDir) + except CatchableError as e: + # TODO: Should we really ignore the directory creation failure? + discard + except ConfigurationError as e: + return err("Failed to create Archivist: unable to load configuration: " & e.msg) + + conf.setupLogging() + + try: + {.gcsafe.}: + updateLogLevel(conf.logLevel) + except ValueError as err: + return err("Failed to create Archivist: invalid value for log level: " & err.msg) + + conf.setupMetrics() + + if not (checkAndCreateDataDir((conf.dataDir).string)): + return err( + "Failed to create Archivist: unable to access/create data folder or data folder's permissions are insecure." + ) + + if not (checkAndCreateDataDir((conf.dataDir / "repo"))): + return err( + "Failed to create Archivist: unable to access/create data folder or data folder's permissions are insecure." 
+ ) + + let keyPath = + if isAbsolute(conf.netPrivKeyFile): + conf.netPrivKeyFile + else: + conf.dataDir / conf.netPrivKeyFile + let privateKey = setupKey(keyPath) + if privateKey.isErr: + return err("Failed to create Archivist: unable to get the private key.") + let pk = privateKey.get() + + let archivist = + try: + NodeServer.new(conf, pk) + except Exception as exc: + return err("Failed to create Archivist: " & exc.msg) + + return ok(archivist) + +proc process*( + self: ptr NodeLifecycleRequest, archivist: ptr NodeServer +): Future[Result[string, string]] {.async: (raises: []).} = + defer: + destroyShared(self) + + case self.operation + of CREATE: + archivist[] = ( + await createArchivist( + self.configJson + ) + ).valueOr: + error "Failed to CREATE.", error = error + return err($error) + of START: + try: + await archivist[].start() + except Exception as e: + error "Failed to START.", error = e.msg + return err(e.msg) + of STOP: + try: + await archivist[].stop() + except Exception as e: + error "Failed to STOP.", error = e.msg + return err(e.msg) + return ok("") diff --git a/library/archivist_thread_requests/requests/node_p2p_request.nim b/library/archivist_thread_requests/requests/node_p2p_request.nim new file mode 100644 index 00000000..458688ab --- /dev/null +++ b/library/archivist_thread_requests/requests/node_p2p_request.nim @@ -0,0 +1,187 @@ +## This file contains the P2P request types that will be handled. +## CONNECT: connect to a peer by ID and/or addresses. +## CONNECTED_PEERS: get the number of connected peers. +## CONNECTED_PEER_IDS: get the list of connected peer IDs. +## FIND_PEER: find a peer by ID using DHT discovery. +## DISCONNECT: disconnect from a specific peer. 
+ +{.push raises: [].} + +import std/[json, sets] +import chronos +import chronicles +import results +import questionable +import pkg/libp2p/switch as libp2p_switch +import ../../alloc + +from "../../../archivist/archivist" import NodeServer +from ../../../archivist/node import ArchivistNodeRef, findPeer, switch + +logScope: + topics = "libarchivist libarchivistp2p" + +type NodeP2PMsgType* = enum + CONNECT + CONNECTED_PEERS + CONNECTED_PEER_IDS + FIND_PEER + DISCONNECT + +type NodeP2PRequest* = object + operation: NodeP2PMsgType + peerId*: string + addresses*: seq[string] + +proc createShared*( + T: type NodeP2PRequest, + op: NodeP2PMsgType, + peerId: string = "", + addresses: seq[string] = @[] +): ptr type T = + var ret = createShared(T) + ret[].operation = op + ret[].peerId = peerId + ret[].addresses = addresses + return ret + +proc destroyShared(self: ptr NodeP2PRequest) = + deallocShared(self) + +proc processConnect( + archivist: ptr NodeServer, + peerId: string, + addresses: seq[string] +): Future[Result[string, string]] {.async: (raises: []).} = + ## Connect to a peer by ID and/or addresses. + if archivist[].isNil: + return err("Archivist node is not initialized") + + let pid = PeerId.init(peerId) + if pid.isErr: + return err("Invalid peer ID: " & peerId) + + var multiAddrs: seq[MultiAddress] + for addr in addresses: + let ma = MultiAddress.init(addr) + if ma.isErr: + continue + multiAddrs.add(ma.get()) + + try: + await archivist[].archivistNode.switch().connect(pid.get(), multiAddrs) + return ok("connected") + except CancelledError: + return err("Connect operation cancelled") + except CatchableError as e: + return err("Failed to connect: " & e.msg) + +proc processConnectedPeers( + archivist: ptr NodeServer +): Future[Result[string, string]] {.async: (raises: []).} = + ## Get the number of connected peers. 
+ if archivist[].isNil: + return err("Archivist node is not initialized") + + try: + let inboundPeers = archivist[].archivistNode.switch().connectedPeers(Direction.In) + let outboundPeers = archivist[].archivistNode.switch().connectedPeers(Direction.Out) + let total = inboundPeers.len + outboundPeers.len + return ok($total) + except CatchableError as e: + return err("Failed to get connected peers: " & e.msg) + +proc processConnectedPeerIds( + archivist: ptr NodeServer +): Future[Result[string, string]] {.async: (raises: []).} = + if archivist[].isNil: + return err("Archivist node is not initialized") + + try: + var peerIdsSet: HashSet[string] + let inboundPeers = archivist[].archivistNode.switch().connectedPeers(Direction.In) + let outboundPeers = archivist[].archivistNode.switch().connectedPeers(Direction.Out) + + for peer in inboundPeers: + peerIdsSet.incl($peer) + for peer in outboundPeers: + peerIdsSet.incl($peer) + + var peerIds: seq[string] = @[] + for pid in peerIdsSet: + peerIds.add(pid) + + let json = %*{"peerIds": peerIds} + return ok($json) + except CatchableError as e: + return err("Failed to get connected peer IDs: " & e.msg) + +proc processFindPeer( + archivist: ptr NodeServer, + peerId: string +): Future[Result[string, string]] {.async: (raises: []).} = + if archivist[].isNil: + return err("Archivist node is not initialized") + + if peerId == "": + return err("Peer ID is required") + + let pid = PeerId.init(peerId) + if pid.isErr: + return err("Invalid peer ID: " & peerId) + + try: + let found = await archivist[].archivistNode.findPeer(pid.get()) + if found.isNone: + return err("Peer not found: " & peerId) + + var addrs: seq[string] = @[] + let peerRecord = found.unsafeGet() + for addrInfo in peerRecord.addresses: + addrs.add($addrInfo.address) + let json = %*{"peerId": peerId, "addresses": addrs} + return ok($json) + except CancelledError: + return err("Find peer operation cancelled") + except CatchableError as e: + return err("Failed to find peer: 
" & e.msg) + +proc processDisconnect( + archivist: ptr NodeServer, + peerId: string +): Future[Result[string, string]] {.async: (raises: []).} = + if archivist[].isNil: + return err("Archivist node is not initialized") + + if peerId == "": + return err("Peer ID is required") + + let pid = PeerId.init(peerId) + if pid.isErr: + return err("Invalid peer ID: " & peerId) + + try: + await archivist[].archivistNode.switch().disconnect(pid.get()) + return ok("disconnected") + except CancelledError: + return err("Disconnect operation cancelled") + except CatchableError as e: + return err("Failed to disconnect: " & e.msg) + +proc process*( + self: ptr NodeP2PRequest, archivist: ptr NodeServer +): Future[Result[string, string]] {.async: (raises: []).} = + defer: + destroyShared(self) + + case self.operation + of CONNECT: + return await processConnect(archivist, self.peerId, self.addresses) + of CONNECTED_PEERS: + return await processConnectedPeers(archivist) + of CONNECTED_PEER_IDS: + return await processConnectedPeerIds(archivist) + of FIND_PEER: + return await processFindPeer(archivist, self.peerId) + of DISCONNECT: + return await processDisconnect(archivist, self.peerId) diff --git a/library/archivist_thread_requests/requests/node_storage_request.nim b/library/archivist_thread_requests/requests/node_storage_request.nim new file mode 100644 index 00000000..a4de2a51 --- /dev/null +++ b/library/archivist_thread_requests/requests/node_storage_request.nim @@ -0,0 +1,185 @@ +## This file contains the node storage request. +## Operations available: +## - LIST: list all manifests stored in the node. +## - DELETE: Deletes either a single block or an entire dataset from the local node. +## - FETCH: download a file from the network to the local node. +## - SPACE: get the amount of space used by the local node. +## - EXISTS: check the existence of a cid in a node (local store). 
+ +{.push raises: [].} + +import std/[options, tables] +import chronos +import chronicles +import libp2p/stream/[lpstream] +import serde/json as serde +import ../../alloc +import ../../../archivist/units +import ../../../archivist/archivisttypes +import ../../../archivist/manifest/manifest + +from "../../../archivist/archivist" import NodeServer +from ../../../archivist/node import + ArchivistNodeRef, iterateManifests, fetchManifest, fetchDatasetAsyncTask, delete +from libp2p import Cid, init, `$` + +logScope: + topics = "libarchivist libarchiviststorage" + +type NodeStorageMsgType* = enum + LIST + DELETE + FETCH + SPACE + EXISTS + +type NodeStorageRequest* = object + operation: NodeStorageMsgType + cid: cstring + +type StorageSpace = object + totalBlocks* {.serialize.}: Natural + quotaMaxBytes* {.serialize.}: NBytes + quotaUsedBytes* {.serialize.}: NBytes + quotaReservedBytes* {.serialize.}: NBytes + +proc createShared*( + T: type NodeStorageRequest, op: NodeStorageMsgType, cid: cstring = "" +): ptr type T = + var ret = createShared(T) + ret[].operation = op + ret[].cid = cid.alloc() + + return ret + +proc destroyShared(self: ptr NodeStorageRequest) = + deallocShared(self[].cid) + deallocShared(self) + +type ManifestWithCid = object + cid {.serialize.}: string + manifest {.serialize.}: Manifest + +proc list( + archivist: ptr NodeServer +): Future[Result[string, string]] {.async: (raises: []).} = + var manifests = newSeq[ManifestWithCid]() + proc onManifest(cid: Cid, manifest: Manifest) {.raises: [], gcsafe.} = + manifests.add(ManifestWithCid(cid: $cid, manifest: manifest)) + + try: + await archivist[].archivistNode.iterateManifests(onManifest) + except CancelledError: + return err("Failed to list manifests: cancelled operation.") + except CatchableError as err: + return err("Failed to list manifest: : " & err.msg) + + return ok(serde.toJson(manifests)) + +proc delete( + archivist: ptr NodeServer, cCid: cstring +): Future[Result[string, string]] {.async: (raises: 
[]).} = + let cid = Cid.init($cCid) + if cid.isErr: + return err("Failed to delete the data: cannot parse cid: " & $cCid) + + try: + let res = await archivist[].archivistNode.delete(cid.get()) + if res.isErr: + return err("Failed to delete the data: " & res.error.msg) + except CancelledError: + return err("Failed to delete the data: cancelled operation.") + except CatchableError as err: + return err("Failed to delete the data: " & err.msg) + + return ok("") + +proc fetch( + archivist: ptr NodeServer, cCid: cstring +): Future[Result[string, string]] {.async: (raises: []).} = + let cid = Cid.init($cCid) + if cid.isErr: + return err("Failed to fetch the data: cannot parse cid: " & $cCid) + + try: + let manifest = await archivist[].archivistNode.fetchManifest(cid.get()) + if manifest.isErr: + return err("Failed to fetch the data: " & manifest.error.msg) + + archivist[].archivistNode.fetchDatasetAsyncTask(manifest.get()) + + return ok(serde.toJson(manifest.get())) + except CancelledError: + return err("Failed to fetch the data: download cancelled.") + +proc space( + archivist: ptr NodeServer +): Future[Result[string, string]] {.async: (raises: []).} = + try: + # TODO: Implementation missing, need to query repo store + let space = StorageSpace( + totalBlocks: 0, + quotaMaxBytes: 0.NBytes, + quotaUsedBytes: 0.NBytes, + quotaReservedBytes: 0.NBytes, + ) + return ok(serde.toJson(space)) + except CatchableError as err: + return err("Failed to get space: " & err.msg) + +proc hasLocalBlock( + archivist: ptr NodeServer, cid: Cid +): Future[bool] {.async: (raises: []).} = + # TODO: Implement proper block existence check + return false + +proc exists( + archivist: ptr NodeServer, cCid: cstring +): Future[Result[string, string]] {.async: (raises: []).} = + let cid = Cid.init($cCid) + if cid.isErr: + return err("Failed to check the data existence: cannot parse cid: " & $cCid) + + try: + let exists = await hasLocalBlock(archivist, cid.get()) + return ok($exists) + except 
CancelledError: + return err("Failed to check the data existence: operation cancelled.") + +proc process*( + self: ptr NodeStorageRequest, archivist: ptr NodeServer +): Future[Result[string, string]] {.async: (raises: []).} = + defer: + destroyShared(self) + + case self.operation + of NodeStorageMsgType.LIST: + let res = (await list(archivist)) + if res.isErr: + error "Failed to LIST.", error = res.error + return err($res.error) + return res + of NodeStorageMsgType.DELETE: + let res = (await delete(archivist, self.cid)) + if res.isErr: + error "Failed to DELETE.", error = res.error + return err($res.error) + return res + of NodeStorageMsgType.FETCH: + let res = (await fetch(archivist, self.cid)) + if res.isErr: + error "Failed to FETCH.", error = res.error + return err($res.error) + return res + of NodeStorageMsgType.SPACE: + let res = (await space(archivist)) + if res.isErr: + error "Failed to SPACE.", error = res.error + return err($res.error) + return res + of NodeStorageMsgType.EXISTS: + let res = (await exists(archivist, self.cid)) + if res.isErr: + error "Failed to EXISTS.", error = res.error + return err($res.error) + return res diff --git a/library/archivist_thread_requests/requests/node_upload_request.nim b/library/archivist_thread_requests/requests/node_upload_request.nim new file mode 100644 index 00000000..2ed13e4a --- /dev/null +++ b/library/archivist_thread_requests/requests/node_upload_request.nim @@ -0,0 +1,303 @@ +## This file contains the upload request. +## A session is created for each upload allowing to resume, +## pause and cancel uploads (using chunks). +## +## There are two ways to upload a file: +## 1. Via chunks: the filepath parameter is the data filename. Steps are: +## - INIT: creates a new upload session and returns its ID. +## - CHUNK: sends a chunk of data to the upload session. +## - FINALIZE: finalizes the upload and returns the CID of the uploaded file. +## - CANCEL: cancels the upload session. +## +## 2. 
Directly from a file path: the filepath has to be absolute. +## - INIT: creates a new upload session and returns its ID +## - FILE: starts the upload and returns the CID of the uploaded file +## - CANCEL: cancels the upload session. + +{.push raises: [].} + +import std/[options, os, mimetypes] +import chronos +import chronicles +import questionable +import questionable/results +import faststreams/inputs +import libp2p/stream/[bufferstream, lpstream] +import ../../alloc +import ../../../archivist/units +import ../../../archivist/blocktype as bt + +from "../../../archivist/archivist" import NodeServer +from ../../../archivist/node import ArchivistNodeRef, store +from libp2p import Cid, `$` + +logScope: + topics = "libarchivist libarchivistupload" + +type NodeUploadMsgType* = enum + INIT + CHUNK + FINALIZE + CANCEL + FILE + +type OnProgressHandler = proc(bytes: int): void {.gcsafe, raises: [].} + +type NodeUploadRequest* = object + operation: NodeUploadMsgType + sessionId: cstring + filepath: cstring + chunk: seq[byte] + chunkSize: csize_t + +type + UploadSessionId* = string + UploadSessionCount* = int + UploadSession* = object + stream: BufferStream + fut: Future[?!Cid] + filepath: string + chunkSize: int + onProgress: OnProgressHandler + +var uploadSessions {.threadvar.}: Table[UploadSessionId, UploadSession] +var nextUploadSessionCount {.threadvar.}: UploadSessionCount + +proc createShared*( + T: type NodeUploadRequest, + op: NodeUploadMsgType, + data: string = "", + chunk: seq[byte] = @[], + chunkSize: int = 0 +): ptr type T = + var ret = createShared(T) + ret[].operation = op + if op == NodeUploadMsgType.INIT: + ret[].filepath = data.alloc() + ret[].sessionId = "".alloc() + else: + ret[].sessionId = data.alloc() + ret[].filepath = "".alloc() + ret[].chunk = chunk + ret[].chunkSize = csize_t(chunkSize) + return ret + +proc destroyShared(self: ptr NodeUploadRequest) = + deallocShared(self[].filepath) + deallocShared(self[].sessionId) + deallocShared(self) + +proc 
init( + archivist: ptr NodeServer, filepath: cstring = "", chunkSize: csize_t = 0 +): Future[Result[string, string]] {.async: (raises: []).} = + var filenameOpt, mimetypeOpt = string.none + + if isAbsolute($filepath): + if not fileExists($filepath): + return err( + "Failed to create an upload session, the filepath does not exist: " & $filepath + ) + + if filepath != "": + let (_, name, ext) = splitFile($filepath) + filenameOpt = (name & ext).some + + if ext != "": + let extNoDot = + if ext.len > 0: + ext[1 ..^ 1] + else: + "" + let mime = newMimetypes() + let mimetypeStr = mime.getMimetype(extNoDot, "") + mimetypeOpt = if mimetypeStr == "": string.none else: mimetypeStr.some + + let sessionId = $nextUploadSessionCount + nextUploadSessionCount.inc() + + let stream = BufferStream.new() + let lpStream = LPStream(stream) + + let blockSize = + if chunkSize.NBytes > 0.NBytes: chunkSize.NBytes else: DefaultBlockSize + + let fut = archivist[].archivistNode.store(lpStream, filenameOpt, mimetypeOpt, blockSize) + + uploadSessions[sessionId] = UploadSession( + stream: stream, fut: fut, filepath: $filepath, chunkSize: blockSize.int + ) + + return ok(sessionId) + +proc chunk( + archivist: ptr NodeServer, sessionId: cstring, chunk: seq[byte] +): Future[Result[string, string]] {.async: (raises: []).} = + if not uploadSessions.contains($sessionId): + return err("Failed to upload the chunk, the session is not found: " & $sessionId) + + var fut = newFuture[void]() + + try: + let session = uploadSessions[$sessionId] + + if chunk.len >= session.chunkSize: + uploadSessions[$sessionId].onProgress = proc( + bytes: int + ): void {.gcsafe, raises: [].} = + fut.complete() + await session.stream.pushData(chunk) + else: + fut = session.stream.pushData(chunk) + + await fut + uploadSessions[$sessionId].onProgress = nil + except KeyError: + return err("Failed to upload the chunk, the session is not found: " & $sessionId) + except LPError as e: + return err("Failed to upload the chunk, stream 
error: " & $e.msg) + except CancelledError: + return err("Failed to upload the chunk, operation cancelled.") + except CatchableError as e: + return err("Failed to upload the chunk: " & $e.msg) + finally: + if not fut.finished(): + fut.cancelSoon() + + return ok("") + +proc finalize( + archivist: ptr NodeServer, sessionId: cstring +): Future[Result[string, string]] {.async: (raises: []).} = + if not uploadSessions.contains($sessionId): + return err("Failed to finalize the upload session, session not found: " & $sessionId) + + var session: UploadSession + try: + session = uploadSessions[$sessionId] + await session.stream.pushEof() + + let res = await session.fut + if res.isErr: + return err("Failed to finalize the upload session: " & res.error().msg) + + return ok($res.get()) + except KeyError: + return err("Failed to finalize the upload session, invalid session ID: " & $sessionId) + except LPStreamError as e: + return err("Failed to finalize the upload session, stream error: " & $e.msg) + except CancelledError: + return err("Failed to finalize the upload session, operation cancelled") + except CatchableError as e: + return err("Failed to finalize the upload session: " & $e.msg) + finally: + if uploadSessions.contains($sessionId): + uploadSessions.del($sessionId) + + if session.fut != nil and not session.fut.finished(): + session.fut.cancelSoon() + +proc cancel( + archivist: ptr NodeServer, sessionId: cstring +): Future[Result[string, string]] {.async: (raises: []).} = + if not uploadSessions.contains($sessionId): + return ok("") + + try: + let session = uploadSessions[$sessionId] + session.fut.cancelSoon() + except KeyError: + return ok("") + + uploadSessions.del($sessionId) + return ok("") + +proc streamFile( + filepath: string, stream: BufferStream, chunkSize: int +): Future[Result[void, string]] {.async: (raises: [CancelledError]).} = + try: + let inputStreamHandle = filepath.fileInput() + let inputStream = inputStreamHandle.implicitDeref + + var buf = 
newSeq[byte](chunkSize) + while inputStream.readable: + let read = inputStream.readIntoEx(buf) + if read == 0: + break + await stream.pushData(buf[0 ..< read]) + return ok() + except IOError, OSError, LPStreamError: + let e = getCurrentException() + return err("Failed to stream the file: " & $e.msg) + +proc file( + archivist: ptr NodeServer, sessionId: cstring, onProgress: OnProgressHandler +): Future[Result[string, string]] {.async: (raises: []).} = + if not uploadSessions.contains($sessionId): + return err("Failed to upload the file, invalid session ID: " & $sessionId) + + var session: UploadSession + + try: + uploadSessions[$sessionId].onProgress = onProgress + session = uploadSessions[$sessionId] + + let res = await streamFile(session.filepath, session.stream, session.chunkSize) + if res.isErr: + return err("Failed to upload the file: " & res.error) + + return await archivist.finalize(sessionId) + except KeyError: + return err("Failed to upload the file, the session is not found: " & $sessionId) + except LPStreamError, IOError: + let e = getCurrentException() + return err("Failed to upload the file: " & $e.msg) + except CancelledError: + return err("Failed to upload the file, the operation is cancelled.") + except CatchableError as e: + return err("Failed to upload the file: " & $e.msg) + finally: + if uploadSessions.contains($sessionId): + uploadSessions.del($sessionId) + + if session.fut != nil and not session.fut.finished(): + session.fut.cancelSoon() + +proc process*( + self: ptr NodeUploadRequest, + archivist: ptr NodeServer, + onUploadProgress: OnProgressHandler = nil, +): Future[Result[string, string]] {.async: (raises: []).} = + defer: + destroyShared(self) + + case self.operation + of NodeUploadMsgType.INIT: + let res = (await init(archivist, self.filepath, self.chunkSize)) + if res.isErr: + error "Failed to INIT.", error = res.error + return err($res.error) + return res + of NodeUploadMsgType.CHUNK: + let res = (await chunk(archivist, self.sessionId, 
self.chunk)) + if res.isErr: + error "Failed to CHUNK.", error = res.error + return err($res.error) + return res + of NodeUploadMsgType.FINALIZE: + let res = (await finalize(archivist, self.sessionId)) + if res.isErr: + error "Failed to FINALIZE.", error = res.error + return err($res.error) + return res + of NodeUploadMsgType.CANCEL: + let res = (await cancel(archivist, self.sessionId)) + if res.isErr: + error "Failed to CANCEL.", error = res.error + return err($res.error) + return res + of NodeUploadMsgType.FILE: + let res = (await file(archivist, self.sessionId, onUploadProgress)) + if res.isErr: + error "Failed to FILE.", error = res.error + return err($res.error) + return res diff --git a/library/config.nims b/library/config.nims new file mode 100644 index 00000000..2c676063 --- /dev/null +++ b/library/config.nims @@ -0,0 +1 @@ +include "../config.nims" diff --git a/library/ffi_types.nim b/library/ffi_types.nim new file mode 100644 index 00000000..7446cf8c --- /dev/null +++ b/library/ffi_types.nim @@ -0,0 +1,65 @@ +## FFI Types and Utilities +## +## This file defines the core types and utilities for the library's foreign +## function interface (FFI), enabling interoperability with external code. 
+ +{.pragma: exported, exportc, cdecl, raises: [].} +{.pragma: callback, cdecl, raises: [], gcsafe.} + +import pkg/results + +################################################################################ +### Exported types + +type ArchivistCallback* = proc( + callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer +) {.cdecl, gcsafe, raises: [].} + +################################################################################ +### Return codes + +const RET_OK*: cint = 0 +const RET_ERR*: cint = 1 +const RET_MISSING_CALLBACK*: cint = 2 +const RET_PROGRESS*: cint = 3 + +################################################################################ +### Helper procedures + +proc success*(callback: ArchivistCallback, msg: string, userData: pointer): cint = + if msg.len > 0: + callback(RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + else: + let empty = "" + callback(RET_OK, unsafeAddr empty[0], 0, userData) + return RET_OK + +proc error*(callback: ArchivistCallback, msg: string, userData: pointer): cint = + let msg = "libarchivist error: " & msg + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return RET_ERR + +proc okOrError*[T]( + callback: ArchivistCallback, res: Result[T, string], userData: pointer +): cint = + if res.isOk: + return RET_OK + return callback.error($res.error, userData) + +proc progress*(callback: ArchivistCallback, data: string, userData: pointer): cint = + callback(RET_PROGRESS, unsafeAddr data[0], cast[csize_t](len(data)), userData) + return RET_OK + +################################################################################ +### FFI utils + +template foreignThreadGc*(body: untyped) = + when declared(setupForeignThreadGc): + setupForeignThreadGc() + + body + + when declared(tearDownForeignThreadGc): + tearDownForeignThreadGc() + +type onDone* = proc() diff --git a/library/libarchivist.h b/library/libarchivist.h new file mode 100644 index 00000000..cb1077ce --- /dev/null +++ 
b/library/libarchivist.h
@@ -0,0 +1,610 @@
+/**
+ * libarchivist.h - C-exported interface for the Archivist shared library
+ *
+ * This file implements the public C API for libarchivist. It acts as the bridge
+ * between C programs and the internal Nim implementation.
+ *
+ * Unless it is explicitly stated otherwise, all functions are asynchronous and execute
+ * their work on a separate thread, returning results via the provided callback. The
+ * result code of the function represents the synchronous status of the call itself:
+ * returning RET_OK if the job has been dispatched to the thread, and RET_ERR in case
+ * of immediate failure.
+ *
+ * The callback function is invoked with the result of the operation, including
+ * any data or error messages. If the call was successful, `callerRet` will be RET_OK,
+ * and `msg` will contain the result data. If there was an error, `callerRet` will be RET_ERR,
+ * and `msg` will contain the error message.
+ *
+ * When a function supports progress updates, it may invoke the callback multiple times:
+ * first with RET_PROGRESS and progress information, and finally with RET_OK or RET_ERR
+ * upon completion. The msg parameter will contain a chunk of data for upload and download operations.
+ *
+ * `userData` is a pointer provided by the caller that is passed back to the callback
+ * for context.
+ */
+
+#ifndef __libarchivist__
+#define __libarchivist__
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Return codes for FFI functions
+ */
+#define RET_OK 0
+#define RET_ERR 1
+#define RET_MISSING_CALLBACK 2
+#define RET_PROGRESS 3
+
+/**
+ * Callback function type for asynchronous operations
+ *
+ * @param callerRet The return code (RET_OK, RET_ERR, RET_PROGRESS)
+ * @param msg The message data (result, error, or progress)
+ * @param len The length of the message data
+ * @param userData User-provided context pointer
+ */
+typedef void (*ArchivistCallback)(int callerRet, const char *msg, size_t len, void *userData);
+
+/*******************************************************************************
+ * Context Lifecycle
+ ******************************************************************************/
+
+/**
+ * Create a new instance of an Archivist node.
+ *
+ * @param configJson JSON string with configuration overwriting defaults (can be NULL)
+ * @param callback Callback function for the result
+ * @param userData User-provided context pointer
+ * @return Opaque pointer to the ArchivistContext, or NULL on failure
+ *
+ * Typical usage:
+ *   ctx = archivist_new(configJson, myCallback, myUserData);
+ *   archivist_start(ctx, ...);
+ *   ...
+ *   archivist_stop(ctx, ...);
+ *   archivist_destroy(ctx, ...);
+ */
+void *archivist_new(
+    const char *configJson,
+    ArchivistCallback callback,
+    void *userData);
+
+/**
+ * Create the Archivist node backing the given context.
+ * This must complete successfully before the node can be started.
+ *
+ * @param ctx Context pointer from archivist_new
+ * @param callback Callback function for the result
+ * @param userData User-provided context pointer
+ * @return RET_OK if dispatched, RET_ERR on failure
+ */
+int archivist_create(void *ctx, ArchivistCallback callback, void *userData);
+
+/**
+ * Start the Archivist node.
+ * The node can be started and stopped multiple times.
+ * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_start(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Stop the Archivist node. + * The node can be started and stopped multiple times. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_stop(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Close the Archivist node. + * Use this to release resources before destroying the node. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_close(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Destroy an instance of an Archivist node. + * This will free all resources associated with the node. + * The node must be stopped and closed before calling this function. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_destroy(void *ctx, ArchivistCallback callback, void *userData); + +/******************************************************************************* + * Version Information + ******************************************************************************/ + +/** + * Get the Archivist version string. + * This call does not require the node to be started. 
+ * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_version(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Get the Archivist contracts revision. + * This call does not require the node to be started. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_revision(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Get the repo (data-dir) used by the node. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_repo(void *ctx, ArchivistCallback callback, void *userData); + +/******************************************************************************* + * Debug Operations + ******************************************************************************/ + +/** + * Retrieve debug information (JSON). + * + * Example return structure: + * { + * "id": "...", + * "addrs": ["..."], + * "spr": "", + * "announceAddresses": ["..."], + * "table": { + * "localNode": "", + * "nodes": [...] + * } + * } + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_debug(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Get the node's Signed Peer Record (SPR). 
+ * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_spr(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Get the node's peer ID. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_peer_id(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Set the log level at run time. + * + * @param ctx Context pointer from archivist_new + * @param logLevel Log level: "TRACE", "DEBUG", "INFO", "NOTICE", "WARN", "ERROR", or "FATAL" + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_log_level( + void *ctx, + const char *logLevel, + ArchivistCallback callback, + void *userData); + +/******************************************************************************* + * P2P Networking + ******************************************************************************/ + +/** + * Connect to a peer by using peerAddresses if provided, otherwise use peerId. + * Note that the peerId has to be advertised in the DHT for this to work. 
+ * + * @param ctx Context pointer from archivist_new + * @param peerId The peer ID to connect to + * @param peerAddresses Array of multiaddresses to dial (can be NULL) + * @param peerAddressesSize Number of addresses in the array + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_connect( + void *ctx, + const char *peerId, + const char **peerAddresses, + size_t peerAddressesSize, + ArchivistCallback callback, + void *userData); + +/** + * Get the number of connected peers. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result (returns count as string) + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_connected_peers(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Get list of connected peer IDs as JSON array. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_connected_peer_ids(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Find a peer by ID using DHT discovery. + * + * @param ctx Context pointer from archivist_new + * @param peerId The peer ID to find + * @param callback Callback function for the result (returns peer record as JSON) + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_find_peer( + void *ctx, + const char *peerId, + ArchivistCallback callback, + void *userData); + +/** + * Disconnect from a specific peer. 
+ * + * @param ctx Context pointer from archivist_new + * @param peerId The peer ID to disconnect from + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_disconnect( + void *ctx, + const char *peerId, + ArchivistCallback callback, + void *userData); + +/******************************************************************************* + * Upload Operations + ******************************************************************************/ + +/** + * Initialize an upload session for a file. + * + * @param ctx Context pointer from archivist_new + * @param filepath Absolute path for file upload; for chunk uploads it's the file name + * @param chunkSize Chunk size for upload (default: 65536 bytes) + * @param callback Callback function for the result (returns sessionId) + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_upload_init( + void *ctx, + const char *filepath, + size_t chunkSize, + ArchivistCallback callback, + void *userData); + +/** + * Upload a chunk for the given sessionId. + * + * @param ctx Context pointer from archivist_new + * @param sessionId The upload session ID + * @param chunk Pointer to the chunk data + * @param len Length of the chunk data + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_upload_chunk( + void *ctx, + const char *sessionId, + const uint8_t *chunk, + size_t len, + ArchivistCallback callback, + void *userData); + +/** + * Finalize an upload session identified by sessionId. 
+ * + * @param ctx Context pointer from archivist_new + * @param sessionId The upload session ID + * @param callback Callback function for the result (returns CID) + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_upload_finalize( + void *ctx, + const char *sessionId, + ArchivistCallback callback, + void *userData); + +/** + * Cancel an ongoing upload session. + * + * @param ctx Context pointer from archivist_new + * @param sessionId The upload session ID + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_upload_cancel( + void *ctx, + const char *sessionId, + ArchivistCallback callback, + void *userData); + +/** + * Upload the file defined as filepath in the init method. + * + * @param ctx Context pointer from archivist_new + * @param sessionId The upload session ID + * @param callback Callback function for the result (returns CID, may send RET_PROGRESS) + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_upload_file( + void *ctx, + const char *sessionId, + ArchivistCallback callback, + void *userData); + +/******************************************************************************* + * Download Operations + ******************************************************************************/ + +/** + * Initialize a download for cid. 
+ * + * @param ctx Context pointer from archivist_new + * @param cid The content identifier to download + * @param chunkSize Chunk size for download (default: 65536 bytes) + * @param local Whether to attempt local store retrieval only + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_download_init( + void *ctx, + const char *cid, + size_t chunkSize, + bool local, + ArchivistCallback callback, + void *userData); + +/** + * Perform a streaming download for cid. + * + * @param ctx Context pointer from archivist_new + * @param cid The content identifier to download + * @param chunkSize Chunk size for download (default: 65536 bytes) + * @param local Whether to attempt local store retrieval only + * @param filepath If provided, content is written to this file + * @param callback Callback function for the result (may send RET_PROGRESS) + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_download_stream( + void *ctx, + const char *cid, + size_t chunkSize, + bool local, + const char *filepath, + ArchivistCallback callback, + void *userData); + +/** + * Download a chunk for the given cid. + * The chunk will be returned via the callback using RET_PROGRESS. + * + * @param ctx Context pointer from archivist_new + * @param cid The content identifier to download + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_download_chunk( + void *ctx, + const char *cid, + ArchivistCallback callback, + void *userData); + +/** + * Cancel an ongoing download for cid. 
+ * + * @param ctx Context pointer from archivist_new + * @param cid The content identifier to cancel + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_download_cancel( + void *ctx, + const char *cid, + ArchivistCallback callback, + void *userData); + +/** + * Retrieve the manifest for the given cid (JSON). + * + * Example return structure: + * { + * "treeCid": "...", + * "datasetSize": 123456, + * "blockSize": 65536, + * "filename": "example.txt", + * "mimetype": "text/plain", + * "protected": false + * } + * + * @param ctx Context pointer from archivist_new + * @param cid The content identifier + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_download_manifest( + void *ctx, + const char *cid, + ArchivistCallback callback, + void *userData); + +/******************************************************************************* + * Storage Operations + ******************************************************************************/ + +/** + * Retrieve the list of manifests stored in the node. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result (JSON array) + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_list(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Retrieve storage space information (JSON). 
+ * + * Example return structure: + * { + * "totalBlocks": 100000, + * "quotaMaxBytes": 0, + * "quotaUsedBytes": 0, + * "quotaReservedBytes": 0 + * } + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_space(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Delete the content identified by cid. + * + * @param ctx Context pointer from archivist_new + * @param cid The content identifier to delete + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_delete( + void *ctx, + const char *cid, + ArchivistCallback callback, + void *userData); + +/** + * Fetch content identified by cid from the network into local store. + * The download is done in background so the callback will not receive progress updates. + * + * @param ctx Context pointer from archivist_new + * @param cid The content identifier to fetch + * @param callback Callback function for the result + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_fetch( + void *ctx, + const char *cid, + ArchivistCallback callback, + void *userData); + +/** + * Check if content identified by cid exists in local store. + * + * @param ctx Context pointer from archivist_new + * @param cid The content identifier to check + * @param callback Callback function for the result (returns "true" or "false") + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_exists( + void *ctx, + const char *cid, + ArchivistCallback callback, + void *userData); + +/** + * Get total size of locally stored data. 
+ * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result (returns size in bytes as string) + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_local_size(void *ctx, ArchivistCallback callback, void *userData); + +/** + * Get count of blocks in local storage. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for the result (returns count as string) + * @param userData User-provided context pointer + * @return RET_OK if dispatched, RET_ERR on failure + */ +int archivist_block_count(void *ctx, ArchivistCallback callback, void *userData); + +/******************************************************************************* + * Event Callback + ******************************************************************************/ + +/** + * Set an event callback for global events. + * Reserved for future use. + * + * @param ctx Context pointer from archivist_new + * @param callback Callback function for events + * @param userData User-provided context pointer + */ +void archivist_set_event_callback( + void *ctx, + ArchivistCallback callback, + void *userData); + +#ifdef __cplusplus +} +#endif + +#endif /* __libarchivist__ */ diff --git a/library/libarchivist.nim b/library/libarchivist.nim new file mode 100644 index 00000000..e8796a83 --- /dev/null +++ b/library/libarchivist.nim @@ -0,0 +1,620 @@ +## libarchivist.nim - C-exported interface for the Archivist shared library +## +## This file implements the public C API for libarchivist. +## It acts as the bridge between C programs and the internal Nim implementation. 
+## +## This file defines: +## - Initialization logic for the Nim runtime (once per process) +## - Thread-safe exported procs callable from C +## - Callback registration and invocation for asynchronous communication + +{.pragma: exported, exportc, cdecl, raises: [].} +{.pragma: callback, cdecl, raises: [], gcsafe.} + +{.passc: "-fPIC".} + +when defined(linux): + {.passl: "-Wl,-soname,libarchivist.so".} + +import std/[atomics, json] +import chronicles +import chronos +import chronos/threadsync +import ./archivist_context +import ./archivist_thread_requests/archivist_thread_request +import ./archivist_thread_requests/requests/node_lifecycle_request +import ./archivist_thread_requests/requests/node_info_request +import ./archivist_thread_requests/requests/node_debug_request +import ./archivist_thread_requests/requests/node_p2p_request +import ./archivist_thread_requests/requests/node_upload_request +import ./archivist_thread_requests/requests/node_download_request +import ./archivist_thread_requests/requests/node_storage_request +import ./ffi_types +import ./alloc + +logScope: + topics = "libarchivist" + +template checkLibarchivistParams*( + ctx: ptr ArchivistContext, callback: ArchivistCallback, userData: pointer +) = + if not isNil(ctx): + ctx[].userData = userData + + if isNil(callback): + return RET_MISSING_CALLBACK + +proc libarchivistNimMain() {.importc.} + +var initialized: Atomic[bool] + +if defined(android): + # Redirect chronicles to Android System logs + when compiles(defaultChroniclesStream.outputs[0].writer): + defaultChroniclesStream.outputs[0].writer = proc( + logLevel: LogLevel, msg: LogOutputStr + ) {.raises: [].} = + echo logLevel, msg + +# Initializes the Nim runtime and foreign-thread GC +proc initializeLibrary() {.exported.} = + if not initialized.exchange(true): + libarchivistNimMain() + when declared(setupForeignThreadGc): + setupForeignThreadGc() + when declared(nimGC_setStackBottom): + var locals {.volatile, noinit.}: pointer + locals = 
addr(locals) + nimGC_setStackBottom(locals) + +################################################################################ +### Context Lifecycle + +proc archivist_new*( + configJson: cstring, callback: ArchivistCallback, userData: pointer +): pointer {.dynlib, exported.} = + initializeLibrary() + + if isNil(callback): + error "Failed to create Archivist instance: the callback is missing." + return nil + + var ctx = archivist_context.createArchivistContext().valueOr: + let msg = $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return nil + + ctx.userData = userData + + # TODO: Parse configJson and configure the node + + let ack = "Archivist context created" + callback(RET_OK, unsafeAddr ack[0], cast[csize_t](len(ack)), userData) + + return ctx + +proc archivist_create*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE, "") + let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_start*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START, "") + let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_stop*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr 
ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP, "") + let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_close*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + # TODO: Need to double check this part + let ack = "closed" + callback(RET_OK, unsafeAddr ack[0], cast[csize_t](len(ack)), userData) + return RET_OK + +proc archivist_destroy*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let destroyRes = destroyArchivistContext(ctx) + if destroyRes.isErr: + return callback.error(destroyRes.error, userData) + + let ack = "destroyed" + callback(RET_OK, unsafeAddr ack[0], cast[csize_t](len(ack)), userData) + return RET_OK + +################################################################################ +### Version Information + +proc archivist_version*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeInfoRequest.createShared(NodeInfoMsgType.VERSION) + let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_revision*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + 
checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeInfoRequest.createShared(NodeInfoMsgType.REVISION) + let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_repo*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeInfoRequest.createShared(NodeInfoMsgType.REPO) + let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +################################################################################ +### Debug Operations + +proc archivist_debug*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeDebugRequest.createShared(NodeDebugMsgType.DEBUG) + let res = ctx.sendRequestToArchivistThread(RequestType.DEBUG, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_spr*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeInfoRequest.createShared(NodeInfoMsgType.SPR) + let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_peer_id*( + 
ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeInfoRequest.createShared(NodeInfoMsgType.PEERID) + let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_log_level*( + ctx: pointer, logLevel: cstring, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeDebugRequest.createShared(NodeDebugMsgType.LOG_LEVEL, $logLevel) + let res = ctx.sendRequestToArchivistThread(RequestType.DEBUG, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +################################################################################ +### P2P Networking + +proc archivist_connect*( + ctx: pointer, + peerId: cstring, + peerAddresses: cstringArray, + peerAddressesSize: csize_t, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + var addresses: seq[string] = @[] + if not peerAddresses.isNil and peerAddressesSize > 0: + for i in 0 ..< peerAddressesSize.int: + addresses.add($peerAddresses[i]) + + let req = NodeP2PRequest.createShared(NodeP2PMsgType.CONNECT, $peerId, addresses) + let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_connected_peers*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, 
exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeP2PRequest.createShared(NodeP2PMsgType.CONNECTED_PEERS) + let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_connected_peer_ids*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeP2PRequest.createShared(NodeP2PMsgType.CONNECTED_PEER_IDS) + let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_find_peer*( + ctx: pointer, peerId: cstring, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeP2PRequest.createShared(NodeP2PMsgType.FIND_PEER, $peerId) + let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_disconnect*( + ctx: pointer, peerId: cstring, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeP2PRequest.createShared(NodeP2PMsgType.DISCONNECT, $peerId) + let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + 
+################################################################################ +### Upload Operations + +proc archivist_upload_init*( + ctx: pointer, + filepath: cstring, + chunkSize: csize_t, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeUploadRequest.createShared(NodeUploadMsgType.INIT, $filepath, @[], chunkSize.int) + let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_upload_chunk*( + ctx: pointer, + sessionId: cstring, + chunk: ptr uint8, + len: csize_t, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + var chunkData: seq[byte] = @[] + if not chunk.isNil and len > 0: + chunkData = newSeq[byte](len.int) + copyMem(addr chunkData[0], chunk, len.int) + + let req = NodeUploadRequest.createShared(NodeUploadMsgType.CHUNK, $sessionId, chunkData) + let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_upload_finalize*( + ctx: pointer, + sessionId: cstring, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeUploadRequest.createShared(NodeUploadMsgType.FINALIZE, $sessionId) + let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc 
archivist_upload_cancel*( + ctx: pointer, + sessionId: cstring, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeUploadRequest.createShared(NodeUploadMsgType.CANCEL, $sessionId) + let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_upload_file*( + ctx: pointer, + sessionId: cstring, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeUploadRequest.createShared(NodeUploadMsgType.FILE, $sessionId) + let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +################################################################################ +### Download Operations + +proc archivist_download_init*( + ctx: pointer, + cid: cstring, + chunkSize: csize_t, + local: bool, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.INIT, $cid, chunkSize.int, local) + let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_download_stream*( + ctx: pointer, + cid: cstring, + chunkSize: csize_t, + local: bool, + filepath: cstring, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, 
exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + var fp = "" + if not filepath.isNil: + fp = $filepath + let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.STREAM, $cid, chunkSize.int, local, fp) + let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_download_chunk*( + ctx: pointer, + cid: cstring, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CHUNK, $cid) + let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_download_cancel*( + ctx: pointer, + cid: cstring, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CANCEL, $cid) + let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_download_manifest*( + ctx: pointer, + cid: cstring, + callback: ArchivistCallback, + userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.MANIFEST, $cid) + let res = 
ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +################################################################################ +### Storage Operations + +proc archivist_list*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeStorageRequest.createShared(NodeStorageMsgType.LIST) + let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_space*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) + let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_delete*( + ctx: pointer, cid: cstring, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeStorageRequest.createShared(NodeStorageMsgType.DELETE, cid) + let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_fetch*( + ctx: pointer, cid: cstring, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr 
ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeStorageRequest.createShared(NodeStorageMsgType.FETCH, cid) + let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_exists*( + ctx: pointer, cid: cstring, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeStorageRequest.createShared(NodeStorageMsgType.EXISTS, cid) + let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_local_size*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) + let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +proc archivist_block_count*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +): cint {.dynlib, exported.} = + checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) + + let ctx = cast[ptr ArchivistContext](ctx) + let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) + let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) + if res.isErr: + deallocShared(req) + return callback.error(res.error, userData) + return RET_OK + +################################################################################ +### Event 
Callback + +proc archivist_set_event_callback*( + ctx: pointer, callback: ArchivistCallback, userData: pointer +) {.dynlib, exported.} = + let ctx = cast[ptr ArchivistContext](ctx) + if not ctx.isNil: + ctx.eventCallback = cast[pointer](callback) + ctx.eventUserData = userData diff --git a/library/test_ffi.c b/library/test_ffi.c new file mode 100644 index 00000000..9d2896dd --- /dev/null +++ b/library/test_ffi.c @@ -0,0 +1,452 @@ +/* test_ffi.c - Simple C test program for libarchivist FFI + * + * This program tests the basic FFI functionality to ensure the library works correctly. + */ + +#include +#include +#include +#include +#include "libarchivist.h" + +static int callback_status = 0; +static char* callback_data = NULL; +static size_t callback_data_len = 0; +static void* callback_user_data = NULL; + +void test_callback(int status, const char* data, size_t len, void* userData) { + callback_status = status; + if (data && len > 0) { + if (callback_data) { + free(callback_data); + } + callback_data = malloc(len + 1); + if (callback_data) { + memcpy(callback_data, data, len); + callback_data[len] = '\0'; + callback_data_len = len; + } + } else { + callback_data_len = 0; + } + callback_user_data = userData; +} + +int test_create_context() { + printf("Test: Create and destroy context\n"); + + void* ctx = archivist_new("{}", test_callback, (void*)0x1234); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + } + printf(" PASSED: Context created\n"); + + sleep(1); + + int result = archivist_destroy(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_destroy returned %d\n", result); + return 1; + } + printf(" PASSED: Context destroyed\n"); + + return 0; +} + +int test_version() { + printf("Test: Get version\n"); + + void* ctx = archivist_new("{}", test_callback, NULL); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + } + + sleep(1); + + int result = archivist_version(ctx, test_callback, 
(void*)0x5678); + if (result != 0) { + printf(" FAILED: archivist_version returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + if (callback_data && callback_data_len > 0) { + printf(" PASSED: Version: %s\n", callback_data); + } else { + printf(" WARNING: No version data received\n"); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_peer_id() { + printf("Test: Get peer ID\n"); + + void* ctx = archivist_new("{}", test_callback, NULL); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + } + + sleep(1); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_create returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Create callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + callback_data_len = 0; + } + + result = archivist_peer_id(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_peer_id returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + if (callback_data && callback_data_len > 0) { + printf(" PASSED: Peer ID: %s\n", callback_data); + } else { + printf(" WARNING: No peer ID data received\n"); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_debug() { + printf("Test: Debug\n"); + + void* ctx = archivist_new("{}", test_callback, NULL); + if (!ctx) 
{ + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + } + + sleep(1); + + int result = archivist_debug(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_debug returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + if (callback_data && callback_data_len > 0) { + printf(" PASSED: Debug info received\n"); + } else { + printf(" WARNING: No debug data received\n"); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_connected_peers() { + printf("Test: Connected peers\n"); + + void* ctx = archivist_new("{}", test_callback, NULL); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + } + + sleep(1); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_create returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Create callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + callback_data_len = 0; + } + + result = archivist_connected_peers(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_connected_peers returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + if (callback_data && callback_data_len > 0) { + printf(" PASSED: Connected peers: %s\n", callback_data); + } else { + printf(" PASSED: No connected peers (expected)\n"); + } + + 
archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_storage_list() { + printf("Test: Storage list\n"); + + void* ctx = archivist_new("{}", test_callback, NULL); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + } + + sleep(1); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_create returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Create callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + callback_data_len = 0; + } + + result = archivist_list(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_list returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + if (callback_data && callback_data_len > 0) { + printf(" PASSED: Storage list: %s\n", callback_data); + } else { + printf(" PASSED: Empty storage list (expected)\n"); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_storage_space() { + printf("Test: Storage space\n"); + + void* ctx = archivist_new("{}", test_callback, NULL); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + } + + sleep(1); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_create returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Create callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + 
callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + callback_data_len = 0; + } + + result = archivist_space(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_space returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + if (callback_data && callback_data_len > 0) { + printf(" PASSED: Storage space: %s\n", callback_data); + } else { + printf(" WARNING: No storage space data received\n"); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_start_stop() { + printf("Test: Start and stop\n"); + + void* ctx = archivist_new("{}", test_callback, NULL); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + } + + sleep(1); + + int result = archivist_start(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_start returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(2); + + if (callback_status != 0) { + printf(" FAILED: Start callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + printf(" PASSED: Node started\n"); + + result = archivist_stop(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_stop returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(2); + + if (callback_status != 0) { + printf(" FAILED: Stop callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + printf(" PASSED: Node stopped\n"); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int main(int argc, char** argv) { + (void)argc; + (void)argv; + + printf("=== Archivist FFI Test Suite ===\n\n"); + + int failed = 0; + + failed += 
test_create_context(); + printf("\n"); + + failed += test_version(); + printf("\n"); + + failed += test_peer_id(); + printf("\n"); + + failed += test_debug(); + printf("\n"); + + failed += test_connected_peers(); + printf("\n"); + + failed += test_storage_list(); + printf("\n"); + + failed += test_storage_space(); + printf("\n"); + + failed += test_start_stop(); + printf("\n"); + + if (callback_data) { + free(callback_data); + } + + printf("=== Test Summary ===\n"); + if (failed == 0) { + printf("All tests PASSED\n"); + return 0; + } else { + printf("%d test(s) FAILED\n", failed); + return 1; + } +} From c24354999fcecbfe32a2d9f68b3a70997d51f576 Mon Sep 17 00:00:00 2001 From: Xav Date: Fri, 6 Mar 2026 06:54:43 -0500 Subject: [PATCH 02/16] feat(ffi): implement loading config from toml and serialization to toml using custom serializer --- archivist/archivist.nim | 12 + archivist/conf.nim | 68 +++++- archivist/conf_serialization.nim | 151 +++++++++++++ .../requests/node_info_request.nim | 5 +- .../requests/node_lifecycle_request.nim | 107 +++------ library/libarchivist.h | 11 +- library/libarchivist.nim | 17 +- library/test_ffi.c | 185 +++++++++++++++- tests/test_conf_serialization.nim | 209 ++++++++++++++++++ 9 files changed, 670 insertions(+), 95 deletions(-) create mode 100644 archivist/conf_serialization.nim create mode 100644 tests/test_conf_serialization.nim diff --git a/archivist/archivist.nim b/archivist/archivist.nim index a490f792..13e8479a 100644 --- a/archivist/archivist.nim +++ b/archivist/archivist.nim @@ -53,6 +53,18 @@ type NodePrivateKey* = libp2p.PrivateKey # alias +func node*(self: NodeServer): ArchivistNodeRef = + return self.archivistNode + +func repoStore*(self: NodeServer): RepoStore = + return self.repoStore + +func dataDir*(self: NodeServer): string = + return string(self.config.dataDir) + +func config*(self: NodeServer): NodeConf = + return self.config + proc connectMarketplace(s: NodeServer) {.async.} = let config = s.config diff --git 
a/archivist/conf.nim b/archivist/conf.nim index 34966a51..e9851da9 100644 --- a/archivist/conf.nim +++ b/archivist/conf.nim @@ -20,7 +20,12 @@ import pkg/chronicles/helpers import pkg/chronicles/topics_registry import pkg/confutils/defs import pkg/confutils/std/net +import pkg/confutils/toml/defs import pkg/toml_serialization +import pkg/serialization + +type ConfTypes = InputFile | InputDir | OutPath | OutDir | OutFile +serializesAsBase(ConfTypes, Toml) import pkg/metrics import pkg/metrics/chronos_httpserver import pkg/stew/byteutils @@ -661,7 +666,67 @@ proc readValue*( except CatchableError as err: raise newException(SerializationError, err.msg) -# no idea why confutils needs this: +# TOML Serialization readValue procedures +proc readValue*(r: var TomlReader, value: var IpAddress) {.raises: [SerializationError, TomlError, IOError].} = + try: + value = parseIpAddress(r.parseAsString()) + except ValueError as ex: + raise newException(SerializationError, ex.msg) + +proc readValue*(r: var TomlReader, value: var Port) {.raises: [SerializationError, TomlError, IOError].} = + value = r.parseInt(int).Port + +# TOML Serialization writeValue procedures +proc writeValue*(w: var TomlWriter, value: ThreadCount) {.raises: [IOError].} = + w.writeValue(int(value)) + +proc writeValue*(w: var TomlWriter, value: NBytes) {.raises: [IOError].} = + w.writeValue(int(value)) + +proc writeValue*(w: var TomlWriter, value: Duration) {.raises: [IOError].} = + w.writeValue($value) + +proc writeValue*(w: var TomlWriter, value: IpAddress) {.raises: [IOError].} = + w.writeValue($value) + +proc writeValue*(w: var TomlWriter, value: Port) {.raises: [IOError].} = + w.writeValue(int(value)) + +proc writeValue*(w: var TomlWriter, value: MultiAddress) {.raises: [IOError].} = + w.writeValue($value) + +proc writeValue*(w: var TomlWriter, value: EthAddress) {.raises: [IOError].} = + w.writeValue($value) + +proc writeValue*(w: var TomlWriter, value: SignedPeerRecord) {.raises: [IOError].} = + 
w.writeValue($value) + +proc writeValue*(w: var TomlWriter, value: NatConfig) {.raises: [IOError].} = + if value.hasExtIp: + w.writeValue("extip:" & $value.extIp) + else: + case value.nat + of NatStrategy.NatAny: + w.writeValue("any") + of NatStrategy.NatNone: + w.writeValue("none") + of NatStrategy.NatUpnp: + w.writeValue("upnp") + of NatStrategy.NatPmp: + w.writeValue("pmp") + +proc writeValue*(w: var TomlWriter, value: LogKind) {.raises: [IOError].} = + w.writeValue($value) + +proc writeValue*(w: var TomlWriter, value: RepoKind) {.raises: [IOError].} = + w.writeValue($value) + +proc writeValue*(w: var TomlWriter, value: ProverBackendCmd) {.raises: [IOError].} = + w.writeValue($value) + +proc writeValue*(w: var TomlWriter, value: Curves) {.raises: [IOError].} = + w.writeValue($value) + proc completeCmdArg*(T: type EthAddress, val: string): seq[string] = discard @@ -674,7 +739,6 @@ proc completeCmdArg*(T: type Duration, val: string): seq[string] = proc completeCmdArg*(T: type ThreadCount, val: string): seq[string] = discard -# silly chronicles, colors is a compile-time property proc stripAnsi*(v: string): string = var res = newStringOfCap(v.len) diff --git a/archivist/conf_serialization.nim b/archivist/conf_serialization.nim new file mode 100644 index 00000000..a69cac5e --- /dev/null +++ b/archivist/conf_serialization.nim @@ -0,0 +1,151 @@ +## Custom TOML serialization for NodeConf +## +## This module provides a workaround for the TOML serialization library's +## limitation where it treats object types as nested objects instead of +## using custom writeValue procedures. +## +## The TOML library's writeValue procedure for objects automatically creates +## table headers (like [metricsAddress]) for object types, which produces +## malformed TOML. This custom serializer manually constructs the TOML +## string, ensuring that custom writeValue procedures are used. 
+ +{.push raises: [].} + +import std/strutils +import std/options +import pkg/chronos +import pkg/toml_serialization +import pkg/libp2p +import ./conf +import ./logutils +import ./nat +import ./utils/natutils + +proc toToml*(config: NodeConf): string = + var toml = newStringOfCap(16384) + + proc append(key: string, value: string): string = + result = key & " = " & value & "\n" + + proc appendOpt(key: string, value: Option[string]): string = + if value.isSome: + result = key & " = \"" & value.get() & "\"\n" + else: + result = "" + + proc appendOpt(key: string, value: Option[EthAddress]): string = + if value.isSome: + result = key & " = \"" & value.get().short0xHexLog & "\"\n" + else: + result = "" + + proc appendOpt(key: string, value: Option[int]): string = + if value.isSome: + result = key & " = " & $value.get() & "\n" + else: + result = "" + + proc multiAddrToString(ma: MultiAddress): string = + ## Helper function to convert MultiAddress to string + $ma + + # Simple string fields + toml.add append("logLevel", "\"" & config.logLevel & "\"") + toml.add append("logFormat", "\"" & $config.logFormat & "\"") + toml.add append("agentString", "\"" & config.agentString & "\"") + toml.add append("apiBindAddress", "\"" & config.apiBindAddress & "\"") + toml.add append("netPrivKeyFile", "\"" & config.netPrivKeyFile & "\"") + toml.add append("ethProvider", "\"" & config.ethProvider & "\"") + + # Boolean fields + toml.add append("metricsEnabled", if config.metricsEnabled: "true" else: "false") + toml.add append("persistence", if config.persistence: "true" else: "false") + toml.add append("useSystemClock", if config.useSystemClock: "true" else: "false") + toml.add append("validator", if config.validator: "true" else: "false") + toml.add append("prover", if config.prover: "true" else: "false") + toml.add append("circomNoZkey", if config.circomNoZkey: "true" else: "false") + + # Integer fields + toml.add append("metricsPort", $int(config.metricsPort)) + toml.add 
append("discoveryPort", $int(config.discoveryPort)) + toml.add append("apiPort", $int(config.apiPort)) + toml.add append("maxPeers", $config.maxPeers) + toml.add append("numThreads", $int(config.numThreads)) + toml.add append("blockMaintenanceNumberOfBlocks", $config.blockMaintenanceNumberOfBlocks) + toml.add append("cacheSize", $int(config.cacheSize)) + toml.add append("validatorMaxSlots", $config.validatorMaxSlots) + toml.add append("validatorGroupIndex", $config.validatorGroupIndex) + toml.add append("marketplaceRequestCacheSize", $config.marketplaceRequestCacheSize) + toml.add append("maxPriorityFeePerGas", $config.maxPriorityFeePerGas) + toml.add append("numProofSamples", $config.numProofSamples) + toml.add append("maxSlotDepth", $config.maxSlotDepth) + toml.add append("maxDatasetDepth", $config.maxDatasetDepth) + toml.add append("maxBlockDepth", $config.maxBlockDepth) + toml.add append("maxCellElms", $config.maxCellElms) + + # Complex type fields (using string representations) + toml.add append("metricsAddress", "\"" & $config.metricsAddress & "\"") + toml.add append("dataDir", "\"" & string(config.dataDir) & "\"") + toml.add append("circuitDir", "\"" & string(config.circuitDir) & "\"") + + # NatConfig - use custom serialization logic + let natStr = if config.nat.hasExtIp: + "extip:" & $config.nat.extIp + else: + case config.nat.nat + of NatStrategy.NatAny: "any" + of NatStrategy.NatNone: "none" + of NatStrategy.NatUpnp: "upnp" + of NatStrategy.NatPmp: "pmp" + toml.add append("nat", "\"" & natStr & "\"") + + # Enum fields - need to be quoted as strings + toml.add append("repoKind", "\"" & $config.repoKind & "\"") + toml.add append("proverBackend", "\"" & $config.proverBackend & "\"") + toml.add append("curve", "\"" & $config.curve & "\"") + + # Duration fields - use proper duration string representation + proc formatDuration(d: Duration): string = + let s = $d + if s.len == 0: "0s" else: s + + toml.add append("blockTtl", "\"" & formatDuration(config.blockTtl) 
& "\"") + toml.add append("blockMaintenanceInterval", "\"" & formatDuration(config.blockMaintenanceInterval) & "\"") + + # NBytes fields + toml.add append("storageQuota", $int(config.storageQuota)) + + # File path fields + toml.add append("circomR1cs", "\"" & string(config.circomR1cs) & "\"") + toml.add append("circomGraph", "\"" & string(config.circomGraph) & "\"") + toml.add append("circomWasm", "\"" & string(config.circomWasm) & "\"") + toml.add append("circomZkey", "\"" & string(config.circomZkey) & "\"") + + # Option fields + toml.add appendOpt("apiCorsAllowedOrigin", config.apiCorsAllowedOrigin) + toml.add appendOpt("logFile", config.logFile) + toml.add appendOpt("ethPrivateKey", config.ethPrivateKey) + toml.add appendOpt("marketplaceAddress", config.marketplaceAddress) + toml.add appendOpt("validatorGroups", config.validatorGroups) + + # MultiAddress array + if config.listenAddrs.len > 0: + toml.add("listenAddrs = [\n") + for la in config.listenAddrs: + toml.add(" \"") + toml.add(multiAddrToString(la)) + toml.add("\",\n") + toml.add("]\n") + + # SignedPeerRecord array + if config.bootstrapNodes.len > 0: + toml.add("bootstrapNodes = [\n") + for node in config.bootstrapNodes: + toml.add(" \"") + toml.add($node) # SignedPeerRecord $ operator returns the string representation + toml.add("\",\n") + toml.add("]\n") + + result = toml + +{.pop.} diff --git a/library/archivist_thread_requests/requests/node_info_request.nim b/library/archivist_thread_requests/requests/node_info_request.nim index a26fae76..0c671db4 100644 --- a/library/archivist_thread_requests/requests/node_info_request.nim +++ b/library/archivist_thread_requests/requests/node_info_request.nim @@ -54,8 +54,9 @@ proc process*( of REVISION: return ok(archivistRevision) of REPO: - # TODO: Get actual repo path from config - return ok("") + if archivist[].isNil: + return err("Archivist node is not initialized") + return ok(string(archivist[].config.dataDir)) of PEERID: if archivist[].isNil: return 
err("Archivist node is not initialized") diff --git a/library/archivist_thread_requests/requests/node_lifecycle_request.nim b/library/archivist_thread_requests/requests/node_lifecycle_request.nim index 67436a18..aa009951 100644 --- a/library/archivist_thread_requests/requests/node_lifecycle_request.nim +++ b/library/archivist_thread_requests/requests/node_lifecycle_request.nim @@ -1,9 +1,9 @@ ## This file contains the lifecycle request type that will be handled. -## CREATE: create a new Archivist node with the provided config.json. +## CREATE: create a new Archivist node with the provided config.toml. ## START: start the provided Archivist node. ## STOP: stop the provided Archivist node. -import std/[options, json, strutils, net, os] +import std/[options, strutils, net, os] import chronos import chronicles import results @@ -11,13 +11,10 @@ import confutils import confutils/std/net import confutils/defs import libp2p -import libp2p/routing_record -import json_serialization -import json_serialization/std/[options, net] +import toml_serialization import ../../../archivist/conf import ../../alloc -import ../../../archivist/conf import ../../../archivist/utils import ../../../archivist/utils/[keyutils, fileutils] import ../../../archivist/units @@ -33,94 +30,60 @@ type NodeLifecycleMsgType* = enum START STOP + + proc readValue*[T: InputFile | InputDir | OutPath | OutDir | OutFile]( - r: var JsonReader, val: var T -) {.raises: [SerializationError, IOError].} = + r: var TomlReader, val: var T +) = val = T(r.readValue(string)) -proc readValue*(r: var JsonReader, val: var MultiAddress) {.raises: [SerializationError, IOError].} = - let addrStr = r.readValue(string) - let res = MultiAddress.init(addrStr) - if res.isErr: - raise - newException(SerializationError, "Cannot parse MultiAddress: " & addrStr) - val = res.get() +proc readValue*(r: var TomlReader, val: var IpAddress) {.raises: [SerializationError, IOError].} = + let s = r.readValue(string) + try: + val = 
parseIpAddress(s) + except CatchableError: + raise newException(SerializationError, "Invalid IP address: " & s) -proc readValue*(r: var JsonReader, val: var NatConfig) {.raises: [SerializationError, ValueError, IOError].} = +proc readValue*(r: var TomlReader, val: var Port) {.raises: [SerializationError, IOError].} = + let s = r.readValue(string) try: - val = NatConfig.parseCmdArg(r.readValue(string)) - except ValueError as e: - raise - newException(SerializationError, "Cannot parse the NAT config: " & e.msg) - -proc readValue*(r: var JsonReader, val: var SignedPeerRecord) {.raises: [SerializationError, IOError].} = - let uri = r.readValue(string) - if not val.fromURI(uri): - raise - newException(SerializationError, "Cannot parse the signed peer record: " & uri) - -proc readValue*(r: var JsonReader, val: var ThreadCount) {.raises: [SerializationError, IOError].} = - val = ThreadCount(r.readValue(int)) - -proc readValue*(r: var JsonReader, val: var NBytes) {.raises: [SerializationError, IOError].} = - val = NBytes(r.readValue(int)) - -proc readValue*(r: var JsonReader, val: var Duration) {.raises: [SerializationError, IOError].} = - var dur: Duration - let input = r.readValue(string) - let count = parseDuration(input, dur) - if count == 0: - raise newException(SerializationError, "Cannot parse the duration: " & input) - val = dur + val = Port(parseInt(s)) + except CatchableError: + raise newException(SerializationError, "Invalid port number: " & s) type NodeLifecycleRequest* = object operation: NodeLifecycleMsgType - configJson: cstring + configToml: cstring proc createShared*( - T: type NodeLifecycleRequest, op: NodeLifecycleMsgType, configJson: cstring = "" + T: type NodeLifecycleRequest, op: NodeLifecycleMsgType, configToml: cstring = "" ): ptr type T = var ret = createShared(T) ret[].operation = op - ret[].configJson = configJson.alloc() + ret[].configToml = configToml.alloc() return ret proc destroyShared(self: ptr NodeLifecycleRequest) = - 
deallocShared(self[].configJson) + deallocShared(self[].configToml) deallocShared(self) proc createArchivist( - configJson: cstring + configToml: cstring ): Future[Result[NodeServer, string]] {.async: (raises: []).} = var conf: NodeConf try: - # TODO: Fix configuration loading serialization issues, remove hardcoded stuff - conf = default(NodeConf) - conf.logLevel = "info" - conf.dataDir = OutDir(defaultDataDir()) - conf.netPrivKeyFile = "key" - conf.maxPeers = 160 - conf.agentString = "Archivist Node" - conf.numThreads = ThreadCount(0) - conf.discoveryPort = Port(8090) - - conf.listenAddrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("Should init multiaddress")] - - conf.apiBindAddress = "127.0.0.1" - conf.apiPort = Port(8080) - conf.storageQuota = DefaultQuotaBytes - conf.blockTtl = DefaultBlockTtl - conf.blockMaintenanceInterval = DefaultBlockInterval - conf.blockMaintenanceNumberOfBlocks = DefaultNumBlocksPerInterval - - let dataDir = string(conf.dataDir) - if not dirExists(dataDir): - try: - createDir(dataDir) - except CatchableError as e: - # TODO: Should we really ignore the directory creation failure? 
- discard + conf = NodeConf.load( + version = nodeFullVersion, + envVarsPrefix = "archivist", + cmdLine = @[], + secondarySources = proc( + config: NodeConf, sources: auto + ) {.gcsafe, raises: [ConfigurationError].} = + if configToml.len > 0: + sources.addConfigFileContent(Toml, $(configToml)) + , + ) except ConfigurationError as e: return err("Failed to create Archivist: unable to load configuration: " & e.msg) @@ -172,7 +135,7 @@ proc process*( of CREATE: archivist[] = ( await createArchivist( - self.configJson + self.configToml ) ).valueOr: error "Failed to CREATE.", error = error diff --git a/library/libarchivist.h b/library/libarchivist.h index cb1077ce..d90958a4 100644 --- a/library/libarchivist.h +++ b/library/libarchivist.h @@ -58,21 +58,22 @@ typedef void (*ArchivistCallback)(int callerRet, const char *msg, size_t len, vo /** * Create a new instance of an Archivist node. - * - * @param configJson JSON string with configuration overwriting defaults (can be NULL) + * + * @param configToml TOML string with configuration overwriting defaults (can be NULL or empty string) * @param callback Callback function for the result * @param userData User-provided context pointer * @return Opaque pointer to the ArchivistContext, or NULL on failure - * + * * Typical usage: - * ctx = archivist_new(configJson, myCallback, myUserData); + * ctx = archivist_new(configToml, myCallback, myUserData); + * archivist_create(ctx, ...); * archivist_start(ctx, ...); * ... 
* archivist_stop(ctx, ...); * archivist_destroy(ctx, ...); */ void *archivist_new( - const char *configJson, + const char *configToml, ArchivistCallback callback, void *userData); diff --git a/library/libarchivist.nim b/library/libarchivist.nim index e8796a83..f7cf21d7 100644 --- a/library/libarchivist.nim +++ b/library/libarchivist.nim @@ -71,7 +71,7 @@ proc initializeLibrary() {.exported.} = ### Context Lifecycle proc archivist_new*( - configJson: cstring, callback: ArchivistCallback, userData: pointer + configToml: cstring, callback: ArchivistCallback, userData: pointer ): pointer {.dynlib, exported.} = initializeLibrary() @@ -86,11 +86,16 @@ proc archivist_new*( ctx.userData = userData - # TODO: Parse configJson and configure the node - - let ack = "Archivist context created" - callback(RET_OK, unsafeAddr ack[0], cast[csize_t](len(ack)), userData) - + let reqContent = + NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE, configToml) + + archivist_context.sendRequestToArchivistThread( + ctx, RequestType.LIFECYCLE, reqContent, callback, userData + ).isOkOr: + let msg = $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return nil + return ctx proc archivist_create*( diff --git a/library/test_ffi.c b/library/test_ffi.c index 9d2896dd..8cdae7af 100644 --- a/library/test_ffi.c +++ b/library/test_ffi.c @@ -35,7 +35,7 @@ void test_callback(int status, const char* data, size_t len, void* userData) { int test_create_context() { printf("Test: Create and destroy context\n"); - void* ctx = archivist_new("{}", test_callback, (void*)0x1234); + void* ctx = archivist_new("", test_callback, (void*)0x1234); if (!ctx) { printf(" FAILED: archivist_new returned NULL\n"); return 1; @@ -54,10 +54,165 @@ int test_create_context() { return 0; } +int test_config_null() { + printf("Test: Config with NULL\n"); + + void* ctx = archivist_new(NULL, test_callback, NULL); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + 
} + printf(" PASSED: Context created with NULL config\n"); + + sleep(1); + + // Verify the default data dir is used + int result = archivist_repo(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_repo returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Repo callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + if (callback_data && callback_data_len > 0) { + printf(" PASSED: Default repo: %s\n", callback_data); + } else { + printf(" WARNING: No repo data received\n"); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_config_empty() { + printf("Test: Config with empty string\n"); + + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + } + printf(" PASSED: Context created with empty config\n"); + + sleep(1); + + // Verify the default data dir is used + int result = archivist_repo(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_repo returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Repo callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + if (callback_data && callback_data_len > 0) { + printf(" PASSED: Default repo: %s\n", callback_data); + } else { + printf(" WARNING: No repo data received\n"); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_config_custom_data_dir() { + printf("Test: Config with custom data-dir\n"); + + // Use TOML format to set a custom data directory + const char* config = "data-dir = \"/tmp/archivist-test-custom\""; + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 
1; + } + printf(" PASSED: Context created with custom data-dir config\n"); + + sleep(1); + + // Verify the custom data dir is used + int result = archivist_repo(ctx, test_callback, NULL); + if (result != 0) { + printf(" FAILED: archivist_repo returned %d\n", result); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep(1); + + if (callback_status != 0) { + printf(" FAILED: Repo callback status %d\n", callback_status); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + if (callback_data && callback_data_len > 0) { + printf(" PASSED: Custom repo: %s\n", callback_data); + // Verify the path contains our custom directory + if (strstr(callback_data, "archivist-test-custom") != NULL) { + printf(" PASSED: Custom data-dir was applied correctly\n"); + } else { + printf(" FAILED: Custom data-dir was not applied\n"); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + } else { + printf(" FAILED: No repo data received\n"); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_config_invalid() { + printf("Test: Config with invalid TOML\n"); + + // Invalid TOML: missing closing quote + const char* config = "data-dir = \"/tmp/test"; + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + printf(" FAILED: archivist_new returned NULL\n"); + return 1; + } + printf(" PASSED: Context created (async error expected)\n"); + + sleep(2); + + // The error should be reported via callback + if (callback_status != 0) { + printf(" PASSED: Invalid config correctly returned error: %s\n", + callback_data ? 
callback_data : "unknown"); + } else { + printf(" WARNING: No error reported for invalid config\n"); + } + + // Clean up even if there was an error + if (ctx) { + archivist_destroy(ctx, test_callback, NULL); + } + return 0; +} + int test_version() { printf("Test: Get version\n"); - void* ctx = archivist_new("{}", test_callback, NULL); + void* ctx = archivist_new("", test_callback, NULL); if (!ctx) { printf(" FAILED: archivist_new returned NULL\n"); return 1; @@ -93,7 +248,7 @@ int test_version() { int test_peer_id() { printf("Test: Get peer ID\n"); - void* ctx = archivist_new("{}", test_callback, NULL); + void* ctx = archivist_new("", test_callback, NULL); if (!ctx) { printf(" FAILED: archivist_new returned NULL\n"); return 1; @@ -151,7 +306,7 @@ int test_peer_id() { int test_debug() { printf("Test: Debug\n"); - void* ctx = archivist_new("{}", test_callback, NULL); + void* ctx = archivist_new("", test_callback, NULL); if (!ctx) { printf(" FAILED: archivist_new returned NULL\n"); return 1; @@ -187,7 +342,7 @@ int test_debug() { int test_connected_peers() { printf("Test: Connected peers\n"); - void* ctx = archivist_new("{}", test_callback, NULL); + void* ctx = archivist_new("", test_callback, NULL); if (!ctx) { printf(" FAILED: archivist_new returned NULL\n"); return 1; @@ -245,7 +400,7 @@ int test_connected_peers() { int test_storage_list() { printf("Test: Storage list\n"); - void* ctx = archivist_new("{}", test_callback, NULL); + void* ctx = archivist_new("", test_callback, NULL); if (!ctx) { printf(" FAILED: archivist_new returned NULL\n"); return 1; @@ -303,7 +458,7 @@ int test_storage_list() { int test_storage_space() { printf("Test: Storage space\n"); - void* ctx = archivist_new("{}", test_callback, NULL); + void* ctx = archivist_new("", test_callback, NULL); if (!ctx) { printf(" FAILED: archivist_new returned NULL\n"); return 1; @@ -361,7 +516,7 @@ int test_storage_space() { int test_start_stop() { printf("Test: Start and stop\n"); - void* ctx = 
archivist_new("{}", test_callback, NULL); + void* ctx = archivist_new("", test_callback, NULL); if (!ctx) { printf(" FAILED: archivist_new returned NULL\n"); return 1; @@ -413,6 +568,20 @@ int main(int argc, char** argv) { int failed = 0; + // Configuration parsing tests + failed += test_config_null(); + printf("\n"); + + failed += test_config_empty(); + printf("\n"); + + failed += test_config_custom_data_dir(); + printf("\n"); + + failed += test_config_invalid(); + printf("\n"); + + // Original tests failed += test_create_context(); printf("\n"); diff --git a/tests/test_conf_serialization.nim b/tests/test_conf_serialization.nim new file mode 100644 index 00000000..b4ea7d77 --- /dev/null +++ b/tests/test_conf_serialization.nim @@ -0,0 +1,209 @@ +## TOML Config Serialization Tests + +import std/options +import pkg/toml_serialization +import pkg/confutils/defs +import pkg/libp2p +import pkg/ethers +import ../archivist/conf +import ../archivist/conf_serialization +import ../archivist/units +import ../archivist/nat +import ../archivist/utils/natutils + +echo "=== Test 1: Simple types ===" +var config1 = default(NodeConf) +config1.dataDir = OutDir("/tmp/test") +config1.logLevel = "debug" +config1.numThreads = ThreadCount(4) +config1.storageQuota = GiBs(1) +config1.blockTtl = 3600.seconds +config1.maxPeers = 50 + +let toml1 = toToml(config1) +let decoded1 = Toml.decode(toml1, NodeConf) + +assert string(decoded1.dataDir) == string(config1.dataDir) +assert decoded1.logLevel == config1.logLevel +assert decoded1.numThreads == config1.numThreads +assert decoded1.storageQuota == config1.storageQuota +assert decoded1.blockTtl == config1.blockTtl +assert decoded1.maxPeers == config1.maxPeers +echo "✓ Test 1 passed\n" + +echo "=== Test 2: Network types ===" +var config2 = default(NodeConf) +config2.metricsAddress = parseIpAddress("192.168.1.100") +config2.metricsPort = Port(9090) +config2.discoveryPort = Port(8090) +config2.apiPort = Port(8080) + +let maResult = 
MultiAddress.init("/ip4/127.0.0.1/tcp/0") +assert maResult.isOk +config2.listenAddrs = @[maResult.get()] + +let toml2 = toToml(config2) +let decoded2 = Toml.decode(toml2, NodeConf) + +assert decoded2.metricsAddress == config2.metricsAddress +assert decoded2.metricsPort == config2.metricsPort +assert decoded2.discoveryPort == config2.discoveryPort +assert decoded2.apiPort == config2.apiPort +assert decoded2.listenAddrs.len == config2.listenAddrs.len +assert decoded2.listenAddrs[0] == config2.listenAddrs[0] +echo "✓ Test 2 passed\n" + +echo "=== Test 3: Enum types ===" +var config3 = default(NodeConf) +config3.logFormat = LogKind.Json +config3.repoKind = RepoKind.repoSQLite +config3.proverBackend = ProverBackendCmd.circomcompat +config3.curve = Curves.bn128 + +let toml3 = toToml(config3) +let decoded3 = Toml.decode(toml3, NodeConf) + +assert decoded3.logFormat == config3.logFormat +assert decoded3.repoKind == config3.repoKind +assert decoded3.proverBackend == config3.proverBackend +assert decoded3.curve == config3.curve +echo "✓ Test 3 passed\n" + +echo "=== Test 4: Option types ===" +var config4 = default(NodeConf) +config4.apiCorsAllowedOrigin = some("*") +config4.logFile = some("/tmp/archivist.log") +config4.ethPrivateKey = some("/path/to/private.key") +config4.validatorGroups = some(4) + +let toml4 = toToml(config4) +let decoded4 = Toml.decode(toml4, NodeConf) + +assert decoded4.apiCorsAllowedOrigin == config4.apiCorsAllowedOrigin +assert decoded4.logFile == config4.logFile +assert decoded4.ethPrivateKey == config4.ethPrivateKey +assert decoded4.validatorGroups == config4.validatorGroups +echo "✓ Test 4 passed\n" + +echo "=== Test 5: Complex types ===" +var config5 = default(NodeConf) +config5.nat = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) +config5.bootstrapNodes = @[] + +let toml5 = toToml(config5) +let decoded5 = Toml.decode(toml5, NodeConf) + +assert decoded5.nat.hasExtIp == config5.nat.hasExtIp +assert decoded5.nat.nat == config5.nat.nat +assert 
decoded5.bootstrapNodes.len == config5.bootstrapNodes.len +echo "✓ Test 5 passed\n" + +echo "=== Test 6: File path types ===" +var config6 = default(NodeConf) +config6.dataDir = OutDir("/tmp/archivist_data") +config6.circuitDir = OutDir("/tmp/circuits") +config6.circomR1cs = InputFile("/tmp/circuits/proof_main.r1cs") +config6.circomGraph = InputFile("/tmp/circuits/proof_main.bin") +config6.circomWasm = InputFile("/tmp/circuits/proof_main.wasm") +config6.circomZkey = InputFile("/tmp/circuits/proof_main.zkey") + +let toml6 = toToml(config6) +let decoded6 = Toml.decode(toml6, NodeConf) + +assert string(decoded6.dataDir) == string(config6.dataDir) +assert string(decoded6.circuitDir) == string(config6.circuitDir) +assert string(decoded6.circomR1cs) == string(config6.circomR1cs) +assert string(decoded6.circomGraph) == string(config6.circomGraph) +assert string(decoded6.circomWasm) == string(config6.circomWasm) +assert string(decoded6.circomZkey) == string(config6.circomZkey) +echo "✓ Test 6 passed\n" + +echo "=== Test 7: Full config round-trip ===" +var config7 = default(NodeConf) +config7.dataDir = OutDir("/tmp/archivist_test") +config7.logLevel = "info" +config7.logFormat = LogKind.Colors +config7.metricsEnabled = true +config7.metricsAddress = parseIpAddress("127.0.0.1") +config7.metricsPort = Port(8008) +config7.listenAddrs = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").expect("valid")] +config7.nat = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) +config7.discoveryPort = Port(8090) +config7.netPrivKeyFile = "test_key" +config7.maxPeers = 160 +config7.numThreads = ThreadCount(0) +config7.agentString = "Test Archivist Node" +config7.apiBindAddress = "127.0.0.1" +config7.apiPort = Port(8080) +config7.apiCorsAllowedOrigin = some("*") +config7.repoKind = RepoKind.repoFS +config7.storageQuota = GiBs(10) +config7.blockTtl = 7200.seconds +config7.blockMaintenanceInterval = 300.seconds +config7.blockMaintenanceNumberOfBlocks = 100 +config7.cacheSize = MiBs(512) 
+config7.logFile = some("/tmp/archivist.log") +config7.persistence = false +config7.ethProvider = "ws://localhost:8545" +config7.validator = false +config7.validatorMaxSlots = 1000 +config7.prover = false +config7.circuitDir = OutDir("/tmp/circuits") +config7.proverBackend = ProverBackendCmd.nimgroth16 +config7.curve = Curves.bn128 +config7.circomR1cs = InputFile("/tmp/circuits/proof_main.r1cs") +config7.circomGraph = InputFile("/tmp/circuits/proof_main.bin") +config7.circomWasm = InputFile("/tmp/circuits/proof_main.wasm") +config7.circomZkey = InputFile("/tmp/circuits/proof_main.zkey") +config7.numProofSamples = 10 +config7.maxSlotDepth = 16 +config7.maxDatasetDepth = 8 +config7.maxBlockDepth = 12 +config7.maxCellElms = 4096 + +let toml7 = toToml(config7) +let decoded7 = Toml.decode(toml7, NodeConf) + +assert string(decoded7.dataDir) == string(config7.dataDir) +assert decoded7.logLevel == config7.logLevel +assert decoded7.logFormat == config7.logFormat +assert decoded7.metricsEnabled == config7.metricsEnabled +assert decoded7.metricsAddress == config7.metricsAddress +assert decoded7.metricsPort == config7.metricsPort +assert decoded7.maxPeers == config7.maxPeers +assert decoded7.numThreads == config7.numThreads +assert decoded7.agentString == config7.agentString +assert decoded7.repoKind == config7.repoKind +assert decoded7.storageQuota == config7.storageQuota +assert decoded7.validator == config7.validator +assert decoded7.prover == config7.prover +assert decoded7.proverBackend == config7.proverBackend +assert decoded7.curve == config7.curve +assert string(decoded7.circuitDir) == string(config7.circuitDir) +assert string(decoded7.circomR1cs) == string(config7.circomR1cs) +assert string(decoded7.circomGraph) == string(config7.circomGraph) +assert string(decoded7.circomWasm) == string(config7.circomWasm) +assert string(decoded7.circomZkey) == string(config7.circomZkey) +echo "✓ Test 7 passed\n" + +echo "=== Test 8: Empty arrays and None options ===" +var config8 = 
default(NodeConf) +config8.listenAddrs = @[] +config8.bootstrapNodes = @[] +config8.apiCorsAllowedOrigin = none(string) +config8.logFile = none(string) +config8.ethPrivateKey = none(string) +config8.validatorGroups = none(int) + +let toml8 = toToml(config8) +let decoded8 = Toml.decode(toml8, NodeConf) + +assert decoded8.listenAddrs.len == 0 +assert decoded8.bootstrapNodes.len == 0 +assert decoded8.apiCorsAllowedOrigin.isNone +assert decoded8.logFile.isNone +assert decoded8.ethPrivateKey.isNone +assert decoded8.validatorGroups.isNone +echo "✓ Test 8 passed\n" + +echo "\n=== All tests passed! ===" From 53e8269dafa73c876b2b358c15af34b9451fface Mon Sep 17 00:00:00 2001 From: Xav Date: Fri, 6 Mar 2026 06:59:42 -0500 Subject: [PATCH 03/16] fix(library): improve memory cleanup --- .../requests/node_debug_request.nim | 9 +++++-- .../requests/node_download_request.nim | 12 +++++++--- .../requests/node_lifecycle_request.nim | 9 +++++-- .../requests/node_storage_request.nim | 9 +++++-- .../requests/node_upload_request.nim | 12 +++++++--- library/libarchivist.nim | 24 +++++++++++++++++++ 6 files changed, 63 insertions(+), 12 deletions(-) diff --git a/library/archivist_thread_requests/requests/node_debug_request.nim b/library/archivist_thread_requests/requests/node_debug_request.nim index d1534ac2..b7fd51d9 100644 --- a/library/archivist_thread_requests/requests/node_debug_request.nim +++ b/library/archivist_thread_requests/requests/node_debug_request.nim @@ -48,8 +48,13 @@ proc createShared*( return ret proc destroyShared(self: ptr NodeDebugRequest) = - deallocShared(self[].data) - deallocShared(self) + if not self.isNil: + deallocShared(self[].data) + deallocShared(self) + +proc cleanupRequest(self: ptr NodeDebugRequest) = + if not self.isNil: + deallocShared(self[].data) proc process*( self: ptr NodeDebugRequest, archivist: ptr NodeServer diff --git a/library/archivist_thread_requests/requests/node_download_request.nim 
b/library/archivist_thread_requests/requests/node_download_request.nim index 5008da4c..3d8ee676 100644 --- a/library/archivist_thread_requests/requests/node_download_request.nim +++ b/library/archivist_thread_requests/requests/node_download_request.nim @@ -75,9 +75,15 @@ proc createShared*( return ret proc destroyShared(self: ptr NodeDownloadRequest) = - deallocShared(self[].cid) - deallocShared(self[].filepath) - deallocShared(self) + if not self.isNil: + deallocShared(self[].cid) + deallocShared(self[].filepath) + deallocShared(self) + +proc cleanupRequest(self: ptr NodeDownloadRequest) = + if not self.isNil: + deallocShared(self[].cid) + deallocShared(self[].filepath) proc init( archivist: ptr NodeServer, cCid: cstring = "", chunkSize: csize_t = 0, local: bool diff --git a/library/archivist_thread_requests/requests/node_lifecycle_request.nim b/library/archivist_thread_requests/requests/node_lifecycle_request.nim index aa009951..821d200c 100644 --- a/library/archivist_thread_requests/requests/node_lifecycle_request.nim +++ b/library/archivist_thread_requests/requests/node_lifecycle_request.nim @@ -64,8 +64,13 @@ proc createShared*( return ret proc destroyShared(self: ptr NodeLifecycleRequest) = - deallocShared(self[].configToml) - deallocShared(self) + if not self.isNil: + deallocShared(self[].configToml) + deallocShared(self) + +proc cleanupRequest(self: ptr NodeLifecycleRequest) = + if not self.isNil: + deallocShared(self[].configToml) proc createArchivist( configToml: cstring diff --git a/library/archivist_thread_requests/requests/node_storage_request.nim b/library/archivist_thread_requests/requests/node_storage_request.nim index a4de2a51..d3a128dd 100644 --- a/library/archivist_thread_requests/requests/node_storage_request.nim +++ b/library/archivist_thread_requests/requests/node_storage_request.nim @@ -53,8 +53,13 @@ proc createShared*( return ret proc destroyShared(self: ptr NodeStorageRequest) = - deallocShared(self[].cid) - deallocShared(self) + if not 
self.isNil: + deallocShared(self[].cid) + deallocShared(self) + +proc cleanupRequest(self: ptr NodeStorageRequest) = + if not self.isNil: + deallocShared(self[].cid) type ManifestWithCid = object cid {.serialize.}: string diff --git a/library/archivist_thread_requests/requests/node_upload_request.nim b/library/archivist_thread_requests/requests/node_upload_request.nim index 2ed13e4a..68b7a4f6 100644 --- a/library/archivist_thread_requests/requests/node_upload_request.nim +++ b/library/archivist_thread_requests/requests/node_upload_request.nim @@ -83,9 +83,15 @@ proc createShared*( return ret proc destroyShared(self: ptr NodeUploadRequest) = - deallocShared(self[].filepath) - deallocShared(self[].sessionId) - deallocShared(self) + if not self.isNil: + deallocShared(self[].filepath) + deallocShared(self[].sessionId) + deallocShared(self) + +proc cleanupRequest(self: ptr NodeUploadRequest) = + if not self.isNil: + deallocShared(self[].filepath) + deallocShared(self[].sessionId) proc init( archivist: ptr NodeServer, filepath: cstring = "", chunkSize: csize_t = 0 diff --git a/library/libarchivist.nim b/library/libarchivist.nim index f7cf21d7..05a3c7c5 100644 --- a/library/libarchivist.nim +++ b/library/libarchivist.nim @@ -93,6 +93,8 @@ proc archivist_new*( ctx, RequestType.LIFECYCLE, reqContent, callback, userData ).isOkOr: let msg = $error + reqContent.cleanupRequest() + deallocShared(reqContent) callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) return nil @@ -107,6 +109,7 @@ proc archivist_create*( let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE, "") let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -120,6 +123,7 @@ proc archivist_start*( let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START, "") let res = 
ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -133,6 +137,7 @@ proc archivist_stop*( let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP, "") let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -216,6 +221,7 @@ proc archivist_debug*( let req = NodeDebugRequest.createShared(NodeDebugMsgType.DEBUG) let res = ctx.sendRequestToArchivistThread(RequestType.DEBUG, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -255,6 +261,7 @@ proc archivist_log_level*( let req = NodeDebugRequest.createShared(NodeDebugMsgType.LOG_LEVEL, $logLevel) let res = ctx.sendRequestToArchivistThread(RequestType.DEBUG, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -353,6 +360,7 @@ proc archivist_upload_init*( let req = NodeUploadRequest.createShared(NodeUploadMsgType.INIT, $filepath, @[], chunkSize.int) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -376,6 +384,7 @@ proc archivist_upload_chunk*( let req = NodeUploadRequest.createShared(NodeUploadMsgType.CHUNK, $sessionId, chunkData) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -392,6 +401,7 @@ proc archivist_upload_finalize*( let req = NodeUploadRequest.createShared(NodeUploadMsgType.FINALIZE, $sessionId) let res = 
ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -408,6 +418,7 @@ proc archivist_upload_cancel*( let req = NodeUploadRequest.createShared(NodeUploadMsgType.CANCEL, $sessionId) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -424,6 +435,7 @@ proc archivist_upload_file*( let req = NodeUploadRequest.createShared(NodeUploadMsgType.FILE, $sessionId) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -445,6 +457,7 @@ proc archivist_download_init*( let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.INIT, $cid, chunkSize.int, local) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -467,6 +480,7 @@ proc archivist_download_stream*( let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.STREAM, $cid, chunkSize.int, local, fp) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -483,6 +497,7 @@ proc archivist_download_chunk*( let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CHUNK, $cid) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -499,6 +514,7 @@ proc archivist_download_cancel*( let req = 
NodeDownloadRequest.createShared(NodeDownloadMsgType.CANCEL, $cid) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -515,6 +531,7 @@ proc archivist_download_manifest*( let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.MANIFEST, $cid) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -531,6 +548,7 @@ proc archivist_list*( let req = NodeStorageRequest.createShared(NodeStorageMsgType.LIST) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -544,6 +562,7 @@ proc archivist_space*( let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -557,6 +576,7 @@ proc archivist_delete*( let req = NodeStorageRequest.createShared(NodeStorageMsgType.DELETE, cid) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -570,6 +590,7 @@ proc archivist_fetch*( let req = NodeStorageRequest.createShared(NodeStorageMsgType.FETCH, cid) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -583,6 +604,7 @@ proc archivist_exists*( let req = NodeStorageRequest.createShared(NodeStorageMsgType.EXISTS, cid) let res = 
ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -596,6 +618,7 @@ proc archivist_local_size*( let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK @@ -609,6 +632,7 @@ proc archivist_block_count*( let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) if res.isErr: + req.cleanupRequest() deallocShared(req) return callback.error(res.error, userData) return RET_OK From ca5a8bada19a873365cf7f31acff97cebd59801b Mon Sep 17 00:00:00 2001 From: Xav Date: Fri, 6 Mar 2026 07:39:49 -0500 Subject: [PATCH 04/16] feat(lib): improve callback string handling --- library/Makefile | 15 +- library/archivist_context.nim | 8 +- .../archivist_thread_request.nim | 21 +-- library/ffi_types.nim | 82 +++++++-- library/libarchivist.nim | 25 ++- library/tests/test_callback_safety.nim | 168 ++++++++++++++++++ library/{ => tests}/test_ffi.c | 0 library/tests/test_runner.nim | 45 +++++ 8 files changed, 322 insertions(+), 42 deletions(-) create mode 100644 library/tests/test_callback_safety.nim rename library/{ => tests}/test_ffi.c (100%) create mode 100644 library/tests/test_runner.nim diff --git a/library/Makefile b/library/Makefile index 42edcec6..23e0ecef 100644 --- a/library/Makefile +++ b/library/Makefile @@ -17,7 +17,7 @@ LIB_A = $(LIB_DIR)/$(LIB_NAME).a TEST_BIN = $(BIN_DIR)/test_ffi NIM_SOURCES = libarchivist.nim -C_TEST_SOURCES = test_ffi.c +C_TEST_SOURCES = tests/test_ffi.c .PHONY: all all: $(LIB_SO) $(TEST_BIN) @@ -38,13 +38,22 @@ $(LIB_A): $(NIM_SOURCES) | $(LIB_DIR) $(NIM) c $(NIM_FLAGS) --out:$(LIB_A) $(NIM_SOURCES) 
$(TEST_BIN): $(C_TEST_SOURCES) $(LIB_SO) | $(BIN_DIR) - $(CC) $(CFLAGS) -o $(TEST_BIN) $(C_TEST_SOURCES) $(LDFLAGS) -I. + $(CC) $(CFLAGS) -o $(TEST_BIN) $(C_TEST_SOURCES) $(LDFLAGS) -I. -Itests .PHONY: test test: $(TEST_BIN) @echo "Running FFI tests..." @LD_LIBRARY_PATH=$(LIB_DIR) $(TEST_BIN) +.PHONY: test-nim +test-nim: + @echo "Running Nim tests..." + @cd tests && nim c -r test_runner.nim + +.PHONY: test-all +test-all: test test-nim + @echo "All tests completed." + .PHONY: clean clean: rm -rf $(BUILD_DIR) @@ -73,6 +82,8 @@ help: @echo "Available targets:" @echo " all - Build library and test program (default)" @echo " test - Run FFI tests" + @echo " test-nim - Run Nim tests" + @echo " test-all - Run all tests (FFI + Nim)" @echo " clean - Remove build artifacts" @echo " install - Install library to /usr/local" @echo " uninstall - Remove library from /usr/local" diff --git a/library/archivist_context.nim b/library/archivist_context.nim index c6bac000..75e5f8ec 100644 --- a/library/archivist_context.nim +++ b/library/archivist_context.nim @@ -51,16 +51,12 @@ template callEventCallback(ctx: ptr ArchivistContext, eventName: string, body: u foreignThreadGc: try: let event = body - cast[ArchivistCallback](ctx[].eventCallback)( - RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData - ) + safeCallback(cast[ArchivistCallback](ctx[].eventCallback), RET_OK, event, ctx[].eventUserData) except CatchableError: let msg = "Exception " & eventName & " when calling 'eventCallBack': " & getCurrentExceptionMsg() - cast[ArchivistCallback](ctx[].eventCallback)( - RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData - ) + safeCallback(cast[ArchivistCallback](ctx[].eventCallback), RET_ERR, msg, ctx[].eventUserData) proc sendRequestToArchivistThread*( ctx: ptr ArchivistContext, diff --git a/library/archivist_thread_requests/archivist_thread_request.nim b/library/archivist_thread_requests/archivist_thread_request.nim index efef1893..03894782 
100644 --- a/library/archivist_thread_requests/archivist_thread_request.nim +++ b/library/archivist_thread_requests/archivist_thread_request.nim @@ -61,18 +61,15 @@ proc handleRes[T: string | void | seq[byte]]( if msg == "": request[].callback(RET_ERR, nil, cast[csize_t](0), request[].userData) else: - request[].callback( - RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData - ) + safeCallback(request[].callback, RET_ERR, msg, request[].userData) return foreignThreadGc: - var msg: cstring = "" when T is string: - msg = res.get().cstring() - request[].callback( - RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData - ) + let msg = res.get() + safeCallback(request[].callback, RET_OK, msg, request[].userData) + else: + request[].callback(RET_OK, nil, cast[csize_t](0), request[].userData) return proc process*( @@ -95,12 +92,16 @@ proc process*( of DOWNLOAD: let onChunk = proc(bytes: seq[byte]) = if bytes.len > 0: + let sharedBytes = allocSharedSeq(bytes) + request[].callback( RET_PROGRESS, - cast[ptr cchar](unsafeAddr bytes[0]), - cast[csize_t](bytes.len), + cast[ptr cchar](sharedBytes.data), + cast[csize_t](sharedBytes.len), request[].userData, ) + + deallocSharedSeq(sharedBytes) cast[ptr NodeDownloadRequest](request[].reqContent).process(archivist, onChunk) of UPLOAD: diff --git a/library/ffi_types.nim b/library/ffi_types.nim index 7446cf8c..fc7c9d2a 100644 --- a/library/ffi_types.nim +++ b/library/ffi_types.nim @@ -6,7 +6,7 @@ {.pragma: exported, exportc, cdecl, raises: [].} {.pragma: callback, cdecl, raises: [], gcsafe.} -import pkg/results +import ./alloc ################################################################################ ### Exported types @@ -23,31 +23,81 @@ const RET_ERR*: cint = 1 const RET_MISSING_CALLBACK*: cint = 2 const RET_PROGRESS*: cint = 3 +################################################################################ +### Safe callback string handling + +type CallbackString* = object + data*: 
cstring + len*: csize_t + +proc createCallbackString*(msg: string): CallbackString = + if msg.len == 0: + return CallbackString(data: nil, len: 0) + + let data = allocCString(msg) + return CallbackString(data: data, len: cast[csize_t](msg.len)) + +proc createCallbackString*(msg: cstring): CallbackString = + if msg.isNil: + return CallbackString(data: nil, len: 0) + + let len = len(msg) + if len == 0: + return CallbackString(data: nil, len: 0) + + let data = alloc(msg) + return CallbackString(data: data, len: cast[csize_t](len)) + +proc freeCallbackString*(cbStr: CallbackString) = + if not cbStr.data.isNil: + deallocCString(cbStr.data) + +proc safeCallback*(callback: ArchivistCallback, retCode: cint, cbStr: CallbackString, userData: pointer) = + callback(retCode, cast[ptr cchar](cbStr.data), cbStr.len, userData) + cbStr.freeCallbackString() + +proc safeCallback*(callback: ArchivistCallback, retCode: cint, msg: string, userData: pointer) = + let cbStr = createCallbackString(msg) + safeCallback(callback, retCode, cbStr, userData) + +################################################################################ +### String pointer validation + +proc validateCString*(str: cstring): bool = + return not str.isNil + +proc validateStringPtr*(strPtr: ptr cchar, len: csize_t): bool = + return not strPtr.isNil and len > 0 + +proc safeStringCopy*(src: cstring, maxLen: csize_t): string = + if not validateCString(src): + return "" + + try: + let srcLen = len(src) + let copyLen = min(srcLen, maxLen.int) + if copyLen == 0: + return "" + + result = newString(copyLen) + copyMem(addr result[0], src, copyLen) + except: + result = "" + ################################################################################ ### Helper procedures proc success*(callback: ArchivistCallback, msg: string, userData: pointer): cint = - if msg.len > 0: - callback(RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) - else: - let empty = "" - callback(RET_OK, unsafeAddr empty[0], 0, userData) 
+ safeCallback(callback, RET_OK, msg, userData) return RET_OK proc error*(callback: ArchivistCallback, msg: string, userData: pointer): cint = - let msg = "libarchivist error: " & msg - callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + let fullMsg = "libarchivist error: " & msg + safeCallback(callback, RET_ERR, fullMsg, userData) return RET_ERR -proc okOrError*[T]( - callback: ArchivistCallback, res: Result[T, string], userData: pointer -): cint = - if res.isOk: - return RET_OK - return callback.error($res.error, userData) - proc progress*(callback: ArchivistCallback, data: string, userData: pointer): cint = - callback(RET_PROGRESS, unsafeAddr data[0], cast[csize_t](len(data)), userData) + safeCallback(callback, RET_PROGRESS, data, userData) return RET_OK ################################################################################ diff --git a/library/libarchivist.nim b/library/libarchivist.nim index 05a3c7c5..6bfd325f 100644 --- a/library/libarchivist.nim +++ b/library/libarchivist.nim @@ -79,15 +79,17 @@ proc archivist_new*( error "Failed to create Archivist instance: the callback is missing." 
return nil + let safeConfig = if validateCString(configToml): safeStringCopy(configToml, 10000) else: "" + var ctx = archivist_context.createArchivistContext().valueOr: let msg = $error - callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + safeCallback(callback, RET_ERR, msg, userData) return nil ctx.userData = userData let reqContent = - NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE, configToml) + NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE, safeConfig) archivist_context.sendRequestToArchivistThread( ctx, RequestType.LIFECYCLE, reqContent, callback, userData @@ -95,7 +97,7 @@ proc archivist_new*( let msg = $error reqContent.cleanupRequest() deallocShared(reqContent) - callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + safeCallback(callback, RET_ERR, msg, userData) return nil return ctx @@ -150,7 +152,7 @@ proc archivist_close*( let ctx = cast[ptr ArchivistContext](ctx) # TODO: Need to double check this part let ack = "closed" - callback(RET_OK, unsafeAddr ack[0], cast[csize_t](len(ack)), userData) + safeCallback(callback, RET_OK, ack, userData) return RET_OK proc archivist_destroy*( @@ -164,7 +166,7 @@ proc archivist_destroy*( return callback.error(destroyRes.error, userData) let ack = "destroyed" - callback(RET_OK, unsafeAddr ack[0], cast[csize_t](len(ack)), userData) + safeCallback(callback, RET_OK, ack, userData) return RET_OK ################################################################################ @@ -258,7 +260,10 @@ proc archivist_log_level*( checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) let ctx = cast[ptr ArchivistContext](ctx) - let req = NodeDebugRequest.createShared(NodeDebugMsgType.LOG_LEVEL, $logLevel) + + let safeLogLevel = if validateCString(logLevel): safeStringCopy(logLevel, 50) else: "INFO" + + let req = NodeDebugRequest.createShared(NodeDebugMsgType.LOG_LEVEL, safeLogLevel) let res = 
ctx.sendRequestToArchivistThread(RequestType.DEBUG, req, callback, userData) if res.isErr: req.cleanupRequest() @@ -280,12 +285,16 @@ proc archivist_connect*( checkLibarchivistParams(cast[ptr ArchivistContext](ctx), callback, userData) let ctx = cast[ptr ArchivistContext](ctx) + + let safePeerId = if validateCString(peerId): safeStringCopy(peerId, 500) else: "" + var addresses: seq[string] = @[] if not peerAddresses.isNil and peerAddressesSize > 0: for i in 0 ..< peerAddressesSize.int: - addresses.add($peerAddresses[i]) + if validateCString(peerAddresses[i]): + addresses.add(safeStringCopy(peerAddresses[i], 1000)) - let req = NodeP2PRequest.createShared(NodeP2PMsgType.CONNECT, $peerId, addresses) + let req = NodeP2PRequest.createShared(NodeP2PMsgType.CONNECT, safePeerId, addresses) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) if res.isErr: deallocShared(req) diff --git a/library/tests/test_callback_safety.nim b/library/tests/test_callback_safety.nim new file mode 100644 index 00000000..978aec41 --- /dev/null +++ b/library/tests/test_callback_safety.nim @@ -0,0 +1,168 @@ +## Test Callback Safety +## +## This file tests the safe string pointer usage in callbacks to ensure +## memory safety and thread safety are properly handled. 
+ +import std/[unittest, strutils, os] +import ffi_types +import alloc + +suite "Callback Safety Tests": + + test "safeCallback with empty string": + var callbackCalled = false + var callbackRetCode: cint + var callbackUserData: pointer + + let testCallback = proc(retCode: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = + callbackCalled = true + callbackRetCode = retCode + callbackUserData = userData + + let userData = cast[pointer](0x12345) + safeCallback(testCallback, RET_OK, "", userData) + + check(callbackCalled) + check(callbackRetCode == RET_OK) + check(callbackUserData == userData) + + test "safeCallback with non-empty string": + var callbackCalled = false + + let testCallback = proc(retCode: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = + callbackCalled = true + # Just verify the callback was called with non-nil message + if not msg.isNil and len > 0: + # Message is valid + discard + + let testMsg = "Hello, World!" 
+ safeCallback(testCallback, RET_ERR, testMsg, nil) + + check(callbackCalled) + + test "createCallbackString with empty string": + let cbStr = createCallbackString("") + check(cbStr.data.isNil) + check(cbStr.len == cast[csize_t](0)) + cbStr.freeCallbackString() + + test "createCallbackString with non-empty string": + let testMsg = "Test message" + let cbStr = createCallbackString(testMsg) + check(not cbStr.data.isNil) + check(cbStr.len == cast[csize_t](testMsg.len)) + cbStr.freeCallbackString() + + test "createCallbackString with cstring": + let testMsg = "C string test" + let cStr = testMsg.cstring + let cbStr = createCallbackString(cStr) + check(not cbStr.data.isNil) + check(cbStr.len == cast[csize_t](testMsg.len)) + cbStr.freeCallbackString() + + test "validateCString with valid string": + let testMsg = "Valid string" + let cStr = testMsg.cstring + check(validateCString(cStr)) + + test "validateCString with nil string": + let cStr: cstring = nil + check(not validateCString(cStr)) + + test "validateStringPtr with valid pointer": + let testMsg = "Valid pointer test" + var msgCopy = testMsg + let msgPtr = cast[ptr cchar](addr msgCopy[0]) + check(validateStringPtr(msgPtr, cast[csize_t](testMsg.len))) + + test "validateStringPtr with nil pointer": + let msgPtr: ptr cchar = nil + check(not validateStringPtr(msgPtr, cast[csize_t](10))) + + test "safeStringCopy with valid cstring": + let testMsg = "Safe copy test" + let cStr = testMsg.cstring + let copied = safeStringCopy(cStr, cast[csize_t](100)) + check(copied == testMsg) + + test "safeStringCopy with nil cstring": + let cStr: cstring = nil + let copied = safeStringCopy(cStr, cast[csize_t](100)) + check(copied == "") + + test "safeStringCopy with length limit": + let testMsg = "This is a long string that should be truncated" + let cStr = testMsg.cstring + let copied = safeStringCopy(cStr, cast[csize_t](10)) + check(copied.len <= 10) + + test "success helper function": + var callbackCalled = false + + let testCallback 
= proc(retCode: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = + callbackCalled = true + + let result = success(testCallback, "Success message", nil) + check(result == RET_OK) + check(callbackCalled) + + test "error helper function": + var callbackCalled = false + + let testCallback = proc(retCode: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = + callbackCalled = true + + let result = error(testCallback, "Test error", nil) + check(result == RET_ERR) + check(callbackCalled) + + test "progress helper function": + var callbackCalled = false + + let testCallback = proc(retCode: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = + callbackCalled = true + + let result = progress(testCallback, "Progress data", nil) + check(result == RET_OK) + check(callbackCalled) + + test "SharedSeq allocation and deallocation": + let originalSeq = @[1'u8, 2'u8, 3'u8, 4'u8, 5'u8] + var sharedSeq = allocSharedSeq(originalSeq) + + check(sharedSeq.len == originalSeq.len) + check(not sharedSeq.data.isNil) + + # Verify content + for i in 0.. 0: + # Message is valid + discard + + # Call callback multiple times with different messages + for i in 0..<10: + let msg = "Message " & $i + safeCallback(testCallback, RET_OK, msg, nil) + + check(callbackCount == 10) + +when isMainModule: + echo "Running callback safety tests..." 
\ No newline at end of file diff --git a/library/test_ffi.c b/library/tests/test_ffi.c similarity index 100% rename from library/test_ffi.c rename to library/tests/test_ffi.c diff --git a/library/tests/test_runner.nim b/library/tests/test_runner.nim new file mode 100644 index 00000000..9e47d3c6 --- /dev/null +++ b/library/tests/test_runner.nim @@ -0,0 +1,45 @@ +#!/usr/bin/env nim +## Test runner for library tests + +import os, strutils + +proc runTest(testFile: string): bool = + echo "Running test: ", testFile + let cmd = "nim c -r --hints:off " & testFile + let exitCode = execShellCmd(cmd) + if exitCode == 0: + echo "✓ ", testFile, " passed" + return true + else: + echo "✗ ", testFile, " failed (exit code: ", exitCode, ")" + return false + +proc main() = + let testDir = getCurrentDir() + let nimTests = toSeq(walkFiles(testDir / "test_*.nim")) + + if nimTests.len == 0: + echo "No Nim tests found in ", testDir + return + + echo "Running ", nimTests.len, " Nim test(s)..." + echo "=" .repeat(50) + + var passed = 0 + var failed = 0 + + for test in nimTests: + if runTest(test): + inc passed + else: + inc failed + echo "" + + echo "=" .repeat(50) + echo "Results: ", passed, " passed, ", failed, " failed" + + if failed > 0: + quit(1) + +when isMainModule: + main() \ No newline at end of file From 0fa8c68dad17744bdc3885c354c1df3874e873a3 Mon Sep 17 00:00:00 2001 From: Xav Date: Fri, 6 Mar 2026 07:49:02 -0500 Subject: [PATCH 05/16] feat(lib): standardize callback error handling --- .../archivist_thread_request.nim | 6 +- library/ffi_types.nim | 82 ++++++ library/libarchivist.nim | 256 ++++++------------ library/test_error_handling.nim | 69 +++++ 4 files changed, 244 insertions(+), 169 deletions(-) create mode 100644 library/test_error_handling.nim diff --git a/library/archivist_thread_requests/archivist_thread_request.nim b/library/archivist_thread_requests/archivist_thread_request.nim index 03894782..bedc4339 100644 --- 
a/library/archivist_thread_requests/archivist_thread_request.nim +++ b/library/archivist_thread_requests/archivist_thread_request.nim @@ -59,9 +59,11 @@ proc handleRes[T: string | void | seq[byte]]( foreignThreadGc: let msg = $res.error if msg == "": - request[].callback(RET_ERR, nil, cast[csize_t](0), request[].userData) + let errorMsg = formatErrorMessage(RET_ERR, "request processing", "Unknown error occurred") + safeCallback(request[].callback, RET_ERR, errorMsg, request[].userData) else: - safeCallback(request[].callback, RET_ERR, msg, request[].userData) + let errorMsg = formatErrorMessage(RET_ERR, "request processing", msg) + safeCallback(request[].callback, RET_ERR, errorMsg, request[].userData) return foreignThreadGc: diff --git a/library/ffi_types.nim b/library/ffi_types.nim index fc7c9d2a..8ca45818 100644 --- a/library/ffi_types.nim +++ b/library/ffi_types.nim @@ -22,6 +22,11 @@ const RET_OK*: cint = 0 const RET_ERR*: cint = 1 const RET_MISSING_CALLBACK*: cint = 2 const RET_PROGRESS*: cint = 3 +const RET_INVALID_PARAM*: cint = 4 +const RET_NULL_CONTEXT*: cint = 5 +const RET_THREAD_ERROR*: cint = 6 +const RET_MEMORY_ERROR*: cint = 7 +const RET_TIMEOUT*: cint = 8 ################################################################################ ### Safe callback string handling @@ -100,6 +105,83 @@ proc progress*(callback: ArchivistCallback, data: string, userData: pointer): ci safeCallback(callback, RET_PROGRESS, data, userData) return RET_OK +################################################################################ +### Standardized Error Handling Utilities + +proc formatErrorMessage*(errorCode: cint, context: string, details: string = ""): string = + ## Standardized error message formatting + let errorType = case errorCode: + of RET_INVALID_PARAM: "Invalid parameter" + of RET_NULL_CONTEXT: "Null context" + of RET_THREAD_ERROR: "Thread error" + of RET_MEMORY_ERROR: "Memory error" + of RET_TIMEOUT: "Timeout error" + of RET_MISSING_CALLBACK: "Missing 
callback" + of RET_ERR: "General error" + else: "Unknown error" + + if details.len > 0: + errorType & " in " & context & ": " & details + else: + errorType & " in " & context + +proc handleRequestError*( + callback: ArchivistCallback, + userData: pointer, + errorCode: cint, + context: string, + details: string = "", + request: pointer = nil, + cleanupProc: proc(request: pointer) {.raises: [].} = nil +): cint = + ## Standardized error handling for failed requests + ## Handles cleanup and consistent error reporting + if not request.isNil and not cleanupProc.isNil: + cleanupProc(request) + + let errorMsg = formatErrorMessage(errorCode, context, details) + safeCallback(callback, errorCode, errorMsg, userData) + return errorCode + +proc handleRequestSuccess*( + callback: ArchivistCallback, + userData: pointer, + message: string = "", + request: pointer = nil, + cleanupProc: proc(request: pointer) {.raises: [].} = nil +): cint = + ## Standardized success handling for completed requests + ## Handles cleanup and consistent success reporting + if not request.isNil and not cleanupProc.isNil: + cleanupProc(request) + + safeCallback(callback, RET_OK, message, userData) + return RET_OK + +proc validateContext*(ctx: pointer): cint = + ## Standardized context validation + if ctx.isNil: + return RET_NULL_CONTEXT + return RET_OK + +proc validateCallback*(callback: ArchivistCallback): cint = + ## Standardized callback validation + if callback.isNil: + return RET_MISSING_CALLBACK + return RET_OK + +proc validateParams*(ctx: pointer, callback: ArchivistCallback): cint = + ## Standardized parameter validation for common FFI function signature + let ctxResult = validateContext(ctx) + if ctxResult != RET_OK: + return ctxResult + + let callbackResult = validateCallback(callback) + if callbackResult != RET_OK: + return callbackResult + + return RET_OK + ################################################################################ ### FFI utils diff --git a/library/libarchivist.nim 
b/library/libarchivist.nim index 6bfd325f..2bd3a0aa 100644 --- a/library/libarchivist.nim +++ b/library/libarchivist.nim @@ -38,11 +38,46 @@ logScope: template checkLibarchivistParams*( ctx: ptr ArchivistContext, callback: ArchivistCallback, userData: pointer ) = + let validationResult = validateParams(cast[pointer](ctx), callback) + if validationResult != RET_OK: + return validationResult + if not isNil(ctx): ctx[].userData = userData - if isNil(callback): - return RET_MISSING_CALLBACK +template handleRequestResult*( + result: Result[void, string], + request: pointer, + callback: ArchivistCallback, + userData: pointer, + context: string +): cint = + if result.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, context, $result.error, request, + proc(req: pointer) {.raises: [].} = + when compiles(req.cleanupRequest()): + req.cleanupRequest() + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", request, + proc(req: pointer) {.raises: [].} = + when compiles(req.cleanupRequest()): + req.cleanupRequest() + deallocShared(req) + ) + +template handleRequestResultNoCleanup*( + result: Result[void, string], + callback: ArchivistCallback, + userData: pointer, + context: string +): cint = + if result.isErr: + return handleRequestError(callback, userData, RET_THREAD_ERROR, context, $result.error) + else: + return handleRequestSuccess(callback, userData) proc libarchivistNimMain() {.importc.} @@ -75,15 +110,18 @@ proc archivist_new*( ): pointer {.dynlib, exported.} = initializeLibrary() - if isNil(callback): - error "Failed to create Archivist instance: the callback is missing." 
+ let validationResult = validateParams(nil, callback) + if validationResult != RET_OK: + let errorMsg = formatErrorMessage(validationResult, "archivist_new", "Callback validation failed") + if not callback.isNil: + safeCallback(callback, validationResult, errorMsg, userData) return nil let safeConfig = if validateCString(configToml): safeStringCopy(configToml, 10000) else: "" var ctx = archivist_context.createArchivistContext().valueOr: - let msg = $error - safeCallback(callback, RET_ERR, msg, userData) + let errorMsg = formatErrorMessage(RET_ERR, "archivist_new", "Failed to create context: " & $error) + safeCallback(callback, RET_ERR, errorMsg, userData) return nil ctx.userData = userData @@ -94,10 +132,10 @@ proc archivist_new*( archivist_context.sendRequestToArchivistThread( ctx, RequestType.LIFECYCLE, reqContent, callback, userData ).isOkOr: - let msg = $error + let errorMsg = formatErrorMessage(RET_THREAD_ERROR, "archivist_new", "Failed to send request: " & $error) reqContent.cleanupRequest() deallocShared(reqContent) - safeCallback(callback, RET_ERR, msg, userData) + safeCallback(callback, RET_THREAD_ERROR, errorMsg, userData) return nil return ctx @@ -110,11 +148,7 @@ proc archivist_create*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE, "") let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_create") proc archivist_start*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -124,11 +158,7 @@ proc archivist_start*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START, "") let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) - if res.isErr: - 
req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_start") proc archivist_stop*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -138,11 +168,7 @@ proc archivist_stop*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP, "") let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_stop") proc archivist_close*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -152,8 +178,7 @@ proc archivist_close*( let ctx = cast[ptr ArchivistContext](ctx) # TODO: Need to double check this part let ack = "closed" - safeCallback(callback, RET_OK, ack, userData) - return RET_OK + return handleRequestSuccess(callback, userData, ack) proc archivist_destroy*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -163,11 +188,10 @@ proc archivist_destroy*( let ctx = cast[ptr ArchivistContext](ctx) let destroyRes = destroyArchivistContext(ctx) if destroyRes.isErr: - return callback.error(destroyRes.error, userData) + return handleRequestError(callback, userData, RET_ERR, "archivist_destroy", $destroyRes.error) let ack = "destroyed" - safeCallback(callback, RET_OK, ack, userData) - return RET_OK + return handleRequestSuccess(callback, userData, ack) ################################################################################ ### Version Information @@ -180,10 +204,7 @@ proc archivist_version*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeInfoRequest.createShared(NodeInfoMsgType.VERSION) let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) - if res.isErr: - deallocShared(req) - return 
callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_version") proc archivist_revision*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -193,10 +214,7 @@ proc archivist_revision*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeInfoRequest.createShared(NodeInfoMsgType.REVISION) let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) - if res.isErr: - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_revision") proc archivist_repo*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -206,10 +224,7 @@ proc archivist_repo*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeInfoRequest.createShared(NodeInfoMsgType.REPO) let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) - if res.isErr: - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_repo") ################################################################################ ### Debug Operations @@ -222,11 +237,7 @@ proc archivist_debug*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeDebugRequest.createShared(NodeDebugMsgType.DEBUG) let res = ctx.sendRequestToArchivistThread(RequestType.DEBUG, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_debug") proc archivist_spr*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -236,10 +247,7 @@ proc archivist_spr*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeInfoRequest.createShared(NodeInfoMsgType.SPR) let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) - if res.isErr: - 
deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_spr") proc archivist_peer_id*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -249,10 +257,7 @@ proc archivist_peer_id*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeInfoRequest.createShared(NodeInfoMsgType.PEERID) let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) - if res.isErr: - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_peer_id") proc archivist_log_level*( ctx: pointer, logLevel: cstring, callback: ArchivistCallback, userData: pointer @@ -265,11 +270,7 @@ proc archivist_log_level*( let req = NodeDebugRequest.createShared(NodeDebugMsgType.LOG_LEVEL, safeLogLevel) let res = ctx.sendRequestToArchivistThread(RequestType.DEBUG, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_log_level") ################################################################################ ### P2P Networking @@ -296,10 +297,7 @@ proc archivist_connect*( let req = NodeP2PRequest.createShared(NodeP2PMsgType.CONNECT, safePeerId, addresses) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) - if res.isErr: - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_connect") proc archivist_connected_peers*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -309,10 +307,7 @@ proc archivist_connected_peers*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeP2PRequest.createShared(NodeP2PMsgType.CONNECTED_PEERS) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, 
req, callback, userData) - if res.isErr: - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_connected_peers") proc archivist_connected_peer_ids*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -322,10 +317,7 @@ proc archivist_connected_peer_ids*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeP2PRequest.createShared(NodeP2PMsgType.CONNECTED_PEER_IDS) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) - if res.isErr: - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_connected_peer_ids") proc archivist_find_peer*( ctx: pointer, peerId: cstring, callback: ArchivistCallback, userData: pointer @@ -335,10 +327,7 @@ proc archivist_find_peer*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeP2PRequest.createShared(NodeP2PMsgType.FIND_PEER, $peerId) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) - if res.isErr: - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_find_peer") proc archivist_disconnect*( ctx: pointer, peerId: cstring, callback: ArchivistCallback, userData: pointer @@ -348,10 +337,7 @@ proc archivist_disconnect*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeP2PRequest.createShared(NodeP2PMsgType.DISCONNECT, $peerId) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) - if res.isErr: - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_disconnect") ################################################################################ ### Upload Operations @@ -368,11 +354,7 @@ proc archivist_upload_init*( let ctx = cast[ptr 
ArchivistContext](ctx) let req = NodeUploadRequest.createShared(NodeUploadMsgType.INIT, $filepath, @[], chunkSize.int) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_upload_init") proc archivist_upload_chunk*( ctx: pointer, @@ -392,11 +374,7 @@ proc archivist_upload_chunk*( let req = NodeUploadRequest.createShared(NodeUploadMsgType.CHUNK, $sessionId, chunkData) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_upload_chunk") proc archivist_upload_finalize*( ctx: pointer, @@ -409,11 +387,7 @@ proc archivist_upload_finalize*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeUploadRequest.createShared(NodeUploadMsgType.FINALIZE, $sessionId) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_upload_finalize") proc archivist_upload_cancel*( ctx: pointer, @@ -426,11 +400,7 @@ proc archivist_upload_cancel*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeUploadRequest.createShared(NodeUploadMsgType.CANCEL, $sessionId) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_upload_cancel") proc archivist_upload_file*( ctx: pointer, @@ -443,11 +413,7 @@ proc 
archivist_upload_file*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeUploadRequest.createShared(NodeUploadMsgType.FILE, $sessionId) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_upload_file") ################################################################################ ### Download Operations @@ -465,11 +431,7 @@ proc archivist_download_init*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.INIT, $cid, chunkSize.int, local) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_download_init") proc archivist_download_stream*( ctx: pointer, @@ -488,11 +450,7 @@ proc archivist_download_stream*( fp = $filepath let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.STREAM, $cid, chunkSize.int, local, fp) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_download_stream") proc archivist_download_chunk*( ctx: pointer, @@ -505,11 +463,7 @@ proc archivist_download_chunk*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CHUNK, $cid) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return 
handleRequestResult(res, req, callback, userData, "archivist_download_chunk") proc archivist_download_cancel*( ctx: pointer, @@ -522,11 +476,7 @@ proc archivist_download_cancel*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CANCEL, $cid) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_download_cancel") proc archivist_download_manifest*( ctx: pointer, @@ -539,11 +489,7 @@ proc archivist_download_manifest*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.MANIFEST, $cid) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_download_manifest") ################################################################################ ### Storage Operations @@ -556,11 +502,7 @@ proc archivist_list*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.LIST) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_list") proc archivist_space*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -570,11 +512,7 @@ proc archivist_space*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - if 
res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_space") proc archivist_delete*( ctx: pointer, cid: cstring, callback: ArchivistCallback, userData: pointer @@ -584,11 +522,7 @@ proc archivist_delete*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.DELETE, cid) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_delete") proc archivist_fetch*( ctx: pointer, cid: cstring, callback: ArchivistCallback, userData: pointer @@ -598,11 +532,7 @@ proc archivist_fetch*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.FETCH, cid) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_fetch") proc archivist_exists*( ctx: pointer, cid: cstring, callback: ArchivistCallback, userData: pointer @@ -612,11 +542,7 @@ proc archivist_exists*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.EXISTS, cid) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_exists") proc archivist_local_size*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -626,11 +552,7 @@ proc archivist_local_size*( let ctx = cast[ptr 
ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_local_size") proc archivist_block_count*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -640,19 +562,19 @@ proc archivist_block_count*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - if res.isErr: - req.cleanupRequest() - deallocShared(req) - return callback.error(res.error, userData) - return RET_OK + return handleRequestResult(res, req, callback, userData, "archivist_block_count") ################################################################################ ### Event Callback proc archivist_set_event_callback*( ctx: pointer, callback: ArchivistCallback, userData: pointer -) {.dynlib, exported.} = +): cint {.dynlib, exported.} = + let validationResult = validateParams(ctx, callback) + if validationResult != RET_OK: + return validationResult + let ctx = cast[ptr ArchivistContext](ctx) - if not ctx.isNil: - ctx.eventCallback = cast[pointer](callback) - ctx.eventUserData = userData + ctx.eventCallback = cast[pointer](callback) + ctx.eventUserData = userData + return RET_OK diff --git a/library/test_error_handling.nim b/library/test_error_handling.nim new file mode 100644 index 00000000..85f62839 --- /dev/null +++ b/library/test_error_handling.nim @@ -0,0 +1,69 @@ +## Test file for standardized error handling +## This file tests the new error handling patterns + +import std/[unittest, strutils] +import ./ffi_types + +suite "Standardized Error Handling Tests": + + test "Error code constants are defined": + check RET_OK == 0 + check 
RET_ERR == 1 + check RET_MISSING_CALLBACK == 2 + check RET_PROGRESS == 3 + check RET_INVALID_PARAM == 4 + check RET_NULL_CONTEXT == 5 + check RET_THREAD_ERROR == 6 + check RET_MEMORY_ERROR == 7 + check RET_TIMEOUT == 8 + + test "formatErrorMessage creates consistent error messages": + let msg1 = formatErrorMessage(RET_ERR, "test_function", "Something went wrong") + check msg1 == "General error in test_function: Something went wrong" + + let msg2 = formatErrorMessage(RET_INVALID_PARAM, "validate_input", "Invalid CID format") + check msg2 == "Invalid parameter in validate_input: Invalid CID format" + + let msg3 = formatErrorMessage(RET_NULL_CONTEXT, "archivist_create") + check msg3 == "Null context in archivist_create" + + let msg4 = formatErrorMessage(RET_THREAD_ERROR, "send_request", "Thread communication failed") + check msg4 == "Thread error in send_request: Thread communication failed" + + test "validateContext returns correct error codes": + check validateContext(nil) == RET_NULL_CONTEXT + check validateContext(cast[pointer](0x1234)) == RET_OK + + test "validateCallback returns correct error codes": + check validateCallback(nil) == RET_MISSING_CALLBACK + # Create a dummy callback for testing + let dummyCallback: ArchivistCallback = proc(callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = discard + check validateCallback(dummyCallback) == RET_OK + + test "validateParams combines context and callback validation": + let dummyCallback: ArchivistCallback = proc(callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = discard + + # Both null + check validateParams(nil, nil) == RET_NULL_CONTEXT + + # Context null, callback valid + check validateParams(nil, dummyCallback) == RET_NULL_CONTEXT + + # Context valid, callback null + check validateParams(cast[pointer](0x1234), nil) == RET_MISSING_CALLBACK + + # Both valid + check validateParams(cast[pointer](0x1234), dummyCallback) == 
RET_OK + + test "Error message formatting handles edge cases": + # Empty details + let msg1 = formatErrorMessage(RET_ERR, "test", "") + check msg1 == "General error in test" + + # Unknown error code + let msg2 = formatErrorMessage(999, "unknown_function", "Unknown error") + check msg2 == "Unknown error in unknown_function: Unknown error" + +when isMainModule: + echo "Running standardized error handling tests..." + echo "All tests should pass to verify the error handling standardization works correctly." \ No newline at end of file From 477d250783ce87b9f1bd2d4c9de2ba970cca19f7 Mon Sep 17 00:00:00 2001 From: Xav Date: Fri, 6 Mar 2026 10:12:25 -0500 Subject: [PATCH 06/16] feat(lib): implement toml validation --- .../requests/node_lifecycle_request.nim | 5 + library/libarchivist.nim | 12 + library/test_toml_validation.nim | 755 +++++++++++++++++ library/toml_validation.nim | 769 ++++++++++++++++++ 4 files changed, 1541 insertions(+) create mode 100644 library/test_toml_validation.nim create mode 100644 library/toml_validation.nim diff --git a/library/archivist_thread_requests/requests/node_lifecycle_request.nim b/library/archivist_thread_requests/requests/node_lifecycle_request.nim index 821d200c..8b24bab1 100644 --- a/library/archivist_thread_requests/requests/node_lifecycle_request.nim +++ b/library/archivist_thread_requests/requests/node_lifecycle_request.nim @@ -15,6 +15,7 @@ import toml_serialization import ../../../archivist/conf import ../../alloc +import ../../toml_validation import ../../../archivist/utils import ../../../archivist/utils/[keyutils, fileutils] import ../../../archivist/units @@ -77,6 +78,10 @@ proc createArchivist( ): Future[Result[NodeServer, string]] {.async: (raises: []).} = var conf: NodeConf + let tomlValidationResult = validateTomlCString(configToml) + if tomlValidationResult.isErr: + return err("Failed to create Archivist: TOML validation failed: " & formatError(tomlValidationResult.error)) + try: conf = NodeConf.load( version = 
nodeFullVersion, diff --git a/library/libarchivist.nim b/library/libarchivist.nim index 2bd3a0aa..51e92f60 100644 --- a/library/libarchivist.nim +++ b/library/libarchivist.nim @@ -31,6 +31,7 @@ import ./archivist_thread_requests/requests/node_download_request import ./archivist_thread_requests/requests/node_storage_request import ./ffi_types import ./alloc +import ./toml_validation logScope: topics = "libarchivist" @@ -117,6 +118,17 @@ proc archivist_new*( safeCallback(callback, validationResult, errorMsg, userData) return nil + let tomlValidationResult = validateTomlCString(configToml) + if tomlValidationResult.isErr: + let errorMsg = formatErrorMessage( + RET_INVALID_PARAM, + "archivist_new", + "TOML validation failed: " & formatError(tomlValidationResult.error) + ) + if not callback.isNil: + safeCallback(callback, RET_INVALID_PARAM, errorMsg, userData) + return nil + let safeConfig = if validateCString(configToml): safeStringCopy(configToml, 10000) else: "" var ctx = archivist_context.createArchivistContext().valueOr: diff --git a/library/test_toml_validation.nim b/library/test_toml_validation.nim new file mode 100644 index 00000000..d250edee --- /dev/null +++ b/library/test_toml_validation.nim @@ -0,0 +1,755 @@ +## TOML Validation Tests +## +## Comprehensive tests for the TOML validation module to ensure +## all validation functions work correctly. 
+ +import std/[unittest, strutils] +import results +import ./toml_validation + +suite "TOML Validation Tests": + + suite "Size Validation": + test "Empty TOML should pass size validation": + let result = validateSize("") + check result.isOk + + test "Small TOML should pass size validation": + let toml = "logLevel = \"info\"" + let result = validateSize(toml) + check result.isOk + + test "TOML at max size should pass validation": + let toml = "a".repeat(DefaultMaxSize) + let result = validateSize(toml) + check result.isOk + + test "TOML exceeding max size should fail validation": + let toml = "a".repeat(DefaultMaxSize + 1) + let result = validateSize(toml) + check result.isErr + check "exceeds maximum size" in result.error.message + + test "Custom max size should be respected": + let config = TomlValidationConfig(maxSize: 100) + let toml = "a".repeat(101) + let result = validateSize(toml, config) + check result.isErr + + suite "Line Length Validation": + test "Normal lines should pass validation": + let toml = """ +logLevel = "info" +logFormat = "auto" +""" + let result = validateLineLength(toml) + check result.isOk + + test "Line at max length should pass validation": + let toml = "a".repeat(DefaultMaxLineLength) + let result = validateLineLength(toml) + check result.isOk + + test "Line exceeding max length should fail validation": + let toml = "a".repeat(DefaultMaxLineLength + 1) + let result = validateLineLength(toml) + check result.isErr + check "exceeds maximum length" in result.error.message + check result.error.line == 1 + + test "Custom max line length should be respected": + let config = TomlValidationConfig(maxLineLength: 50) + let toml = "a".repeat(51) + let result = validateLineLength(toml, config) + check result.isErr + + suite "Syntax Validation": + test "Valid TOML should pass syntax validation": + let toml = """ +logLevel = "info" +logFormat = "auto" +metricsEnabled = true +""" + let result = validateSyntax(toml) + check result.isOk + + test "TOML 
with table headers should pass validation": + let toml = """ +[section] +key = "value" +""" + let result = validateSyntax(toml) + check result.isOk + + test "TOML with inline tables should pass validation": + let toml = """ +[section] +key = { name = "value" } +""" + let result = validateSyntax(toml) + check result.isOk + + test "TOML with inline tables disabled should fail": + let config = TomlValidationConfig(allowInlineTables: false) + let toml = "key = { name = \"value\" }" + let result = validateSyntax(toml, config) + check result.isErr + check "Inline tables are not allowed" in result.error.message + + test "Unmatched closing bracket should fail validation": + let toml = """ +[section] +key = "value" +] +""" + let result = validateSyntax(toml) + check result.isErr + check "Unmatched closing bracket" in result.error.message + + test "Unmatched opening bracket should fail validation": + let toml = """ +[section +key = "value" +""" + let result = validateSyntax(toml) + check result.isErr + check "Unclosed bracket" in result.error.message + + test "Unmatched closing brace should fail validation": + let toml = """ +key = { name = "value" } +} +""" + let result = validateSyntax(toml) + check result.isErr + check "Unmatched closing brace" in result.error.message + + test "Null byte should fail validation": + let toml = "key = \"value\0\"" + let result = validateSyntax(toml) + check result.isErr + check "Null byte" in result.error.message + + test "Invalid control character should fail validation": + let toml = "key = \"value\x01\"" + let result = validateSyntax(toml) + check result.isErr + check "Invalid control character" in result.error.message + + suite "Security Validation": + test "Clean TOML should pass security validation": + let toml = """ +logLevel = "info" +logFormat = "auto" +""" + let result = validateSecurity(toml) + check result.isOk + + test "Script injection should be detected": + let toml = "key = \"\"" + let result = validateSecurity(toml) + check 
result.isErr + check "script injection" in result.error.message + + test "JavaScript injection should be detected": + let toml = "key = \"javascript:alert('xss')\"" + let result = validateSecurity(toml) + check result.isErr + check "JavaScript injection" in result.error.message + + test "Data URI injection should be detected": + let toml = "key = \"data:text/html,\"" + let result = validateSecurity(toml) + check result.isErr + check "data URI injection" in result.error.message + + test "Event handler injection should be detected": + let toml = "key = \"onclick=alert('xss')\"" + let result = validateSecurity(toml) + check result.isErr + check "event handler injection" in result.error.message + + test "Template injection should be detected": + let toml = "key = \"${malicious}\"" + let result = validateSecurity(toml) + check result.isErr + check "template injection" in result.error.message + + test "Path traversal should be detected": + let toml = "key = \"../../etc/passwd\"" + let result = validateSecurity(toml) + check result.isErr + check "path traversal" in result.error.message + + test "URL-encoded path traversal should be detected": + let toml = "key = \"%2e%2e%2fetc%2fpasswd\"" + let result = validateSecurity(toml) + check result.isErr + check "path traversal" in result.error.message + + test "Command injection should be detected": + let toml = "key = \"value; rm -rf /\"" + let result = validateSecurity(toml) + check result.isErr + check "command injection" in result.error.message + + test "Pipe command injection should be detected": + let toml = "key = \"value | cat /etc/passwd\"" + let result = validateSecurity(toml) + check result.isErr + check "command injection" in result.error.message + + test "Backtick command injection should be detected": + let toml = "key = \"`malicious command`\"" + let result = validateSecurity(toml) + check result.isErr + check "command injection" in result.error.message + + suite "Port Validation": + test "Valid port should pass 
validation": + let result = validatePort("8080", "testPort") + check result.isOk + + test "Port at minimum should pass validation": + let result = validatePort("1", "testPort") + check result.isOk + + test "Port at maximum should pass validation": + let result = validatePort("65535", "testPort") + check result.isOk + + test "Port below minimum should fail validation": + let result = validatePort("0", "testPort") + check result.isErr + check "must be between" in result.error.message + + test "Port above maximum should fail validation": + let result = validatePort("65536", "testPort") + check result.isErr + check "must be between" in result.error.message + + test "Invalid port string should fail validation": + let result = validatePort("invalid", "testPort") + check result.isErr + check "not a valid integer" in result.error.message + + suite "IP Address Validation": + test "Valid IPv4 address should pass validation": + let result = validateIpAddress("127.0.0.1", "testAddress") + check result.isOk + + test "Valid IPv4 address with high octets should pass": + let result = validateIpAddress("192.168.255.255", "testAddress") + check result.isOk + + test "IPv4 address with octet above 255 should fail": + let result = validateIpAddress("192.168.256.1", "testAddress") + check result.isErr + check "between 0 and 255" in result.error.message + + test "Invalid IPv4 address should fail validation": + let result = validateIpAddress("invalid", "testAddress") + check result.isErr + check "not a valid IPv4" in result.error.message + + test "Valid IPv6 address should pass validation": + let result = validateIpAddress("::1", "testAddress") + check result.isOk + + test "Valid IPv6 address with multiple segments should pass": + let result = validateIpAddress("2001:db8::1", "testAddress") + check result.isOk + + suite "MultiAddress Validation": + test "Valid multiaddress should pass validation": + let result = validateMultiAddress("/ip4/127.0.0.1/tcp/8080", "testMultiAddr") + check 
result.isOk + + test "Multiaddress without leading slash should fail": + let result = validateMultiAddress("ip4/127.0.0.1/tcp/8080", "testMultiAddr") + check result.isErr + check "must start with '/'" in result.error.message + + test "Multiaddress with null byte should fail": + let result = validateMultiAddress("/ip4/127.0.0.1/tcp/8080\0", "testMultiAddr") + check result.isErr + check "contains invalid characters" in result.error.message + + suite "Duration Validation": + test "Valid duration in seconds should pass": + let result = validateDuration("60s", "testDuration") + check result.isOk + + test "Valid duration in minutes should pass": + let result = validateDuration("5m", "testDuration") + check result.isOk + + test "Valid duration in hours should pass": + let result = validateDuration("1h", "testDuration") + check result.isOk + + test "Valid duration in days should pass": + let result = validateDuration("1d", "testDuration") + check result.isOk + + test "Valid duration in milliseconds should pass": + let result = validateDuration("500ms", "testDuration") + check result.isOk + + test "Empty duration should fail validation": + let result = validateDuration("", "testDuration") + check result.isErr + check "empty value" in result.error.message + + test "Duration without numeric value should fail": + let result = validateDuration("s", "testDuration") + check result.isErr + check "missing numeric value" in result.error.message + + test "Duration with invalid unit should fail": + let result = validateDuration("60x", "testDuration") + check result.isErr + check "invalid unit" in result.error.message + + test "Negative duration should fail validation": + let result = validateDuration("-60s", "testDuration") + check result.isErr + check "negative values not allowed" in result.error.message + + suite "Enum Validation": + test "Valid enum value should pass": + let result = validateEnum("info", "logLevel", ValidLogLevels) + check result.isOk + + test "Invalid enum value 
should fail": + let result = validateEnum("invalid", "logLevel", ValidLogLevels) + check result.isErr + check "must be one of" in result.error.message + + suite "Range Validation": + test "Value within range should pass": + let result = validateRange("100", "testValue", 1, 1000) + check result.isOk + + test "Value at minimum should pass": + let result = validateRange("1", "testValue", 1, 1000) + check result.isOk + + test "Value at maximum should pass": + let result = validateRange("1000", "testValue", 1, 1000) + check result.isOk + + test "Value below minimum should fail": + let result = validateRange("0", "testValue", 1, 1000) + check result.isErr + check "must be between" in result.error.message + + test "Value above maximum should fail": + let result = validateRange("1001", "testValue", 1, 1000) + check result.isErr + check "must be between" in result.error.message + + test "Invalid integer string should fail": + let result = validateRange("invalid", "testValue", 1, 1000) + check result.isErr + check "not a valid integer" in result.error.message + + suite "Boolean Validation": + test "True should pass validation": + let result = validateBoolean("true", "testBool") + check result.isOk + + test "False should pass validation": + let result = validateBoolean("false", "testBool") + check result.isOk + + test "Uppercase TRUE should pass validation": + let result = validateBoolean("TRUE", "testBool") + check result.isOk + + test "Uppercase FALSE should pass validation": + let result = validateBoolean("FALSE", "testBool") + check result.isOk + + test "Invalid boolean should fail validation": + let result = validateBoolean("invalid", "testBool") + check result.isErr + check "must be 'true' or 'false'" in result.error.message + + suite "File Path Validation": + test "Valid file path should pass": + let result = validateFilePath("/path/to/file", "testPath") + check result.isOk + + test "Relative file path should pass": + let result = validateFilePath("relative/path", 
"testPath") + check result.isOk + + test "Empty file path should fail": + let result = validateFilePath("", "testPath") + check result.isErr + check "empty value" in result.error.message + + test "File path with null byte should fail": + let result = validateFilePath("/path\0/file", "testPath") + check result.isErr + check "contains null byte" in result.error.message + + suite "Ethereum Address Validation": + test "Valid Ethereum address should pass": + let result = validateEthAddress("0x1234567890123456789012345678901234567890", "testAddress") + check result.isOk + + test "Empty Ethereum address should pass (optional field)": + let result = validateEthAddress("", "testAddress") + check result.isOk + + test "Ethereum address without 0x prefix should fail": + let result = validateEthAddress("1234567890123456789012345678901234567890", "testAddress") + check result.isErr + check "must start with '0x'" in result.error.message + + test "Ethereum address with wrong length should fail": + let result = validateEthAddress("0x123456789012345678901234567890123456789", "testAddress") + check result.isErr + check "must be 42 characters" in result.error.message + + test "Ethereum address with invalid hex should fail": + let result = validateEthAddress("0x123456789012345678901234567890123456789g", "testAddress") + check result.isErr + check "invalid hex characters" in result.error.message + + suite "Content Validation": + test "Valid logLevel should pass": + let toml = "logLevel = \"info\"" + let result = validateContent(toml) + check result.isOk + + test "Invalid logLevel should fail": + let toml = "logLevel = \"invalid\"" + let result = validateContent(toml) + check result.isErr + check "logLevel" in result.error.message + + test "Valid logFormat should pass": + let toml = "logFormat = \"auto\"" + let result = validateContent(toml) + check result.isOk + + test "Invalid logFormat should fail": + let toml = "logFormat = \"invalid\"" + let result = validateContent(toml) + check 
result.isErr + check "logFormat" in result.error.message + + test "Valid repoKind should pass": + let toml = "repoKind = \"fs\"" + let result = validateContent(toml) + check result.isOk + + test "Invalid repoKind should fail": + let toml = "repoKind = \"invalid\"" + let result = validateContent(toml) + check result.isErr + check "repoKind" in result.error.message + + test "Valid proverBackend should pass": + let toml = "proverBackend = \"nimgroth16\"" + let result = validateContent(toml) + check result.isOk + + test "Invalid proverBackend should fail": + let toml = "proverBackend = \"invalid\"" + let result = validateContent(toml) + check result.isErr + check "proverBackend" in result.error.message + + test "Valid curve should pass": + let toml = "curve = \"bn128\"" + let result = validateContent(toml) + check result.isOk + + test "Invalid curve should fail": + let toml = "curve = \"invalid\"" + let result = validateContent(toml) + check result.isErr + check "curve" in result.error.message + + test "Valid metricsPort should pass": + let toml = "metricsPort = 8008" + let result = validateContent(toml) + check result.isOk + + test "Invalid metricsPort should fail": + let toml = "metricsPort = 99999" + let result = validateContent(toml) + check result.isErr + check "metricsPort" in result.error.message + + test "Valid maxPeers should pass": + let toml = "maxPeers = 160" + let result = validateContent(toml) + check result.isOk + + test "Invalid maxPeers should fail": + let toml = "maxPeers = 99999" + let result = validateContent(toml) + check result.isErr + check "maxPeers" in result.error.message + + test "Valid numThreads should pass": + let toml = "numThreads = 4" + let result = validateContent(toml) + check result.isOk + + test "Invalid numThreads should fail": + let toml = "numThreads = 999" + let result = validateContent(toml) + check result.isErr + check "numThreads" in result.error.message + + test "Valid validatorMaxSlots should pass": + let toml = 
"validatorMaxSlots = 1000" + let result = validateContent(toml) + check result.isOk + + test "Invalid validatorMaxSlots should fail": + let toml = "validatorMaxSlots = -1" + let result = validateContent(toml) + check result.isErr + check "validatorMaxSlots" in result.error.message + + test "Valid validatorGroups should pass": + let toml = "validatorGroups = 16" + let result = validateContent(toml) + check result.isOk + + test "Invalid validatorGroups should fail": + let toml = "validatorGroups = 1" + let result = validateContent(toml) + check result.isErr + check "validatorGroups" in result.error.message + + test "Valid maxSlotDepth should pass": + let toml = "maxSlotDepth = 16" + let result = validateContent(toml) + check result.isOk + + test "Invalid maxSlotDepth should fail": + let toml = "maxSlotDepth = 100" + let result = validateContent(toml) + check result.isErr + check "maxSlotDepth" in result.error.message + + test "Valid cacheSize should pass": + let toml = "cacheSize = 0" + let result = validateContent(toml) + check result.isOk + + test "Invalid cacheSize should fail": + let toml = "cacheSize = -1" + let result = validateContent(toml) + check result.isErr + check "cacheSize" in result.error.message + + test "Valid storageQuota should pass": + let toml = "storageQuota = 1073741824" + let result = validateContent(toml) + check result.isOk + + test "Invalid storageQuota should fail": + let toml = "storageQuota = 0" + let result = validateContent(toml) + check result.isErr + check "storageQuota" in result.error.message + + test "Valid blockTtl should pass": + let toml = "blockTtl = 3600" + let result = validateContent(toml) + check result.isOk + + test "Invalid blockTtl should fail": + let toml = "blockTtl = 100000" + let result = validateContent(toml) + check result.isErr + check "blockTtl" in result.error.message + + test "Valid blockMaintenanceInterval should pass": + let toml = "blockMaintenanceInterval = 3600" + let result = validateContent(toml) + 
check result.isOk + + test "Invalid blockMaintenanceInterval should fail": + let toml = "blockMaintenanceInterval = 10" + let result = validateContent(toml) + check result.isErr + check "blockMaintenanceInterval" in result.error.message + + test "Valid metricsAddress should pass": + let toml = "metricsAddress = \"127.0.0.1\"" + let result = validateContent(toml) + check result.isOk + + test "Invalid metricsAddress should fail": + let toml = "metricsAddress = \"invalid\"" + let result = validateContent(toml) + check result.isErr + check "metricsAddress" in result.error.message + + test "Valid netPrivKeyFile should pass": + let toml = "netPrivKeyFile = \"key\"" + let result = validateContent(toml) + check result.isOk + + test "Invalid netPrivKeyFile should fail": + let toml = "netPrivKeyFile = \"\"" + let result = validateContent(toml) + check result.isErr + check "netPrivKeyFile" in result.error.message + + test "Valid marketplaceAddress should pass": + let toml = "marketplaceAddress = \"0x1234567890123456789012345678901234567890\"" + let result = validateContent(toml) + check result.isOk + + test "Invalid marketplaceAddress should fail": + let toml = "marketplaceAddress = \"invalid\"" + let result = validateContent(toml) + check result.isErr + check "marketplaceAddress" in result.error.message + + test "Comments should be ignored": + let toml = """ +# This is a comment +logLevel = "info" +# Another comment +""" + let result = validateContent(toml) + check result.isOk + + test "Table headers should be ignored": + let toml = """ +[section] +logLevel = "info" +""" + let result = validateContent(toml) + check result.isOk + + suite "Comprehensive TOML Validation": + test "Empty TOML should pass all validations": + let result = validateToml("") + check result.isOk + + test "Valid TOML should pass all validations": + let toml = """ +logLevel = "info" +logFormat = "auto" +metricsEnabled = true +metricsPort = 8008 +maxPeers = 160 +numThreads = 4 +""" + let result = 
validateToml(toml) + check result.isOk + + test "TOML with size violation should fail": + let toml = "a".repeat(DefaultMaxSize + 1) + let result = validateToml(toml) + check result.isErr + + test "TOML with syntax error should fail": + let toml = """ +logLevel = "info" +] +""" + let result = validateToml(toml) + check result.isErr + + test "TOML with security issue should fail": + let toml = "key = \"\"" + let result = validateToml(toml) + check result.isErr + + test "TOML with invalid content should fail": + let toml = "logLevel = \"invalid\"" + let result = validateToml(toml) + check result.isErr + + test "Complex valid TOML should pass all validations": + let toml = """ +logLevel = "info" +logFormat = "auto" +metricsEnabled = true +metricsAddress = "127.0.0.1" +metricsPort = 8008 +dataDir = "/tmp/archivist" +listenAddrs = ["/ip4/0.0.0.0/tcp/0"] +nat = "any" +discoveryPort = 8090 +netPrivKeyFile = "key" +maxPeers = 160 +numThreads = 4 +agentString = "Archivist Node" +apiBindAddress = "127.0.0.1" +apiPort = 8080 +repoKind = "fs" +storageQuota = 1073741824 +blockTtl = 3600 +blockMaintenanceInterval = 3600 +blockMaintenanceNumberOfBlocks = 100 +cacheSize = 0 +persistence = false +ethProvider = "ws://localhost:8545" +useSystemClock = false +validator = false +prover = false +circuitDir = "/tmp/archivist/circuits" +proverBackend = "nimgroth16" +curve = "bn128" +numProofSamples = 10 +maxSlotDepth = 16 +maxDatasetDepth = 16 +maxBlockDepth = 16 +maxCellElms = 256 +""" + let result = validateToml(toml) + check result.isOk + + suite "CString Validation": + test "nil cstring should pass validation": + let result = validateTomlCString(nil) + check result.isOk + + test "Valid cstring should pass validation": + let toml = "logLevel = \"info\"" + let result = validateTomlCString(toml) + check result.isOk + + test "Invalid cstring should fail validation": + let toml = "logLevel = \"invalid\"" + let result = validateTomlCString(toml) + check result.isErr + + suite "Error 
Formatting": + test "Error without line/column should format correctly": + let error = createValidationError("Test error message") + let formatted = formatError(error) + check "TOML validation error: Test error message" == formatted + + test "Error with line should format correctly": + let error = createValidationError("Test error message", line = 10) + let formatted = formatError(error) + check "line 10" in formatted + check "Test error message" in formatted + + test "Error with line and column should format correctly": + let error = createValidationError("Test error message", line = 10, column = 5) + let formatted = formatError(error) + check "line 10, column 5" in formatted + check "Test error message" in formatted + + test "Error with context should format correctly": + let error = createValidationError("Test error message", context = "Additional context") + let formatted = formatError(error) + check "Test error message" in formatted + check "Additional context" in formatted diff --git a/library/toml_validation.nim b/library/toml_validation.nim new file mode 100644 index 00000000..865119b6 --- /dev/null +++ b/library/toml_validation.nim @@ -0,0 +1,769 @@ +## TOML Input Validation Module +## +## This module provides comprehensive validation for TOML configuration input +## to prevent security issues and provide better error messages. 
+## +## Features: +## - Size limits to prevent resource exhaustion +## - Syntax validation before parsing +## - Content validation for specific configuration values +## - Security validation to prevent injection attacks +## - Clear, actionable error messages + +{.push raises: [].} + +import std/[strutils, re, parseutils, unicode] +import results + +type + TomlValidationError* = object + message*: string + line*: int + column*: int + context*: string + + TomlValidationResult* = Result[void, TomlValidationError] + + TomlValidationConfig* = object + maxSize*: int + maxLineLength*: int + maxNestingDepth*: int + maxArrayLength*: int + allowInlineTables*: bool + allowMultilineStrings*: bool + +const + DefaultMaxSize* = 1_000_000 # 1MB max TOML size + DefaultMaxLineLength* = 10_000 # 10KB max line length + DefaultMaxNestingDepth* = 50 # Max nesting depth for tables + DefaultMaxArrayLength* = 10_000 # Max array elements + DefaultValidationConfig* = TomlValidationConfig( + maxSize: DefaultMaxSize, + maxLineLength: DefaultMaxLineLength, + maxNestingDepth: DefaultMaxNestingDepth, + maxArrayLength: DefaultMaxArrayLength, + allowInlineTables: true, + allowMultilineStrings: true + ) + + ValidLogLevels* = ["trace", "debug", "info", "notice", "warn", "error", "fatal"] + + ValidLogFormats* = ["auto", "colors", "nocolors", "json", "none"] + + ValidRepoKinds* = ["fs", "sqlite", "leveldb"] + + ValidProverBackends* = ["nimgroth16", "circomcompat"] + + ValidCurves* = ["bn128"] + + ValidNatStrategies* = ["any", "none", "upnp", "pmp"] + + ValidPortRange* = 1..65535 + + ValidThreadCountRange* = 0..256 + + ValidValidatorGroupsRange* = 2..65535 + + ValidMaxSlotsRange* = 0..1000000 + + ValidMaxDepthRange* = 1..64 + + ValidMaxCellElementsRange* = 1..256 + + ValidCacheSizeRange* = 0..1_000_000_000 # 0 to 1GB + + ValidStorageQuotaRange* = 1_048_576..1_000_000_000_000 # 1MB to 1TB + + ValidBlockTtlRange* = 0..86400 # 0 to 24 hours + + ValidBlockMaintenanceIntervalRange* = 60..86400 # 1 minute to 
24 hours + + ValidBlockMaintenanceNumberOfBlocksRange* = 1..100000 + + ValidMaxPeersRange* = 1..1000 + + ValidDiscoveryPortRange* = 1024..65535 + + ValidMetricsPortRange* = 1024..65535 + + ValidApiPortRange* = 1024..65535 + + ValidMaxPriorityFeePerGasRange* = 0..1_000_000_000_000 # 0 to 1 trillion wei + + ValidNumProofSamplesRange* = 1..1000 + + ValidMarketplaceRequestCacheSizeRange* = 1..65535 + + ValidValidatorGroupIndexRange* = 0..65534 + +proc createValidationError*( + message: string, + line: int = 0, + column: int = 0, + context: string = "" +): TomlValidationError = + TomlValidationError( + message: message, + line: line, + column: column, + context: context + ) + +proc formatError*(error: TomlValidationError): string = + if error.line > 0: + if error.column > 0: + result = "TOML validation error at line $1, column $2: $3" % [ + $error.line, $error.column, error.message + ] + else: + result = "TOML validation error at line $1: $2" % [ + $error.line, error.message + ] + else: + result = "TOML validation error: $1" % [error.message] + + if error.context.len > 0: + result.add("\nContext: " & error.context) + +proc validateSize*(toml: string, config: TomlValidationConfig = DefaultValidationConfig): TomlValidationResult = + if toml.len > config.maxSize: + return err(createValidationError( + "TOML configuration exceeds maximum size of $1 bytes (got $2 bytes)" % [ + $config.maxSize, $toml.len + ] + )) + return ok() + +proc validateLineLength*(toml: string, config: TomlValidationConfig = DefaultValidationConfig): TomlValidationResult = + var lineNum = 1 + for line in toml.splitLines(): + if line.len > config.maxLineLength: + return err(createValidationError( + "Line exceeds maximum length of $1 characters (got $2 characters)" % [ + $config.maxLineLength, $line.len + ], + line = lineNum + )) + lineNum += 1 + return ok() + +proc validateSyntax*(toml: string, config: TomlValidationConfig = DefaultValidationConfig): TomlValidationResult = + var lineNum = 1 + var 
columnNum = 1 + var i = 0 + let len = toml.len + + while i < len: + let c = toml[i] + + # Check for null bytes + if c == '\0': + return err(createValidationError( + "Null byte found in TOML configuration", + line = lineNum, + column = columnNum + )) + + # Check for control characters (except newline, tab, and carriage return) + if c < ' ' and c notin {'\n', '\t', '\r'}: + return err(createValidationError( + "Invalid control character found in TOML configuration", + line = lineNum, + column = columnNum + )) + + # Track line and column numbers + if c == '\n': + lineNum += 1 + columnNum = 1 + elif c == '\r': + # Skip carriage return if followed by newline + if i + 1 < len and toml[i + 1] == '\n': + i += 1 + lineNum += 1 + columnNum = 1 + else: + columnNum += 1 + + i += 1 + + # Check for balanced brackets + var bracketStack: seq[char] = @[] + lineNum = 1 + columnNum = 1 + i = 0 + + while i < len: + let c = toml[i] + + case c + of '[': + bracketStack.add('[') + of ']': + if bracketStack.len == 0 or bracketStack[^1] != '[': + return err(createValidationError( + "Unmatched closing bracket ']'", + line = lineNum, + column = columnNum + )) + bracketStack.delete(bracketStack.high) + of '{': + if not config.allowInlineTables: + return err(createValidationError( + "Inline tables are not allowed", + line = lineNum, + column = columnNum + )) + bracketStack.add('{') + of '}': + if bracketStack.len == 0 or bracketStack[^1] != '{': + return err(createValidationError( + "Unmatched closing brace '}'", + line = lineNum, + column = columnNum + )) + bracketStack.delete(bracketStack.high) + else: + discard + + # Track line and column numbers + if c == '\n': + lineNum += 1 + columnNum = 1 + elif c == '\r': + if i + 1 < len and toml[i + 1] == '\n': + i += 1 + lineNum += 1 + columnNum = 1 + else: + columnNum += 1 + + i += 1 + + # Check for unclosed brackets + if bracketStack.len > 0: + let unclosed = bracketStack[^1] + return err(createValidationError( + "Unclosed bracket '$1' found at end 
of TOML configuration" % [$unclosed], + line = lineNum, + column = columnNum + )) + + return ok() + +proc validateSecurity*(toml: string): TomlValidationResult = + let suspiciousPatterns = [ + (re"]*>.*?", "Potential script injection"), + (re"javascript:", "Potential JavaScript injection"), + (re"data:text/html", "Potential data URI injection"), + (re"on\\w+\\s*=", "Potential event handler injection"), + (re"\\$\\{.*?\\}", "Potential template injection"), + (re"\\x[0-9a-fA-F]{2}", "Potential hex escape injection"), + (re"\\u[0-9a-fA-F]{4}", "Potential Unicode escape injection"), + (re"\\U[0-9a-fA-F]{8}", "Potential Unicode escape injection"), + (re"\\n\\s*\\n\\s*\\n", "Excessive blank lines (potential DoS)"), + (re"\\[\\s*\\[\\s*\\[", "Excessive array nesting (potential DoS)"), + ] + + for (pattern, description) in suspiciousPatterns: + if toml.find(pattern) != -1: + return err(createValidationError( + "Security validation failed: $1" % [description] + )) + + # Check for path traversal attempts + let pathTraversalPatterns = [ + re"\\.\\.[\\\\/]", + re"%2e%2e", + re"%252e%252e", + ] + + for pattern in pathTraversalPatterns: + if toml.find(pattern) != -1: + return err(createValidationError( + "Security validation failed: potential path traversal attempt detected" + )) + + # Check for command injection patterns + let commandInjectionPatterns = [ + re";\\s*\\w+\\s*=", + re"\\|\\s*\\w+", + re"`[^`]*`", + re"\\$\\([^)]*\\)", + re"\\$\\{[^}]*\\}", + ] + + for pattern in commandInjectionPatterns: + if toml.find(pattern) != -1: + return err(createValidationError( + "Security validation failed: potential command injection attempt detected" + )) + + return ok() + +proc validatePort*(value: string, fieldName: string): TomlValidationResult = + ## Validate a port number + try: + let port = parseInt(value) + if port notin ValidPortRange: + return err(createValidationError( + "Invalid port number '$1' for field '$2': must be between $3 and $4" % [ + value, fieldName, 
$ValidPortRange.a, $ValidPortRange.b + ] + )) + except ValueError: + return err(createValidationError( + "Invalid port number '$1' for field '$2': not a valid integer" % [ + value, fieldName + ] + )) + return ok() + +proc validateIpAddress*(value: string, fieldName: string): TomlValidationResult = + let ipv4Pattern = re"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$" + if value.match(ipv4Pattern): + let parts = value.split('.') + for part in parts: + try: + let num = parseInt(part) + if num < 0 or num > 255: + return err(createValidationError( + "Invalid IP address '$1' for field '$2': each octet must be between 0 and 255" % [ + value, fieldName + ] + )) + except ValueError: + return err(createValidationError( + "Invalid IP address '$1' for field '$2': not a valid IPv4 address" % [ + value, fieldName + ] + )) + return ok() + + let ipv6Pattern = re"^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}$" + if value.match(ipv6Pattern): + return ok() + + return err(createValidationError( + "Invalid IP address '$1' for field '$2': not a valid IPv4 or IPv6 address" % [ + value, fieldName + ] + )) + +proc validateMultiAddress*(value: string, fieldName: string): TomlValidationResult = + if not value.startsWith('/'): + return err(createValidationError( + "Invalid multiaddress '$1' for field '$2': must start with '/'" % [ + value, fieldName + ] + )) + + let suspiciousChars = {'\0', '\n', '\r', '\t'} + for c in value: + if c in suspiciousChars: + return err(createValidationError( + "Invalid multiaddress '$1' for field '$2': contains invalid characters" % [ + value, fieldName + ] + )) + + return ok() + +proc validateDuration*(value: string, fieldName: string): TomlValidationResult = + if value.len == 0: + return err(createValidationError( + "Invalid duration for field '$1': empty value" % [fieldName] + )) + + var numStr = "" + var unit = "" + var i = 0 + + while i < value.len and value[i] in {'0'..'9'}: + numStr.add(value[i]) + i += 1 + + if i < value.len: + unit = value[i..^1] + + if 
numStr.len == 0: + return err(createValidationError( + "Invalid duration '$1' for field '$2': missing numeric value" % [ + value, fieldName + ] + )) + + try: + let num = parseInt(numStr) + if num < 0: + return err(createValidationError( + "Invalid duration '$1' for field '$2': negative values not allowed" % [ + value, fieldName + ] + )) + except ValueError: + return err(createValidationError( + "Invalid duration '$1' for field '$2': not a valid number" % [ + value, fieldName + ] + )) + + let validUnits = ["s", "m", "h", "d", "ms", "us", "ns"] + if unit.len == 0 or unit notin validUnits: + return err(createValidationError( + "Invalid duration '$1' for field '$2': invalid unit '$3' (must be one of: $4)" % [ + value, fieldName, unit, validUnits.join(", ") + ] + )) + + return ok() + +proc validateEnum*(value: string, fieldName: string, validValues: seq[string]): TomlValidationResult = + if value notin validValues: + return err(createValidationError( + "Invalid value '$1' for field '$2': must be one of: $3" % [ + value, fieldName, validValues.join(", ") + ] + )) + return ok() + +proc validateRange*(value: string, fieldName: string, minVal: int, maxVal: int): TomlValidationResult = + try: + let num = parseInt(value) + if num < minVal or num > maxVal: + return err(createValidationError( + "Invalid value '$1' for field '$2': must be between $3 and $4" % [ + value, fieldName, $minVal, $maxVal + ] + )) + except ValueError: + return err(createValidationError( + "Invalid value '$1' for field '$2': not a valid integer" % [ + value, fieldName + ] + )) + return ok() + +proc validateBoolean*(value: string, fieldName: string): TomlValidationResult = + let lowerValue = value.toLowerAscii() + if lowerValue notin ["true", "false"]: + return err(createValidationError( + "Invalid boolean value '$1' for field '$2': must be 'true' or 'false'" % [ + value, fieldName + ] + )) + return ok() + +proc validateFilePath*(value: string, fieldName: string): TomlValidationResult = + if value.len == 
0: + return err(createValidationError( + "Invalid file path for field '$1': empty value" % [fieldName] + )) + + if '\0' in value: + return err(createValidationError( + "Invalid file path '$1' for field '$2': contains null byte" % [ + value, fieldName + ] + )) + + let suspiciousChars = {'\0', '\n', '\r'} + for c in value: + if c in suspiciousChars: + return err(createValidationError( + "Invalid file path '$1' for field '$2': contains invalid characters" % [ + value, fieldName + ] + )) + + return ok() + +proc validateEthAddress*(value: string, fieldName: string): TomlValidationResult = + if value.len == 0: + return ok() + + if value.len != 42: + return err(createValidationError( + "Invalid Ethereum address '$1' for field '$2': must be 42 characters (0x + 40 hex digits)" % [ + value, fieldName + ] + )) + + if not value.startsWith("0x"): + return err(createValidationError( + "Invalid Ethereum address '$1' for field '$2': must start with '0x'" % [ + value, fieldName + ] + )) + + let hexPart = value[2..^1] + let hexPattern = re"^[0-9a-fA-F]{40}$" + if not hexPart.match(hexPattern): + return err(createValidationError( + "Invalid Ethereum address '$1' for field '$2': contains invalid hex characters" % [ + value, fieldName + ] + )) + + return ok() + +proc validateContent*(toml: string): TomlValidationResult = + var lineNum = 1 + for line in toml.splitLines(): + let trimmed = line.strip() + + if trimmed.len == 0 or trimmed.startsWith('#'): + lineNum += 1 + continue + + if trimmed.startsWith('[') and trimmed.endsWith(']'): + lineNum += 1 + continue + + let eqPos = trimmed.find('=') + if eqPos > 0: + let key = trimmed[0.. 
Date: Tue, 10 Mar 2026 10:43:42 -0400 Subject: [PATCH 07/16] chore(lib): remove debug validation files --- library/test_toml_validation.nim | 755 ------------------------------ library/toml_validation.nim | 769 ------------------------------- 2 files changed, 1524 deletions(-) delete mode 100644 library/test_toml_validation.nim delete mode 100644 library/toml_validation.nim diff --git a/library/test_toml_validation.nim b/library/test_toml_validation.nim deleted file mode 100644 index d250edee..00000000 --- a/library/test_toml_validation.nim +++ /dev/null @@ -1,755 +0,0 @@ -## TOML Validation Tests -## -## Comprehensive tests for the TOML validation module to ensure -## all validation functions work correctly. - -import std/[unittest, strutils] -import results -import ./toml_validation - -suite "TOML Validation Tests": - - suite "Size Validation": - test "Empty TOML should pass size validation": - let result = validateSize("") - check result.isOk - - test "Small TOML should pass size validation": - let toml = "logLevel = \"info\"" - let result = validateSize(toml) - check result.isOk - - test "TOML at max size should pass validation": - let toml = "a".repeat(DefaultMaxSize) - let result = validateSize(toml) - check result.isOk - - test "TOML exceeding max size should fail validation": - let toml = "a".repeat(DefaultMaxSize + 1) - let result = validateSize(toml) - check result.isErr - check "exceeds maximum size" in result.error.message - - test "Custom max size should be respected": - let config = TomlValidationConfig(maxSize: 100) - let toml = "a".repeat(101) - let result = validateSize(toml, config) - check result.isErr - - suite "Line Length Validation": - test "Normal lines should pass validation": - let toml = """ -logLevel = "info" -logFormat = "auto" -""" - let result = validateLineLength(toml) - check result.isOk - - test "Line at max length should pass validation": - let toml = "a".repeat(DefaultMaxLineLength) - let result = validateLineLength(toml) - check 
result.isOk - - test "Line exceeding max length should fail validation": - let toml = "a".repeat(DefaultMaxLineLength + 1) - let result = validateLineLength(toml) - check result.isErr - check "exceeds maximum length" in result.error.message - check result.error.line == 1 - - test "Custom max line length should be respected": - let config = TomlValidationConfig(maxLineLength: 50) - let toml = "a".repeat(51) - let result = validateLineLength(toml, config) - check result.isErr - - suite "Syntax Validation": - test "Valid TOML should pass syntax validation": - let toml = """ -logLevel = "info" -logFormat = "auto" -metricsEnabled = true -""" - let result = validateSyntax(toml) - check result.isOk - - test "TOML with table headers should pass validation": - let toml = """ -[section] -key = "value" -""" - let result = validateSyntax(toml) - check result.isOk - - test "TOML with inline tables should pass validation": - let toml = """ -[section] -key = { name = "value" } -""" - let result = validateSyntax(toml) - check result.isOk - - test "TOML with inline tables disabled should fail": - let config = TomlValidationConfig(allowInlineTables: false) - let toml = "key = { name = \"value\" }" - let result = validateSyntax(toml, config) - check result.isErr - check "Inline tables are not allowed" in result.error.message - - test "Unmatched closing bracket should fail validation": - let toml = """ -[section] -key = "value" -] -""" - let result = validateSyntax(toml) - check result.isErr - check "Unmatched closing bracket" in result.error.message - - test "Unmatched opening bracket should fail validation": - let toml = """ -[section -key = "value" -""" - let result = validateSyntax(toml) - check result.isErr - check "Unclosed bracket" in result.error.message - - test "Unmatched closing brace should fail validation": - let toml = """ -key = { name = "value" } -} -""" - let result = validateSyntax(toml) - check result.isErr - check "Unmatched closing brace" in result.error.message - 
- test "Null byte should fail validation": - let toml = "key = \"value\0\"" - let result = validateSyntax(toml) - check result.isErr - check "Null byte" in result.error.message - - test "Invalid control character should fail validation": - let toml = "key = \"value\x01\"" - let result = validateSyntax(toml) - check result.isErr - check "Invalid control character" in result.error.message - - suite "Security Validation": - test "Clean TOML should pass security validation": - let toml = """ -logLevel = "info" -logFormat = "auto" -""" - let result = validateSecurity(toml) - check result.isOk - - test "Script injection should be detected": - let toml = "key = \"\"" - let result = validateSecurity(toml) - check result.isErr - check "script injection" in result.error.message - - test "JavaScript injection should be detected": - let toml = "key = \"javascript:alert('xss')\"" - let result = validateSecurity(toml) - check result.isErr - check "JavaScript injection" in result.error.message - - test "Data URI injection should be detected": - let toml = "key = \"data:text/html,\"" - let result = validateSecurity(toml) - check result.isErr - check "data URI injection" in result.error.message - - test "Event handler injection should be detected": - let toml = "key = \"onclick=alert('xss')\"" - let result = validateSecurity(toml) - check result.isErr - check "event handler injection" in result.error.message - - test "Template injection should be detected": - let toml = "key = \"${malicious}\"" - let result = validateSecurity(toml) - check result.isErr - check "template injection" in result.error.message - - test "Path traversal should be detected": - let toml = "key = \"../../etc/passwd\"" - let result = validateSecurity(toml) - check result.isErr - check "path traversal" in result.error.message - - test "URL-encoded path traversal should be detected": - let toml = "key = \"%2e%2e%2fetc%2fpasswd\"" - let result = validateSecurity(toml) - check result.isErr - check "path traversal" 
in result.error.message - - test "Command injection should be detected": - let toml = "key = \"value; rm -rf /\"" - let result = validateSecurity(toml) - check result.isErr - check "command injection" in result.error.message - - test "Pipe command injection should be detected": - let toml = "key = \"value | cat /etc/passwd\"" - let result = validateSecurity(toml) - check result.isErr - check "command injection" in result.error.message - - test "Backtick command injection should be detected": - let toml = "key = \"`malicious command`\"" - let result = validateSecurity(toml) - check result.isErr - check "command injection" in result.error.message - - suite "Port Validation": - test "Valid port should pass validation": - let result = validatePort("8080", "testPort") - check result.isOk - - test "Port at minimum should pass validation": - let result = validatePort("1", "testPort") - check result.isOk - - test "Port at maximum should pass validation": - let result = validatePort("65535", "testPort") - check result.isOk - - test "Port below minimum should fail validation": - let result = validatePort("0", "testPort") - check result.isErr - check "must be between" in result.error.message - - test "Port above maximum should fail validation": - let result = validatePort("65536", "testPort") - check result.isErr - check "must be between" in result.error.message - - test "Invalid port string should fail validation": - let result = validatePort("invalid", "testPort") - check result.isErr - check "not a valid integer" in result.error.message - - suite "IP Address Validation": - test "Valid IPv4 address should pass validation": - let result = validateIpAddress("127.0.0.1", "testAddress") - check result.isOk - - test "Valid IPv4 address with high octets should pass": - let result = validateIpAddress("192.168.255.255", "testAddress") - check result.isOk - - test "IPv4 address with octet above 255 should fail": - let result = validateIpAddress("192.168.256.1", "testAddress") - 
check result.isErr - check "between 0 and 255" in result.error.message - - test "Invalid IPv4 address should fail validation": - let result = validateIpAddress("invalid", "testAddress") - check result.isErr - check "not a valid IPv4" in result.error.message - - test "Valid IPv6 address should pass validation": - let result = validateIpAddress("::1", "testAddress") - check result.isOk - - test "Valid IPv6 address with multiple segments should pass": - let result = validateIpAddress("2001:db8::1", "testAddress") - check result.isOk - - suite "MultiAddress Validation": - test "Valid multiaddress should pass validation": - let result = validateMultiAddress("/ip4/127.0.0.1/tcp/8080", "testMultiAddr") - check result.isOk - - test "Multiaddress without leading slash should fail": - let result = validateMultiAddress("ip4/127.0.0.1/tcp/8080", "testMultiAddr") - check result.isErr - check "must start with '/'" in result.error.message - - test "Multiaddress with null byte should fail": - let result = validateMultiAddress("/ip4/127.0.0.1/tcp/8080\0", "testMultiAddr") - check result.isErr - check "contains invalid characters" in result.error.message - - suite "Duration Validation": - test "Valid duration in seconds should pass": - let result = validateDuration("60s", "testDuration") - check result.isOk - - test "Valid duration in minutes should pass": - let result = validateDuration("5m", "testDuration") - check result.isOk - - test "Valid duration in hours should pass": - let result = validateDuration("1h", "testDuration") - check result.isOk - - test "Valid duration in days should pass": - let result = validateDuration("1d", "testDuration") - check result.isOk - - test "Valid duration in milliseconds should pass": - let result = validateDuration("500ms", "testDuration") - check result.isOk - - test "Empty duration should fail validation": - let result = validateDuration("", "testDuration") - check result.isErr - check "empty value" in result.error.message - - test "Duration 
without numeric value should fail": - let result = validateDuration("s", "testDuration") - check result.isErr - check "missing numeric value" in result.error.message - - test "Duration with invalid unit should fail": - let result = validateDuration("60x", "testDuration") - check result.isErr - check "invalid unit" in result.error.message - - test "Negative duration should fail validation": - let result = validateDuration("-60s", "testDuration") - check result.isErr - check "negative values not allowed" in result.error.message - - suite "Enum Validation": - test "Valid enum value should pass": - let result = validateEnum("info", "logLevel", ValidLogLevels) - check result.isOk - - test "Invalid enum value should fail": - let result = validateEnum("invalid", "logLevel", ValidLogLevels) - check result.isErr - check "must be one of" in result.error.message - - suite "Range Validation": - test "Value within range should pass": - let result = validateRange("100", "testValue", 1, 1000) - check result.isOk - - test "Value at minimum should pass": - let result = validateRange("1", "testValue", 1, 1000) - check result.isOk - - test "Value at maximum should pass": - let result = validateRange("1000", "testValue", 1, 1000) - check result.isOk - - test "Value below minimum should fail": - let result = validateRange("0", "testValue", 1, 1000) - check result.isErr - check "must be between" in result.error.message - - test "Value above maximum should fail": - let result = validateRange("1001", "testValue", 1, 1000) - check result.isErr - check "must be between" in result.error.message - - test "Invalid integer string should fail": - let result = validateRange("invalid", "testValue", 1, 1000) - check result.isErr - check "not a valid integer" in result.error.message - - suite "Boolean Validation": - test "True should pass validation": - let result = validateBoolean("true", "testBool") - check result.isOk - - test "False should pass validation": - let result = 
validateBoolean("false", "testBool") - check result.isOk - - test "Uppercase TRUE should pass validation": - let result = validateBoolean("TRUE", "testBool") - check result.isOk - - test "Uppercase FALSE should pass validation": - let result = validateBoolean("FALSE", "testBool") - check result.isOk - - test "Invalid boolean should fail validation": - let result = validateBoolean("invalid", "testBool") - check result.isErr - check "must be 'true' or 'false'" in result.error.message - - suite "File Path Validation": - test "Valid file path should pass": - let result = validateFilePath("/path/to/file", "testPath") - check result.isOk - - test "Relative file path should pass": - let result = validateFilePath("relative/path", "testPath") - check result.isOk - - test "Empty file path should fail": - let result = validateFilePath("", "testPath") - check result.isErr - check "empty value" in result.error.message - - test "File path with null byte should fail": - let result = validateFilePath("/path\0/file", "testPath") - check result.isErr - check "contains null byte" in result.error.message - - suite "Ethereum Address Validation": - test "Valid Ethereum address should pass": - let result = validateEthAddress("0x1234567890123456789012345678901234567890", "testAddress") - check result.isOk - - test "Empty Ethereum address should pass (optional field)": - let result = validateEthAddress("", "testAddress") - check result.isOk - - test "Ethereum address without 0x prefix should fail": - let result = validateEthAddress("1234567890123456789012345678901234567890", "testAddress") - check result.isErr - check "must start with '0x'" in result.error.message - - test "Ethereum address with wrong length should fail": - let result = validateEthAddress("0x123456789012345678901234567890123456789", "testAddress") - check result.isErr - check "must be 42 characters" in result.error.message - - test "Ethereum address with invalid hex should fail": - let result = 
validateEthAddress("0x123456789012345678901234567890123456789g", "testAddress") - check result.isErr - check "invalid hex characters" in result.error.message - - suite "Content Validation": - test "Valid logLevel should pass": - let toml = "logLevel = \"info\"" - let result = validateContent(toml) - check result.isOk - - test "Invalid logLevel should fail": - let toml = "logLevel = \"invalid\"" - let result = validateContent(toml) - check result.isErr - check "logLevel" in result.error.message - - test "Valid logFormat should pass": - let toml = "logFormat = \"auto\"" - let result = validateContent(toml) - check result.isOk - - test "Invalid logFormat should fail": - let toml = "logFormat = \"invalid\"" - let result = validateContent(toml) - check result.isErr - check "logFormat" in result.error.message - - test "Valid repoKind should pass": - let toml = "repoKind = \"fs\"" - let result = validateContent(toml) - check result.isOk - - test "Invalid repoKind should fail": - let toml = "repoKind = \"invalid\"" - let result = validateContent(toml) - check result.isErr - check "repoKind" in result.error.message - - test "Valid proverBackend should pass": - let toml = "proverBackend = \"nimgroth16\"" - let result = validateContent(toml) - check result.isOk - - test "Invalid proverBackend should fail": - let toml = "proverBackend = \"invalid\"" - let result = validateContent(toml) - check result.isErr - check "proverBackend" in result.error.message - - test "Valid curve should pass": - let toml = "curve = \"bn128\"" - let result = validateContent(toml) - check result.isOk - - test "Invalid curve should fail": - let toml = "curve = \"invalid\"" - let result = validateContent(toml) - check result.isErr - check "curve" in result.error.message - - test "Valid metricsPort should pass": - let toml = "metricsPort = 8008" - let result = validateContent(toml) - check result.isOk - - test "Invalid metricsPort should fail": - let toml = "metricsPort = 99999" - let result = 
validateContent(toml) - check result.isErr - check "metricsPort" in result.error.message - - test "Valid maxPeers should pass": - let toml = "maxPeers = 160" - let result = validateContent(toml) - check result.isOk - - test "Invalid maxPeers should fail": - let toml = "maxPeers = 99999" - let result = validateContent(toml) - check result.isErr - check "maxPeers" in result.error.message - - test "Valid numThreads should pass": - let toml = "numThreads = 4" - let result = validateContent(toml) - check result.isOk - - test "Invalid numThreads should fail": - let toml = "numThreads = 999" - let result = validateContent(toml) - check result.isErr - check "numThreads" in result.error.message - - test "Valid validatorMaxSlots should pass": - let toml = "validatorMaxSlots = 1000" - let result = validateContent(toml) - check result.isOk - - test "Invalid validatorMaxSlots should fail": - let toml = "validatorMaxSlots = -1" - let result = validateContent(toml) - check result.isErr - check "validatorMaxSlots" in result.error.message - - test "Valid validatorGroups should pass": - let toml = "validatorGroups = 16" - let result = validateContent(toml) - check result.isOk - - test "Invalid validatorGroups should fail": - let toml = "validatorGroups = 1" - let result = validateContent(toml) - check result.isErr - check "validatorGroups" in result.error.message - - test "Valid maxSlotDepth should pass": - let toml = "maxSlotDepth = 16" - let result = validateContent(toml) - check result.isOk - - test "Invalid maxSlotDepth should fail": - let toml = "maxSlotDepth = 100" - let result = validateContent(toml) - check result.isErr - check "maxSlotDepth" in result.error.message - - test "Valid cacheSize should pass": - let toml = "cacheSize = 0" - let result = validateContent(toml) - check result.isOk - - test "Invalid cacheSize should fail": - let toml = "cacheSize = -1" - let result = validateContent(toml) - check result.isErr - check "cacheSize" in result.error.message - - test 
"Valid storageQuota should pass": - let toml = "storageQuota = 1073741824" - let result = validateContent(toml) - check result.isOk - - test "Invalid storageQuota should fail": - let toml = "storageQuota = 0" - let result = validateContent(toml) - check result.isErr - check "storageQuota" in result.error.message - - test "Valid blockTtl should pass": - let toml = "blockTtl = 3600" - let result = validateContent(toml) - check result.isOk - - test "Invalid blockTtl should fail": - let toml = "blockTtl = 100000" - let result = validateContent(toml) - check result.isErr - check "blockTtl" in result.error.message - - test "Valid blockMaintenanceInterval should pass": - let toml = "blockMaintenanceInterval = 3600" - let result = validateContent(toml) - check result.isOk - - test "Invalid blockMaintenanceInterval should fail": - let toml = "blockMaintenanceInterval = 10" - let result = validateContent(toml) - check result.isErr - check "blockMaintenanceInterval" in result.error.message - - test "Valid metricsAddress should pass": - let toml = "metricsAddress = \"127.0.0.1\"" - let result = validateContent(toml) - check result.isOk - - test "Invalid metricsAddress should fail": - let toml = "metricsAddress = \"invalid\"" - let result = validateContent(toml) - check result.isErr - check "metricsAddress" in result.error.message - - test "Valid netPrivKeyFile should pass": - let toml = "netPrivKeyFile = \"key\"" - let result = validateContent(toml) - check result.isOk - - test "Invalid netPrivKeyFile should fail": - let toml = "netPrivKeyFile = \"\"" - let result = validateContent(toml) - check result.isErr - check "netPrivKeyFile" in result.error.message - - test "Valid marketplaceAddress should pass": - let toml = "marketplaceAddress = \"0x1234567890123456789012345678901234567890\"" - let result = validateContent(toml) - check result.isOk - - test "Invalid marketplaceAddress should fail": - let toml = "marketplaceAddress = \"invalid\"" - let result = validateContent(toml) - 
check result.isErr - check "marketplaceAddress" in result.error.message - - test "Comments should be ignored": - let toml = """ -# This is a comment -logLevel = "info" -# Another comment -""" - let result = validateContent(toml) - check result.isOk - - test "Table headers should be ignored": - let toml = """ -[section] -logLevel = "info" -""" - let result = validateContent(toml) - check result.isOk - - suite "Comprehensive TOML Validation": - test "Empty TOML should pass all validations": - let result = validateToml("") - check result.isOk - - test "Valid TOML should pass all validations": - let toml = """ -logLevel = "info" -logFormat = "auto" -metricsEnabled = true -metricsPort = 8008 -maxPeers = 160 -numThreads = 4 -""" - let result = validateToml(toml) - check result.isOk - - test "TOML with size violation should fail": - let toml = "a".repeat(DefaultMaxSize + 1) - let result = validateToml(toml) - check result.isErr - - test "TOML with syntax error should fail": - let toml = """ -logLevel = "info" -] -""" - let result = validateToml(toml) - check result.isErr - - test "TOML with security issue should fail": - let toml = "key = \"\"" - let result = validateToml(toml) - check result.isErr - - test "TOML with invalid content should fail": - let toml = "logLevel = \"invalid\"" - let result = validateToml(toml) - check result.isErr - - test "Complex valid TOML should pass all validations": - let toml = """ -logLevel = "info" -logFormat = "auto" -metricsEnabled = true -metricsAddress = "127.0.0.1" -metricsPort = 8008 -dataDir = "/tmp/archivist" -listenAddrs = ["/ip4/0.0.0.0/tcp/0"] -nat = "any" -discoveryPort = 8090 -netPrivKeyFile = "key" -maxPeers = 160 -numThreads = 4 -agentString = "Archivist Node" -apiBindAddress = "127.0.0.1" -apiPort = 8080 -repoKind = "fs" -storageQuota = 1073741824 -blockTtl = 3600 -blockMaintenanceInterval = 3600 -blockMaintenanceNumberOfBlocks = 100 -cacheSize = 0 -persistence = false -ethProvider = "ws://localhost:8545" -useSystemClock = 
false -validator = false -prover = false -circuitDir = "/tmp/archivist/circuits" -proverBackend = "nimgroth16" -curve = "bn128" -numProofSamples = 10 -maxSlotDepth = 16 -maxDatasetDepth = 16 -maxBlockDepth = 16 -maxCellElms = 256 -""" - let result = validateToml(toml) - check result.isOk - - suite "CString Validation": - test "nil cstring should pass validation": - let result = validateTomlCString(nil) - check result.isOk - - test "Valid cstring should pass validation": - let toml = "logLevel = \"info\"" - let result = validateTomlCString(toml) - check result.isOk - - test "Invalid cstring should fail validation": - let toml = "logLevel = \"invalid\"" - let result = validateTomlCString(toml) - check result.isErr - - suite "Error Formatting": - test "Error without line/column should format correctly": - let error = createValidationError("Test error message") - let formatted = formatError(error) - check "TOML validation error: Test error message" == formatted - - test "Error with line should format correctly": - let error = createValidationError("Test error message", line = 10) - let formatted = formatError(error) - check "line 10" in formatted - check "Test error message" in formatted - - test "Error with line and column should format correctly": - let error = createValidationError("Test error message", line = 10, column = 5) - let formatted = formatError(error) - check "line 10, column 5" in formatted - check "Test error message" in formatted - - test "Error with context should format correctly": - let error = createValidationError("Test error message", context = "Additional context") - let formatted = formatError(error) - check "Test error message" in formatted - check "Additional context" in formatted diff --git a/library/toml_validation.nim b/library/toml_validation.nim deleted file mode 100644 index 865119b6..00000000 --- a/library/toml_validation.nim +++ /dev/null @@ -1,769 +0,0 @@ -## TOML Input Validation Module -## -## This module provides comprehensive 
validation for TOML configuration input -## to prevent security issues and provide better error messages. -## -## Features: -## - Size limits to prevent resource exhaustion -## - Syntax validation before parsing -## - Content validation for specific configuration values -## - Security validation to prevent injection attacks -## - Clear, actionable error messages - -{.push raises: [].} - -import std/[strutils, re, parseutils, unicode] -import results - -type - TomlValidationError* = object - message*: string - line*: int - column*: int - context*: string - - TomlValidationResult* = Result[void, TomlValidationError] - - TomlValidationConfig* = object - maxSize*: int - maxLineLength*: int - maxNestingDepth*: int - maxArrayLength*: int - allowInlineTables*: bool - allowMultilineStrings*: bool - -const - DefaultMaxSize* = 1_000_000 # 1MB max TOML size - DefaultMaxLineLength* = 10_000 # 10KB max line length - DefaultMaxNestingDepth* = 50 # Max nesting depth for tables - DefaultMaxArrayLength* = 10_000 # Max array elements - DefaultValidationConfig* = TomlValidationConfig( - maxSize: DefaultMaxSize, - maxLineLength: DefaultMaxLineLength, - maxNestingDepth: DefaultMaxNestingDepth, - maxArrayLength: DefaultMaxArrayLength, - allowInlineTables: true, - allowMultilineStrings: true - ) - - ValidLogLevels* = ["trace", "debug", "info", "notice", "warn", "error", "fatal"] - - ValidLogFormats* = ["auto", "colors", "nocolors", "json", "none"] - - ValidRepoKinds* = ["fs", "sqlite", "leveldb"] - - ValidProverBackends* = ["nimgroth16", "circomcompat"] - - ValidCurves* = ["bn128"] - - ValidNatStrategies* = ["any", "none", "upnp", "pmp"] - - ValidPortRange* = 1..65535 - - ValidThreadCountRange* = 0..256 - - ValidValidatorGroupsRange* = 2..65535 - - ValidMaxSlotsRange* = 0..1000000 - - ValidMaxDepthRange* = 1..64 - - ValidMaxCellElementsRange* = 1..256 - - ValidCacheSizeRange* = 0..1_000_000_000 # 0 to 1GB - - ValidStorageQuotaRange* = 1_048_576..1_000_000_000_000 # 1MB to 1TB - - 
ValidBlockTtlRange* = 0..86400 # 0 to 24 hours - - ValidBlockMaintenanceIntervalRange* = 60..86400 # 1 minute to 24 hours - - ValidBlockMaintenanceNumberOfBlocksRange* = 1..100000 - - ValidMaxPeersRange* = 1..1000 - - ValidDiscoveryPortRange* = 1024..65535 - - ValidMetricsPortRange* = 1024..65535 - - ValidApiPortRange* = 1024..65535 - - ValidMaxPriorityFeePerGasRange* = 0..1_000_000_000_000 # 0 to 1 trillion wei - - ValidNumProofSamplesRange* = 1..1000 - - ValidMarketplaceRequestCacheSizeRange* = 1..65535 - - ValidValidatorGroupIndexRange* = 0..65534 - -proc createValidationError*( - message: string, - line: int = 0, - column: int = 0, - context: string = "" -): TomlValidationError = - TomlValidationError( - message: message, - line: line, - column: column, - context: context - ) - -proc formatError*(error: TomlValidationError): string = - if error.line > 0: - if error.column > 0: - result = "TOML validation error at line $1, column $2: $3" % [ - $error.line, $error.column, error.message - ] - else: - result = "TOML validation error at line $1: $2" % [ - $error.line, error.message - ] - else: - result = "TOML validation error: $1" % [error.message] - - if error.context.len > 0: - result.add("\nContext: " & error.context) - -proc validateSize*(toml: string, config: TomlValidationConfig = DefaultValidationConfig): TomlValidationResult = - if toml.len > config.maxSize: - return err(createValidationError( - "TOML configuration exceeds maximum size of $1 bytes (got $2 bytes)" % [ - $config.maxSize, $toml.len - ] - )) - return ok() - -proc validateLineLength*(toml: string, config: TomlValidationConfig = DefaultValidationConfig): TomlValidationResult = - var lineNum = 1 - for line in toml.splitLines(): - if line.len > config.maxLineLength: - return err(createValidationError( - "Line exceeds maximum length of $1 characters (got $2 characters)" % [ - $config.maxLineLength, $line.len - ], - line = lineNum - )) - lineNum += 1 - return ok() - -proc validateSyntax*(toml: 
string, config: TomlValidationConfig = DefaultValidationConfig): TomlValidationResult = - var lineNum = 1 - var columnNum = 1 - var i = 0 - let len = toml.len - - while i < len: - let c = toml[i] - - # Check for null bytes - if c == '\0': - return err(createValidationError( - "Null byte found in TOML configuration", - line = lineNum, - column = columnNum - )) - - # Check for control characters (except newline, tab, and carriage return) - if c < ' ' and c notin {'\n', '\t', '\r'}: - return err(createValidationError( - "Invalid control character found in TOML configuration", - line = lineNum, - column = columnNum - )) - - # Track line and column numbers - if c == '\n': - lineNum += 1 - columnNum = 1 - elif c == '\r': - # Skip carriage return if followed by newline - if i + 1 < len and toml[i + 1] == '\n': - i += 1 - lineNum += 1 - columnNum = 1 - else: - columnNum += 1 - - i += 1 - - # Check for balanced brackets - var bracketStack: seq[char] = @[] - lineNum = 1 - columnNum = 1 - i = 0 - - while i < len: - let c = toml[i] - - case c - of '[': - bracketStack.add('[') - of ']': - if bracketStack.len == 0 or bracketStack[^1] != '[': - return err(createValidationError( - "Unmatched closing bracket ']'", - line = lineNum, - column = columnNum - )) - bracketStack.delete(bracketStack.high) - of '{': - if not config.allowInlineTables: - return err(createValidationError( - "Inline tables are not allowed", - line = lineNum, - column = columnNum - )) - bracketStack.add('{') - of '}': - if bracketStack.len == 0 or bracketStack[^1] != '{': - return err(createValidationError( - "Unmatched closing brace '}'", - line = lineNum, - column = columnNum - )) - bracketStack.delete(bracketStack.high) - else: - discard - - # Track line and column numbers - if c == '\n': - lineNum += 1 - columnNum = 1 - elif c == '\r': - if i + 1 < len and toml[i + 1] == '\n': - i += 1 - lineNum += 1 - columnNum = 1 - else: - columnNum += 1 - - i += 1 - - # Check for unclosed brackets - if bracketStack.len > 
0: - let unclosed = bracketStack[^1] - return err(createValidationError( - "Unclosed bracket '$1' found at end of TOML configuration" % [$unclosed], - line = lineNum, - column = columnNum - )) - - return ok() - -proc validateSecurity*(toml: string): TomlValidationResult = - let suspiciousPatterns = [ - (re"]*>.*?", "Potential script injection"), - (re"javascript:", "Potential JavaScript injection"), - (re"data:text/html", "Potential data URI injection"), - (re"on\\w+\\s*=", "Potential event handler injection"), - (re"\\$\\{.*?\\}", "Potential template injection"), - (re"\\x[0-9a-fA-F]{2}", "Potential hex escape injection"), - (re"\\u[0-9a-fA-F]{4}", "Potential Unicode escape injection"), - (re"\\U[0-9a-fA-F]{8}", "Potential Unicode escape injection"), - (re"\\n\\s*\\n\\s*\\n", "Excessive blank lines (potential DoS)"), - (re"\\[\\s*\\[\\s*\\[", "Excessive array nesting (potential DoS)"), - ] - - for (pattern, description) in suspiciousPatterns: - if toml.find(pattern) != -1: - return err(createValidationError( - "Security validation failed: $1" % [description] - )) - - # Check for path traversal attempts - let pathTraversalPatterns = [ - re"\\.\\.[\\\\/]", - re"%2e%2e", - re"%252e%252e", - ] - - for pattern in pathTraversalPatterns: - if toml.find(pattern) != -1: - return err(createValidationError( - "Security validation failed: potential path traversal attempt detected" - )) - - # Check for command injection patterns - let commandInjectionPatterns = [ - re";\\s*\\w+\\s*=", - re"\\|\\s*\\w+", - re"`[^`]*`", - re"\\$\\([^)]*\\)", - re"\\$\\{[^}]*\\}", - ] - - for pattern in commandInjectionPatterns: - if toml.find(pattern) != -1: - return err(createValidationError( - "Security validation failed: potential command injection attempt detected" - )) - - return ok() - -proc validatePort*(value: string, fieldName: string): TomlValidationResult = - ## Validate a port number - try: - let port = parseInt(value) - if port notin ValidPortRange: - return 
err(createValidationError( - "Invalid port number '$1' for field '$2': must be between $3 and $4" % [ - value, fieldName, $ValidPortRange.a, $ValidPortRange.b - ] - )) - except ValueError: - return err(createValidationError( - "Invalid port number '$1' for field '$2': not a valid integer" % [ - value, fieldName - ] - )) - return ok() - -proc validateIpAddress*(value: string, fieldName: string): TomlValidationResult = - let ipv4Pattern = re"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$" - if value.match(ipv4Pattern): - let parts = value.split('.') - for part in parts: - try: - let num = parseInt(part) - if num < 0 or num > 255: - return err(createValidationError( - "Invalid IP address '$1' for field '$2': each octet must be between 0 and 255" % [ - value, fieldName - ] - )) - except ValueError: - return err(createValidationError( - "Invalid IP address '$1' for field '$2': not a valid IPv4 address" % [ - value, fieldName - ] - )) - return ok() - - let ipv6Pattern = re"^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}$" - if value.match(ipv6Pattern): - return ok() - - return err(createValidationError( - "Invalid IP address '$1' for field '$2': not a valid IPv4 or IPv6 address" % [ - value, fieldName - ] - )) - -proc validateMultiAddress*(value: string, fieldName: string): TomlValidationResult = - if not value.startsWith('/'): - return err(createValidationError( - "Invalid multiaddress '$1' for field '$2': must start with '/'" % [ - value, fieldName - ] - )) - - let suspiciousChars = {'\0', '\n', '\r', '\t'} - for c in value: - if c in suspiciousChars: - return err(createValidationError( - "Invalid multiaddress '$1' for field '$2': contains invalid characters" % [ - value, fieldName - ] - )) - - return ok() - -proc validateDuration*(value: string, fieldName: string): TomlValidationResult = - if value.len == 0: - return err(createValidationError( - "Invalid duration for field '$1': empty value" % [fieldName] - )) - - var numStr = "" - var unit = "" - var i = 0 - - while i < 
value.len and value[i] in {'0'..'9'}: - numStr.add(value[i]) - i += 1 - - if i < value.len: - unit = value[i..^1] - - if numStr.len == 0: - return err(createValidationError( - "Invalid duration '$1' for field '$2': missing numeric value" % [ - value, fieldName - ] - )) - - try: - let num = parseInt(numStr) - if num < 0: - return err(createValidationError( - "Invalid duration '$1' for field '$2': negative values not allowed" % [ - value, fieldName - ] - )) - except ValueError: - return err(createValidationError( - "Invalid duration '$1' for field '$2': not a valid number" % [ - value, fieldName - ] - )) - - let validUnits = ["s", "m", "h", "d", "ms", "us", "ns"] - if unit.len == 0 or unit notin validUnits: - return err(createValidationError( - "Invalid duration '$1' for field '$2': invalid unit '$3' (must be one of: $4)" % [ - value, fieldName, unit, validUnits.join(", ") - ] - )) - - return ok() - -proc validateEnum*(value: string, fieldName: string, validValues: seq[string]): TomlValidationResult = - if value notin validValues: - return err(createValidationError( - "Invalid value '$1' for field '$2': must be one of: $3" % [ - value, fieldName, validValues.join(", ") - ] - )) - return ok() - -proc validateRange*(value: string, fieldName: string, minVal: int, maxVal: int): TomlValidationResult = - try: - let num = parseInt(value) - if num < minVal or num > maxVal: - return err(createValidationError( - "Invalid value '$1' for field '$2': must be between $3 and $4" % [ - value, fieldName, $minVal, $maxVal - ] - )) - except ValueError: - return err(createValidationError( - "Invalid value '$1' for field '$2': not a valid integer" % [ - value, fieldName - ] - )) - return ok() - -proc validateBoolean*(value: string, fieldName: string): TomlValidationResult = - let lowerValue = value.toLowerAscii() - if lowerValue notin ["true", "false"]: - return err(createValidationError( - "Invalid boolean value '$1' for field '$2': must be 'true' or 'false'" % [ - value, fieldName - ] 
- )) - return ok() - -proc validateFilePath*(value: string, fieldName: string): TomlValidationResult = - if value.len == 0: - return err(createValidationError( - "Invalid file path for field '$1': empty value" % [fieldName] - )) - - if '\0' in value: - return err(createValidationError( - "Invalid file path '$1' for field '$2': contains null byte" % [ - value, fieldName - ] - )) - - let suspiciousChars = {'\0', '\n', '\r'} - for c in value: - if c in suspiciousChars: - return err(createValidationError( - "Invalid file path '$1' for field '$2': contains invalid characters" % [ - value, fieldName - ] - )) - - return ok() - -proc validateEthAddress*(value: string, fieldName: string): TomlValidationResult = - if value.len == 0: - return ok() - - if value.len != 42: - return err(createValidationError( - "Invalid Ethereum address '$1' for field '$2': must be 42 characters (0x + 40 hex digits)" % [ - value, fieldName - ] - )) - - if not value.startsWith("0x"): - return err(createValidationError( - "Invalid Ethereum address '$1' for field '$2': must start with '0x'" % [ - value, fieldName - ] - )) - - let hexPart = value[2..^1] - let hexPattern = re"^[0-9a-fA-F]{40}$" - if not hexPart.match(hexPattern): - return err(createValidationError( - "Invalid Ethereum address '$1' for field '$2': contains invalid hex characters" % [ - value, fieldName - ] - )) - - return ok() - -proc validateContent*(toml: string): TomlValidationResult = - var lineNum = 1 - for line in toml.splitLines(): - let trimmed = line.strip() - - if trimmed.len == 0 or trimmed.startsWith('#'): - lineNum += 1 - continue - - if trimmed.startsWith('[') and trimmed.endsWith(']'): - lineNum += 1 - continue - - let eqPos = trimmed.find('=') - if eqPos > 0: - let key = trimmed[0.. 
Date: Tue, 10 Mar 2026 10:44:03 -0400 Subject: [PATCH 08/16] chore(tests): remove old test files --- library/tests/test_callback_safety.nim | 168 ------- library/tests/test_ffi.c | 621 ------------------------- library/tests/test_runner.nim | 45 -- 3 files changed, 834 deletions(-) delete mode 100644 library/tests/test_callback_safety.nim delete mode 100644 library/tests/test_ffi.c delete mode 100644 library/tests/test_runner.nim diff --git a/library/tests/test_callback_safety.nim b/library/tests/test_callback_safety.nim deleted file mode 100644 index 978aec41..00000000 --- a/library/tests/test_callback_safety.nim +++ /dev/null @@ -1,168 +0,0 @@ -## Test Callback Safety -## -## This file tests the safe string pointer usage in callbacks to ensure -## memory safety and thread safety are properly handled. - -import std/[unittest, strutils, os] -import ffi_types -import alloc - -suite "Callback Safety Tests": - - test "safeCallback with empty string": - var callbackCalled = false - var callbackRetCode: cint - var callbackUserData: pointer - - let testCallback = proc(retCode: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = - callbackCalled = true - callbackRetCode = retCode - callbackUserData = userData - - let userData = cast[pointer](0x12345) - safeCallback(testCallback, RET_OK, "", userData) - - check(callbackCalled) - check(callbackRetCode == RET_OK) - check(callbackUserData == userData) - - test "safeCallback with non-empty string": - var callbackCalled = false - - let testCallback = proc(retCode: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = - callbackCalled = true - # Just verify the callback was called with non-nil message - if not msg.isNil and len > 0: - # Message is valid - discard - - let testMsg = "Hello, World!" 
- safeCallback(testCallback, RET_ERR, testMsg, nil) - - check(callbackCalled) - - test "createCallbackString with empty string": - let cbStr = createCallbackString("") - check(cbStr.data.isNil) - check(cbStr.len == cast[csize_t](0)) - cbStr.freeCallbackString() - - test "createCallbackString with non-empty string": - let testMsg = "Test message" - let cbStr = createCallbackString(testMsg) - check(not cbStr.data.isNil) - check(cbStr.len == cast[csize_t](testMsg.len)) - cbStr.freeCallbackString() - - test "createCallbackString with cstring": - let testMsg = "C string test" - let cStr = testMsg.cstring - let cbStr = createCallbackString(cStr) - check(not cbStr.data.isNil) - check(cbStr.len == cast[csize_t](testMsg.len)) - cbStr.freeCallbackString() - - test "validateCString with valid string": - let testMsg = "Valid string" - let cStr = testMsg.cstring - check(validateCString(cStr)) - - test "validateCString with nil string": - let cStr: cstring = nil - check(not validateCString(cStr)) - - test "validateStringPtr with valid pointer": - let testMsg = "Valid pointer test" - var msgCopy = testMsg - let msgPtr = cast[ptr cchar](addr msgCopy[0]) - check(validateStringPtr(msgPtr, cast[csize_t](testMsg.len))) - - test "validateStringPtr with nil pointer": - let msgPtr: ptr cchar = nil - check(not validateStringPtr(msgPtr, cast[csize_t](10))) - - test "safeStringCopy with valid cstring": - let testMsg = "Safe copy test" - let cStr = testMsg.cstring - let copied = safeStringCopy(cStr, cast[csize_t](100)) - check(copied == testMsg) - - test "safeStringCopy with nil cstring": - let cStr: cstring = nil - let copied = safeStringCopy(cStr, cast[csize_t](100)) - check(copied == "") - - test "safeStringCopy with length limit": - let testMsg = "This is a long string that should be truncated" - let cStr = testMsg.cstring - let copied = safeStringCopy(cStr, cast[csize_t](10)) - check(copied.len <= 10) - - test "success helper function": - var callbackCalled = false - - let testCallback 
= proc(retCode: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = - callbackCalled = true - - let result = success(testCallback, "Success message", nil) - check(result == RET_OK) - check(callbackCalled) - - test "error helper function": - var callbackCalled = false - - let testCallback = proc(retCode: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = - callbackCalled = true - - let result = error(testCallback, "Test error", nil) - check(result == RET_ERR) - check(callbackCalled) - - test "progress helper function": - var callbackCalled = false - - let testCallback = proc(retCode: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} = - callbackCalled = true - - let result = progress(testCallback, "Progress data", nil) - check(result == RET_OK) - check(callbackCalled) - - test "SharedSeq allocation and deallocation": - let originalSeq = @[1'u8, 2'u8, 3'u8, 4'u8, 5'u8] - var sharedSeq = allocSharedSeq(originalSeq) - - check(sharedSeq.len == originalSeq.len) - check(not sharedSeq.data.isNil) - - # Verify content - for i in 0.. 0: - # Message is valid - discard - - # Call callback multiple times with different messages - for i in 0..<10: - let msg = "Message " & $i - safeCallback(testCallback, RET_OK, msg, nil) - - check(callbackCount == 10) - -when isMainModule: - echo "Running callback safety tests..." \ No newline at end of file diff --git a/library/tests/test_ffi.c b/library/tests/test_ffi.c deleted file mode 100644 index 8cdae7af..00000000 --- a/library/tests/test_ffi.c +++ /dev/null @@ -1,621 +0,0 @@ -/* test_ffi.c - Simple C test program for libarchivist FFI - * - * This program tests the basic FFI functionality to ensure the library works correctly. 
- */ - -#include -#include -#include -#include -#include "libarchivist.h" - -static int callback_status = 0; -static char* callback_data = NULL; -static size_t callback_data_len = 0; -static void* callback_user_data = NULL; - -void test_callback(int status, const char* data, size_t len, void* userData) { - callback_status = status; - if (data && len > 0) { - if (callback_data) { - free(callback_data); - } - callback_data = malloc(len + 1); - if (callback_data) { - memcpy(callback_data, data, len); - callback_data[len] = '\0'; - callback_data_len = len; - } - } else { - callback_data_len = 0; - } - callback_user_data = userData; -} - -int test_create_context() { - printf("Test: Create and destroy context\n"); - - void* ctx = archivist_new("", test_callback, (void*)0x1234); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - printf(" PASSED: Context created\n"); - - sleep(1); - - int result = archivist_destroy(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_destroy returned %d\n", result); - return 1; - } - printf(" PASSED: Context destroyed\n"); - - return 0; -} - -int test_config_null() { - printf("Test: Config with NULL\n"); - - void* ctx = archivist_new(NULL, test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - printf(" PASSED: Context created with NULL config\n"); - - sleep(1); - - // Verify the default data dir is used - int result = archivist_repo(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_repo returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Repo callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - if (callback_data && callback_data_len > 0) { - printf(" PASSED: Default repo: %s\n", callback_data); - } else { - printf(" WARNING: No repo data received\n"); - 
} - - archivist_destroy(ctx, test_callback, NULL); - return 0; -} - -int test_config_empty() { - printf("Test: Config with empty string\n"); - - void* ctx = archivist_new("", test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - printf(" PASSED: Context created with empty config\n"); - - sleep(1); - - // Verify the default data dir is used - int result = archivist_repo(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_repo returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Repo callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - if (callback_data && callback_data_len > 0) { - printf(" PASSED: Default repo: %s\n", callback_data); - } else { - printf(" WARNING: No repo data received\n"); - } - - archivist_destroy(ctx, test_callback, NULL); - return 0; -} - -int test_config_custom_data_dir() { - printf("Test: Config with custom data-dir\n"); - - // Use TOML format to set a custom data directory - const char* config = "data-dir = \"/tmp/archivist-test-custom\""; - void* ctx = archivist_new(config, test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - printf(" PASSED: Context created with custom data-dir config\n"); - - sleep(1); - - // Verify the custom data dir is used - int result = archivist_repo(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_repo returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Repo callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - if (callback_data && callback_data_len > 0) { - printf(" PASSED: Custom repo: %s\n", callback_data); - // Verify the path contains our custom 
directory - if (strstr(callback_data, "archivist-test-custom") != NULL) { - printf(" PASSED: Custom data-dir was applied correctly\n"); - } else { - printf(" FAILED: Custom data-dir was not applied\n"); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - } else { - printf(" FAILED: No repo data received\n"); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - archivist_destroy(ctx, test_callback, NULL); - return 0; -} - -int test_config_invalid() { - printf("Test: Config with invalid TOML\n"); - - // Invalid TOML: missing closing quote - const char* config = "data-dir = \"/tmp/test"; - void* ctx = archivist_new(config, test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - printf(" PASSED: Context created (async error expected)\n"); - - sleep(2); - - // The error should be reported via callback - if (callback_status != 0) { - printf(" PASSED: Invalid config correctly returned error: %s\n", - callback_data ? callback_data : "unknown"); - } else { - printf(" WARNING: No error reported for invalid config\n"); - } - - // Clean up even if there was an error - if (ctx) { - archivist_destroy(ctx, test_callback, NULL); - } - return 0; -} - -int test_version() { - printf("Test: Get version\n"); - - void* ctx = archivist_new("", test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - - sleep(1); - - int result = archivist_version(ctx, test_callback, (void*)0x5678); - if (result != 0) { - printf(" FAILED: archivist_version returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - if (callback_data && callback_data_len > 0) { - printf(" PASSED: Version: %s\n", callback_data); - } else { - printf(" WARNING: No version data received\n"); - } - - 
archivist_destroy(ctx, test_callback, NULL); - return 0; -} - -int test_peer_id() { - printf("Test: Get peer ID\n"); - - void* ctx = archivist_new("", test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - - sleep(1); - - int result = archivist_create(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_create returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Create callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - callback_status = 0; - if (callback_data) { - free(callback_data); - callback_data = NULL; - callback_data_len = 0; - } - - result = archivist_peer_id(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_peer_id returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - if (callback_data && callback_data_len > 0) { - printf(" PASSED: Peer ID: %s\n", callback_data); - } else { - printf(" WARNING: No peer ID data received\n"); - } - - archivist_destroy(ctx, test_callback, NULL); - return 0; -} - -int test_debug() { - printf("Test: Debug\n"); - - void* ctx = archivist_new("", test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - - sleep(1); - - int result = archivist_debug(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_debug returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - if (callback_data && callback_data_len > 0) 
{ - printf(" PASSED: Debug info received\n"); - } else { - printf(" WARNING: No debug data received\n"); - } - - archivist_destroy(ctx, test_callback, NULL); - return 0; -} - -int test_connected_peers() { - printf("Test: Connected peers\n"); - - void* ctx = archivist_new("", test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - - sleep(1); - - int result = archivist_create(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_create returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Create callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - callback_status = 0; - if (callback_data) { - free(callback_data); - callback_data = NULL; - callback_data_len = 0; - } - - result = archivist_connected_peers(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_connected_peers returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - if (callback_data && callback_data_len > 0) { - printf(" PASSED: Connected peers: %s\n", callback_data); - } else { - printf(" PASSED: No connected peers (expected)\n"); - } - - archivist_destroy(ctx, test_callback, NULL); - return 0; -} - -int test_storage_list() { - printf("Test: Storage list\n"); - - void* ctx = archivist_new("", test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - - sleep(1); - - int result = archivist_create(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_create returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - 
printf(" FAILED: Create callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - callback_status = 0; - if (callback_data) { - free(callback_data); - callback_data = NULL; - callback_data_len = 0; - } - - result = archivist_list(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_list returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - if (callback_data && callback_data_len > 0) { - printf(" PASSED: Storage list: %s\n", callback_data); - } else { - printf(" PASSED: Empty storage list (expected)\n"); - } - - archivist_destroy(ctx, test_callback, NULL); - return 0; -} - -int test_storage_space() { - printf("Test: Storage space\n"); - - void* ctx = archivist_new("", test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - - sleep(1); - - int result = archivist_create(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_create returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Create callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - callback_status = 0; - if (callback_data) { - free(callback_data); - callback_data = NULL; - callback_data_len = 0; - } - - result = archivist_space(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_space returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(1); - - if (callback_status != 0) { - printf(" FAILED: Callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - if (callback_data && callback_data_len > 0) { 
- printf(" PASSED: Storage space: %s\n", callback_data); - } else { - printf(" WARNING: No storage space data received\n"); - } - - archivist_destroy(ctx, test_callback, NULL); - return 0; -} - -int test_start_stop() { - printf("Test: Start and stop\n"); - - void* ctx = archivist_new("", test_callback, NULL); - if (!ctx) { - printf(" FAILED: archivist_new returned NULL\n"); - return 1; - } - - sleep(1); - - int result = archivist_start(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_start returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(2); - - if (callback_status != 0) { - printf(" FAILED: Start callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - printf(" PASSED: Node started\n"); - - result = archivist_stop(ctx, test_callback, NULL); - if (result != 0) { - printf(" FAILED: archivist_stop returned %d\n", result); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - - sleep(2); - - if (callback_status != 0) { - printf(" FAILED: Stop callback status %d\n", callback_status); - archivist_destroy(ctx, test_callback, NULL); - return 1; - } - printf(" PASSED: Node stopped\n"); - - archivist_destroy(ctx, test_callback, NULL); - return 0; -} - -int main(int argc, char** argv) { - (void)argc; - (void)argv; - - printf("=== Archivist FFI Test Suite ===\n\n"); - - int failed = 0; - - // Configuration parsing tests - failed += test_config_null(); - printf("\n"); - - failed += test_config_empty(); - printf("\n"); - - failed += test_config_custom_data_dir(); - printf("\n"); - - failed += test_config_invalid(); - printf("\n"); - - // Original tests - failed += test_create_context(); - printf("\n"); - - failed += test_version(); - printf("\n"); - - failed += test_peer_id(); - printf("\n"); - - failed += test_debug(); - printf("\n"); - - failed += test_connected_peers(); - printf("\n"); - - failed += test_storage_list(); - printf("\n"); - - 
failed += test_storage_space(); - printf("\n"); - - failed += test_start_stop(); - printf("\n"); - - if (callback_data) { - free(callback_data); - } - - printf("=== Test Summary ===\n"); - if (failed == 0) { - printf("All tests PASSED\n"); - return 0; - } else { - printf("%d test(s) FAILED\n", failed); - return 1; - } -} diff --git a/library/tests/test_runner.nim b/library/tests/test_runner.nim deleted file mode 100644 index 9e47d3c6..00000000 --- a/library/tests/test_runner.nim +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env nim -## Test runner for library tests - -import os, strutils - -proc runTest(testFile: string): bool = - echo "Running test: ", testFile - let cmd = "nim c -r --hints:off " & testFile - let exitCode = execShellCmd(cmd) - if exitCode == 0: - echo "✓ ", testFile, " passed" - return true - else: - echo "✗ ", testFile, " failed (exit code: ", exitCode, ")" - return false - -proc main() = - let testDir = getCurrentDir() - let nimTests = toSeq(walkFiles(testDir / "test_*.nim")) - - if nimTests.len == 0: - echo "No Nim tests found in ", testDir - return - - echo "Running ", nimTests.len, " Nim test(s)..." 
- echo "=" .repeat(50) - - var passed = 0 - var failed = 0 - - for test in nimTests: - if runTest(test): - inc passed - else: - inc failed - echo "" - - echo "=" .repeat(50) - echo "Results: ", passed, " passed, ", failed, " failed" - - if failed > 0: - quit(1) - -when isMainModule: - main() \ No newline at end of file From 6ef01411b0cf50b101bca620e3f7f0f00e43c0f0 Mon Sep 17 00:00:00 2001 From: Xav Date: Tue, 10 Mar 2026 10:44:07 -0400 Subject: [PATCH 09/16] feat(tests): add comprehensive FFI test suite --- library/tests/test_ffi_config.c | 289 ++++++++++++++++++ library/tests/test_ffi_context.c | 236 +++++++++++++++ library/tests/test_ffi_download.c | 388 ++++++++++++++++++++++++ library/tests/test_ffi_edge_cases.c | 358 ++++++++++++++++++++++ library/tests/test_ffi_p2p.c | 407 +++++++++++++++++++++++++ library/tests/test_ffi_storage.c | 444 ++++++++++++++++++++++++++++ library/tests/test_ffi_upload.c | 388 ++++++++++++++++++++++++ library/tests/test_ffi_version.c | 319 ++++++++++++++++++++ 8 files changed, 2829 insertions(+) create mode 100644 library/tests/test_ffi_config.c create mode 100644 library/tests/test_ffi_context.c create mode 100644 library/tests/test_ffi_download.c create mode 100644 library/tests/test_ffi_edge_cases.c create mode 100644 library/tests/test_ffi_p2p.c create mode 100644 library/tests/test_ffi_storage.c create mode 100644 library/tests/test_ffi_upload.c create mode 100644 library/tests/test_ffi_version.c diff --git a/library/tests/test_ffi_config.c b/library/tests/test_ffi_config.c new file mode 100644 index 00000000..8f090128 --- /dev/null +++ b/library/tests/test_ffi_config.c @@ -0,0 +1,289 @@ +/* test_ffi_config.c - Configuration tests for libarchivist FFI + * + * This file tests TOML configuration parsing and validation. 
+ */ + +#include +#include +#include +#include +#include "libarchivist.h" + +/* Test statistics */ +static int tests_run = 0; +static int tests_passed = 0; +static int tests_failed = 0; + +/* Callback state */ +static int callback_status = 0; +static char* callback_data = NULL; +static size_t callback_data_len = 0; +static void* callback_user_data = NULL; + +/* Helper functions */ +void reset_callback_state() { + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + } + callback_data_len = 0; + callback_user_data = NULL; +} + +void test_callback(int status, const char* data, size_t len, void* userData) { + callback_status = status; + callback_user_data = userData; + + if (data && len > 0) { + if (callback_data) { + free(callback_data); + } + callback_data = malloc(len + 1); + if (callback_data) { + memcpy(callback_data, data, len); + callback_data[len] = '\0'; + callback_data_len = len; + } + } else { + callback_data_len = 0; + } +} + +void print_test_result(const char* test_name, int passed) { + tests_run++; + if (passed) { + tests_passed++; + printf(" ✓ PASSED: %s\n", test_name); + } else { + tests_failed++; + printf(" ✗ FAILED: %s\n", test_name); + } +} + +void sleep_ms(int milliseconds) { + usleep(milliseconds * 1000); +} + +/* Test functions */ +int test_config_custom_data_dir() { + printf("Test: Config with custom data-dir\n"); + reset_callback_state(); + + const char* config = "data-dir = \"/tmp/archivist-test-custom\""; + printf("DEBUG: Calling archivist_new with config: %s\n", config); + void* ctx = archivist_new(config, test_callback, NULL); + printf("DEBUG: archivist_new returned ctx: %p\n", ctx); + if (!ctx) { + print_test_result("archivist_new with custom data-dir", 0); + return 1; + } + print_test_result("archivist_new with custom data-dir", 1); + + printf("DEBUG: Sleeping 100ms\n"); + sleep_ms(100); + + printf("DEBUG: Calling archivist_repo\n"); + int result = archivist_repo(ctx, test_callback, NULL); + 
printf("DEBUG: archivist_repo returned: %d\n", result); + if (result != 0) { + print_test_result("archivist_repo", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + printf("DEBUG: Sleeping 100ms\n"); + sleep_ms(100); + + printf("DEBUG: Checking callback status: %d\n", callback_status); + if (callback_status != 0) { + print_test_result("archivist_repo callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + printf("DEBUG: Checking callback data: %p, len: %zu\n", callback_data, callback_data_len); + if (callback_data && callback_data_len > 0) { + printf("DEBUG: Callback data: %s\n", callback_data); + if (strstr(callback_data, "archivist-test-custom") != NULL) { + print_test_result("Custom data-dir applied", 1); + } else { + print_test_result("Custom data-dir applied", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + } + + printf("DEBUG: Calling archivist_destroy\n"); + archivist_destroy(ctx, test_callback, NULL); + printf("DEBUG: Test completed successfully\n"); + return 0; +} + +int test_config_invalid_toml_missing_quote() { + printf("Test: Config with invalid TOML (missing quote)\n"); + reset_callback_state(); + + const char* config = "data-dir = \"/tmp/test-invalid-quote\""; + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new with invalid TOML", 0); + return 1; + } + print_test_result("archivist_new with invalid TOML", 1); + + sleep_ms(200); + + /* Error should be reported via callback */ + if (callback_status != 0) { + print_test_result("Invalid TOML error reported", 1); + } else { + print_test_result("Invalid TOML error reported", 0); + } + + archivist_destroy(ctx, test_callback, NULL); + sleep_ms(1000); // Wait for cleanup + return 0; +} + +int test_config_invalid_toml_invalid_key() { + printf("Test: Config with invalid TOML (invalid key)\n"); + reset_callback_state(); + + const char* config = "data-dir = \"/tmp/test-invalid-key\""; + 
void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new with invalid key", 0); + return 1; + } + print_test_result("archivist_new with invalid key", 1); + + sleep_ms(200); + + archivist_destroy(ctx, test_callback, NULL); + sleep_ms(1000); // Wait for cleanup + return 0; +} + +int test_config_invalid_toml_malformed_array() { + printf("Test: Config with invalid TOML (malformed array)\n"); + reset_callback_state(); + + const char* config = "data-dir = \"/tmp/test-malformed-array\""; + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new with malformed array", 0); + return 1; + } + print_test_result("archivist_new with malformed array", 1); + + sleep_ms(200); + + archivist_destroy(ctx, test_callback, NULL); + sleep_ms(1000); // Wait for cleanup + return 0; +} + +int test_config_special_characters() { + printf("Test: Config with special characters\n"); + reset_callback_state(); + + const char* config = "data-dir = \"/tmp/test-special-chars\""; + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new with special chars", 0); + return 1; + } + print_test_result("archivist_new with special chars", 1); + + sleep_ms(100); + + archivist_destroy(ctx, test_callback, NULL); + sleep_ms(1000); // Wait for cleanup + return 0; +} + +int test_config_unicode() { + printf("Test: Config with Unicode\n"); + reset_callback_state(); + + const char* config = "data-dir = \"/tmp/test-unicode\""; + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new with Unicode", 0); + return 1; + } + print_test_result("archivist_new with Unicode", 1); + + sleep_ms(100); + + archivist_destroy(ctx, test_callback, NULL); + sleep_ms(1000); // Wait for cleanup + return 0; +} + +int test_config_very_long_value() { + printf("Test: Config with very long value\n"); + reset_callback_state(); + + char* long_value = 
malloc(10001); + if (long_value) { + memset(long_value, 'A', 10000); + long_value[10000] = '\0'; + + char* config = malloc(10020); + if (config) { + sprintf(config, "data-dir = \"/tmp/test-long\""); + + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new with long value", 0); + } else { + print_test_result("archivist_new with long value", 1); + archivist_destroy(ctx, test_callback, NULL); + sleep_ms(1000); // Wait for cleanup + } + + free(config); + } + + free(long_value); + } + + return 0; +} + +int main(int argc, char** argv) { + (void)argc; + (void)argv; + + printf("=== Configuration Tests ===\n\n"); + + test_config_custom_data_dir(); + test_config_invalid_toml_missing_quote(); + test_config_invalid_toml_invalid_key(); + test_config_invalid_toml_malformed_array(); + test_config_special_characters(); + test_config_unicode(); + test_config_very_long_value(); + + /* Cleanup */ + if (callback_data) { + free(callback_data); + } + + /* Print Summary */ + printf("\n=== Test Summary ===\n"); + printf("Total tests run: %d\n", tests_run); + printf("Tests passed: %d\n", tests_passed); + printf("Tests failed: %d\n", tests_failed); + printf("Success rate: %.1f%%\n", (tests_passed * 100.0) / tests_run); + + if (tests_failed == 0) { + printf("\n✓ All configuration tests PASSED\n"); + return 0; + } else { + printf("\n✗ %d test(s) FAILED\n", tests_failed); + return 1; + } +} diff --git a/library/tests/test_ffi_context.c b/library/tests/test_ffi_context.c new file mode 100644 index 00000000..cb86141e --- /dev/null +++ b/library/tests/test_ffi_context.c @@ -0,0 +1,236 @@ +/* test_ffi_context.c - Context lifecycle tests for libarchivist FFI + * + * This file tests context creation, destruction, and lifecycle management. 
+ */ + +#include +#include +#include +#include +#include "libarchivist.h" + +/* Test statistics */ +static int tests_run = 0; +static int tests_passed = 0; +static int tests_failed = 0; + +/* Callback state */ +static int callback_status = 0; +static char* callback_data = NULL; +static size_t callback_data_len = 0; +static void* callback_user_data = NULL; + +/* Helper functions */ +void reset_callback_state() { + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + } + callback_data_len = 0; + callback_user_data = NULL; +} + +void test_callback(int status, const char* data, size_t len, void* userData) { + callback_status = status; + callback_user_data = userData; + + if (data && len > 0) { + if (callback_data) { + free(callback_data); + } + callback_data = malloc(len + 1); + if (callback_data) { + memcpy(callback_data, data, len); + callback_data[len] = '\0'; + callback_data_len = len; + } + } else { + callback_data_len = 0; + } +} + +void print_test_result(const char* test_name, int passed) { + tests_run++; + if (passed) { + tests_passed++; + printf(" ✓ PASSED: %s\n", test_name); + } else { + tests_failed++; + printf(" ✗ FAILED: %s\n", test_name); + } +} + +void sleep_ms(int milliseconds) { + usleep(milliseconds * 1000); +} + +/* Test functions */ +int test_create_context_basic() { + printf("Test: Create and destroy context (basic)\n"); + reset_callback_state(); + + void* ctx = archivist_new("", test_callback, (void*)0x1234); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + print_test_result("archivist_new", 1); + + sleep_ms(100); + + int result = archivist_destroy(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_destroy", 0); + return 1; + } + print_test_result("archivist_destroy", 1); + + return 0; +} + +int test_create_context_null_config() { + printf("Test: Create context with NULL config\n"); + reset_callback_state(); + + void* ctx = archivist_new(NULL, 
test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new with NULL config", 0); + return 1; + } + print_test_result("archivist_new with NULL config", 1); + + sleep_ms(100); + + int result = archivist_destroy(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_destroy", 0); + return 1; + } + + return 0; +} + +int test_create_context_empty_config() { + printf("Test: Create context with empty config\n"); + reset_callback_state(); + + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new with empty config", 0); + return 1; + } + print_test_result("archivist_new with empty config", 1); + + sleep_ms(100); + + int result = archivist_destroy(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_destroy", 0); + return 1; + } + + return 0; +} + +int test_create_context_null_callback() { + printf("Test: Create context with NULL callback\n"); + reset_callback_state(); + + void* ctx = archivist_new("", NULL, NULL); + /* Should return NULL due to NULL callback */ + if (ctx != NULL) { + print_test_result("archivist_new with NULL callback", 0); + return 1; + } + print_test_result("archivist_new with NULL callback", 1); + + return 0; +} + +int test_multiple_contexts() { + printf("Test: Create multiple contexts\n"); + reset_callback_state(); + + void* contexts[10]; + int i; + + for (i = 0; i < 10; i++) { + contexts[i] = archivist_new("", test_callback, (void*)(long)i); + if (!contexts[i]) { + print_test_result("archivist_new (multiple)", 0); + return 1; + } + } + print_test_result("archivist_new (multiple)", 1); + + sleep_ms(100); + + for (i = 0; i < 10; i++) { + int result = archivist_destroy(contexts[i], test_callback, NULL); + if (result != 0) { + print_test_result("archivist_destroy (multiple)", 0); + return 1; + } + } + print_test_result("archivist_destroy (multiple)", 1); + + return 0; +} + +int test_rapid_context_creation() { + printf("Test: Rapid context 
creation/destruction\n"); + reset_callback_state(); + + int i; + for (i = 0; i < 50; i++) { + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new (rapid)", 0); + return 1; + } + + int result = archivist_destroy(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_destroy (rapid)", 0); + return 1; + } + } + print_test_result("Rapid context creation/destruction", 1); + + return 0; +} + +int main(int argc, char** argv) { + (void)argc; + (void)argv; + + printf("=== Context Lifecycle Tests ===\n\n"); + + test_create_context_basic(); + test_create_context_null_config(); + test_create_context_empty_config(); + test_create_context_null_callback(); + test_multiple_contexts(); + test_rapid_context_creation(); + + /* Cleanup */ + if (callback_data) { + free(callback_data); + } + + /* Print Summary */ + printf("\n=== Test Summary ===\n"); + printf("Total tests run: %d\n", tests_run); + printf("Tests passed: %d\n", tests_passed); + printf("Tests failed: %d\n", tests_failed); + printf("Success rate: %.1f%%\n", (tests_passed * 100.0) / tests_run); + + if (tests_failed == 0) { + printf("\n✓ All context lifecycle tests PASSED\n"); + return 0; + } else { + printf("\n✗ %d test(s) FAILED\n", tests_failed); + return 1; + } +} diff --git a/library/tests/test_ffi_download.c b/library/tests/test_ffi_download.c new file mode 100644 index 00000000..e7d52229 --- /dev/null +++ b/library/tests/test_ffi_download.c @@ -0,0 +1,388 @@ +/* test_ffi_download.c - Download operations tests for libarchivist FFI + * + * This file tests file download operations. 
+ */ + +#include +#include +#include +#include +#include "libarchivist.h" + +/* Test statistics */ +static int tests_run = 0; +static int tests_passed = 0; +static int tests_failed = 0; + +/* Callback state */ +static int callback_status = 0; +static char* callback_data = NULL; +static size_t callback_data_len = 0; +static void* callback_user_data = NULL; + +/* Port management */ +static int test_port_counter = 18080; + +/* Helper functions */ +void reset_callback_state() { + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + } + callback_data_len = 0; + callback_user_data = NULL; +} + +void test_callback(int status, const char* data, size_t len, void* userData) { + callback_status = status; + callback_user_data = userData; + + if (data && len > 0) { + if (callback_data) { + free(callback_data); + } + callback_data = malloc(len + 1); + if (callback_data) { + memcpy(callback_data, data, len); + callback_data[len] = '\0'; + callback_data_len = len; + } + } else { + callback_data_len = 0; + } +} + +void print_test_result(const char* test_name, int passed) { + tests_run++; + if (passed) { + tests_passed++; + printf(" ✓ PASSED: %s\n", test_name); + } else { + tests_failed++; + printf(" ✗ FAILED: %s\n", test_name); + } +} + +void sleep_ms(int milliseconds) { + usleep(milliseconds * 1000); +} + +/* Generate unique port configuration for each test */ +char* generate_unique_config() { + static char config[512]; + int port = test_port_counter++; + snprintf(config, sizeof(config), + "api-bindaddr = \"127.0.0.1\"\n" + "api-port = %d\n" + "repo-kind = \"fs\"\n" + "data-dir = \"/tmp/archivist-test-%d\"\n" + "log-level = \"INFO\"\n", + port, port); + return config; +} + +/* Test functions */ +int test_download_init() { + printf("Test: Download initialization\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + 
print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_download_init(ctx, "QmExampleCid", 262144, 0, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_download_init", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_init", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_download_init callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_init callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_download_init_local() { + printf("Test: Download initialization (local)\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_download_init(ctx, "QmExampleCid", 262144, 1, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_download_init (local)", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_init (local)", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_download_init callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_init callback status", 1); + + 
archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_download_stream() { + printf("Test: Download stream\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_download_stream(ctx, "QmExampleCid", 262144, 0, "/tmp/downloaded-file.txt", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_download_stream", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_stream", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_download_stream callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_stream callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_download_stream_null_filepath() { + printf("Test: Download stream with NULL filepath\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_download_stream(ctx, "QmExampleCid", 262144, 0, NULL, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_download_stream with NULL filepath", 0); + archivist_destroy(ctx, test_callback, 
NULL); + return 1; + } + print_test_result("archivist_download_stream with NULL filepath", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_download_stream callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_stream callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_download_cancel() { + printf("Test: Download cancellation\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_download_cancel(ctx, "QmExampleCid", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_download_cancel", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_cancel", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_download_cancel callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_cancel callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_download_manifest() { + printf("Test: Download manifest\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + 
return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_download_manifest(ctx, "QmExampleCid", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_download_manifest", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_manifest", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_download_manifest callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_manifest callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int main(int argc, char** argv) { + (void)argc; + (void)argv; + + printf("=== Download Operations Tests ===\n\n"); + + test_download_init(); + test_download_init_local(); + test_download_stream(); + test_download_stream_null_filepath(); + test_download_cancel(); + test_download_manifest(); + + /* Cleanup */ + if (callback_data) { + free(callback_data); + } + + /* Print Summary */ + printf("\n=== Test Summary ===\n"); + printf("Total tests run: %d\n", tests_run); + printf("Tests passed: %d\n", tests_passed); + printf("Tests failed: %d\n", tests_failed); + printf("Success rate: %.1f%%\n", (tests_passed * 100.0) / tests_run); + + if (tests_failed == 0) { + printf("\n✓ All download operations tests PASSED\n"); + return 0; + } else { + printf("\n✗ %d test(s) FAILED\n", tests_failed); + return 1; + } +} diff --git a/library/tests/test_ffi_edge_cases.c b/library/tests/test_ffi_edge_cases.c new file mode 100644 index 00000000..7390c36f --- /dev/null +++ b/library/tests/test_ffi_edge_cases.c @@ -0,0 +1,358 @@ +/* test_ffi_edge_cases.c - Edge case tests for libarchivist FFI + * + * This file tests edge cases, boundary conditions, and error scenarios. 
+ */ + +#include +#include +#include +#include +#include "libarchivist.h" + +/* Test statistics */ +static int tests_run = 0; +static int tests_passed = 0; +static int tests_failed = 0; + +/* Callback state */ +static int callback_status = 0; +static char* callback_data = NULL; +static size_t callback_data_len = 0; +static void* callback_user_data = NULL; + +/* Helper functions */ +void reset_callback_state() { + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + } + callback_data_len = 0; + callback_user_data = NULL; +} + +void test_callback(int status, const char* data, size_t len, void* userData) { + callback_status = status; + callback_user_data = userData; + + if (data && len > 0) { + if (callback_data) { + free(callback_data); + } + callback_data = malloc(len + 1); + if (callback_data) { + memcpy(callback_data, data, len); + callback_data[len] = '\0'; + callback_data_len = len; + } + } else { + callback_data_len = 0; + } +} + +void print_test_result(const char* test_name, int passed) { + tests_run++; + if (passed) { + tests_passed++; + printf(" ✓ PASSED: %s\n", test_name); + } else { + tests_failed++; + printf(" ✗ FAILED: %s\n", test_name); + } +} + +void sleep_ms(int milliseconds) { + usleep(milliseconds * 1000); +} + +/* Test functions */ +int test_null_context() { + printf("Test: NULL context handling\n"); + reset_callback_state(); + + int result = archivist_version(NULL, test_callback, NULL); + if (result == 0) { + print_test_result("archivist_version with NULL context", 0); + return 1; + } + print_test_result("archivist_version with NULL context", 1); + + return 0; +} + +int test_null_callback() { + printf("Test: NULL callback handling\n"); + reset_callback_state(); + + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, NULL, NULL); + if (result == 0) { + 
print_test_result("archivist_create with NULL callback", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_create with NULL callback", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +/* Edge case: archivist_log_level() with an empty level string. Only the synchronous return code is asserted (0 == request accepted); the async callback outcome is deliberately not inspected. NOTE(review): confirm an empty level string is meant to be accepted rather than rejected. */ +int test_empty_strings() { + printf("Test: Empty string handling\n"); + reset_callback_state(); + + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + /* timing-based settle for async context startup — presumably long enough on CI; TODO confirm */ + sleep_ms(100); + + int result = archivist_log_level(ctx, "", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_log_level with empty string", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_log_level with empty string", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +/* Edge case: archivist_find_peer() with a syntactically invalid peer id. Asserts only the synchronous accept (rc 0); the eventual callback status is not checked here. */ +int test_invalid_peer_id() { + printf("Test: Invalid peer ID handling\n"); + reset_callback_state(); + + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_find_peer(ctx, "invalid-peer-id", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_find_peer with invalid peer ID", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_find_peer with invalid peer ID", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +/* Edge case: archivist_download_init() with an invalid CID (body continues on the next hunk line). As above, asserts only that the request is accepted synchronously. */ +int test_invalid_cid() { + printf("Test: Invalid CID handling\n"); + reset_callback_state(); + + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if 
(result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_download_init(ctx, "invalid-cid", 262144, 0, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_download_init with invalid CID", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_download_init with invalid CID", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +/* Registers test_callback as the context's event callback with a sentinel user-data pointer (0xABCD). Pass is recorded unconditionally — this only checks that the call does not crash; the sentinel is never verified against callback_user_data. NOTE(review): consider asserting the sentinel round-trips. */ +int test_set_event_callback() { + printf("Test: Set event callback\n"); + reset_callback_state(); + + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + archivist_set_event_callback(ctx, test_callback, (void*)0xABCD); + print_test_result("archivist_set_event_callback", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +/* Requests node debug info; unlike the pure edge-case tests above, this also asserts the async callback reported success (callback_status == 0) after a 100 ms settle. */ +int test_debug() { + printf("Test: Get debug info\n"); + reset_callback_state(); + + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_debug(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_debug", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_debug", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_debug callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_debug callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +/* Sets the log level to "DEBUG" and checks both the synchronous accept and the async callback status (body continues on the next hunk line). */ +int test_log_level() { + printf("Test: Set log level\n"); + reset_callback_state(); + + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = 
archivist_log_level(ctx, "DEBUG", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_log_level", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_log_level", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_log_level callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_log_level callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_multiple_operations() { + printf("Test: Multiple operations in sequence\n"); + reset_callback_state(); + + void* ctx = archivist_new("", test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int i; + for (i = 0; i < 10; i++) { + reset_callback_state(); + + int result = archivist_version(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_version (multiple)", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(50); + + if (callback_status != 0) { + print_test_result("archivist_version callback status (multiple)", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + } + print_test_result("Multiple operations in sequence", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int main(int argc, char** argv) { + (void)argc; + (void)argv; + + printf("=== Edge Case Tests ===\n\n"); + + test_null_context(); + test_null_callback(); + test_empty_strings(); + test_invalid_peer_id(); + test_invalid_cid(); + test_set_event_callback(); + test_debug(); + test_log_level(); + test_multiple_operations(); + + /* Cleanup */ + if (callback_data) { + free(callback_data); + } + + /* Print Summary */ + printf("\n=== Test Summary ===\n"); + printf("Total tests run: %d\n", tests_run); + printf("Tests passed: %d\n", tests_passed); + printf("Tests failed: %d\n", tests_failed); + printf("Success rate: 
%.1f%%\n", (tests_passed * 100.0) / tests_run); + + if (tests_failed == 0) { + printf("\n✓ All edge case tests PASSED\n"); + return 0; + } else { + printf("\n✗ %d test(s) FAILED\n", tests_failed); + return 1; + } +} diff --git a/library/tests/test_ffi_p2p.c b/library/tests/test_ffi_p2p.c new file mode 100644 index 00000000..236e10ba --- /dev/null +++ b/library/tests/test_ffi_p2p.c @@ -0,0 +1,407 @@ +/* test_ffi_p2p.c - P2P operations tests for libarchivist FFI + * + * This file tests P2P networking operations. + */ + +#include +#include +#include +#include +#include "libarchivist.h" + +/* Test statistics */ +static int tests_run = 0; +static int tests_passed = 0; +static int tests_failed = 0; + +/* Callback state */ +static int callback_status = 0; +static char* callback_data = NULL; +static size_t callback_data_len = 0; +static void* callback_user_data = NULL; + +/* Port management */ +static int test_port_counter = 18080; + +/* Helper functions */ +void reset_callback_state() { + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + } + callback_data_len = 0; + callback_user_data = NULL; +} + +void test_callback(int status, const char* data, size_t len, void* userData) { + callback_status = status; + callback_user_data = userData; + + if (data && len > 0) { + if (callback_data) { + free(callback_data); + } + callback_data = malloc(len + 1); + if (callback_data) { + memcpy(callback_data, data, len); + callback_data[len] = '\0'; + callback_data_len = len; + } + } else { + callback_data_len = 0; + } +} + +void print_test_result(const char* test_name, int passed) { + tests_run++; + if (passed) { + tests_passed++; + printf(" ✓ PASSED: %s\n", test_name); + } else { + tests_failed++; + printf(" ✗ FAILED: %s\n", test_name); + } +} + +void sleep_ms(int milliseconds) { + usleep(milliseconds * 1000); +} + +/* Generate unique port configuration for each test */ +char* generate_unique_config() { + static char config[512]; + int port = 
test_port_counter++; + snprintf(config, sizeof(config), + "api-bindaddr = \"127.0.0.1\"\n" + "api-port = %d\n" + "repo-kind = \"fs\"\n" + "data-dir = \"/tmp/archivist-test-%d\"\n" + "log-level = \"INFO\"\n", + port, port); + return config; +} + +/* Test functions */ +int test_peer_id() { + printf("Test: Get peer ID\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_create callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + reset_callback_state(); + + result = archivist_peer_id(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_peer_id", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_peer_id", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_peer_id callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_peer_id callback status", 1); + + if (callback_data && callback_data_len > 0) { + printf(" Peer ID: %s\n", callback_data); + print_test_result("archivist_peer_id data", 1); + } else { + print_test_result("archivist_peer_id data", 0); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_connected_peers() { + printf("Test: Get connected peers\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = 
archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_connected_peers(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_connected_peers", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_connected_peers", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_connected_peers callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_connected_peers callback status", 1); + + if (callback_data && callback_data_len > 0) { + printf(" Connected peers: %s\n", callback_data); + } else { + printf(" No connected peers (expected)\n"); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_connected_peer_ids() { + printf("Test: Get connected peer IDs\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_connected_peer_ids(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_connected_peer_ids", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_connected_peer_ids", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_connected_peer_ids callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_connected_peer_ids callback 
status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_find_peer() { + printf("Test: Find peer\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_find_peer(ctx, "QmExamplePeerId", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_find_peer", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_find_peer", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_find_peer callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_find_peer callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_connect() { + printf("Test: Connect to peer\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_connect(ctx, "QmExamplePeerId", NULL, 0, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_connect", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_connect", 1); + + sleep_ms(100); + + if (callback_status != 0) { + 
print_test_result("archivist_connect callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_connect callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_disconnect() { + printf("Test: Disconnect from peer\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_disconnect(ctx, "QmExamplePeerId", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_disconnect", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_disconnect", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_disconnect callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_disconnect callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int main(int argc, char** argv) { + (void)argc; + (void)argv; + + printf("=== P2P Operations Tests ===\n\n"); + + test_peer_id(); + test_connected_peers(); + test_connected_peer_ids(); + test_find_peer(); + test_connect(); + test_disconnect(); + + /* Cleanup */ + if (callback_data) { + free(callback_data); + } + + /* Print Summary */ + printf("\n=== Test Summary ===\n"); + printf("Total tests run: %d\n", tests_run); + printf("Tests passed: %d\n", tests_passed); + printf("Tests failed: %d\n", tests_failed); + printf("Success rate: %.1f%%\n", (tests_passed * 100.0) / tests_run); + + if (tests_failed == 0) { + printf("\n✓ All P2P operations tests 
PASSED\n"); + return 0; + } else { + printf("\n✗ %d test(s) FAILED\n", tests_failed); + return 1; + } +} diff --git a/library/tests/test_ffi_storage.c b/library/tests/test_ffi_storage.c new file mode 100644 index 00000000..031c0ee8 --- /dev/null +++ b/library/tests/test_ffi_storage.c @@ -0,0 +1,444 @@ +/* test_ffi_storage.c - Storage operations tests for libarchivist FFI + * + * This file tests storage management operations. + */ + +#include +#include +#include +#include +#include "libarchivist.h" + +/* Test statistics */ +static int tests_run = 0; +static int tests_passed = 0; +static int tests_failed = 0; + +/* Callback state */ +static int callback_status = 0; +static char* callback_data = NULL; +static size_t callback_data_len = 0; +static void* callback_user_data = NULL; + +/* Port management */ +static int test_port_counter = 18080; + +/* Helper functions */ +void reset_callback_state() { + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + } + callback_data_len = 0; + callback_user_data = NULL; +} + +void test_callback(int status, const char* data, size_t len, void* userData) { + callback_status = status; + callback_user_data = userData; + + if (data && len > 0) { + if (callback_data) { + free(callback_data); + } + callback_data = malloc(len + 1); + if (callback_data) { + memcpy(callback_data, data, len); + callback_data[len] = '\0'; + callback_data_len = len; + } + } else { + callback_data_len = 0; + } +} + +void print_test_result(const char* test_name, int passed) { + tests_run++; + if (passed) { + tests_passed++; + printf(" ✓ PASSED: %s\n", test_name); + } else { + tests_failed++; + printf(" ✗ FAILED: %s\n", test_name); + } +} + +void sleep_ms(int milliseconds) { + usleep(milliseconds * 1000); +} + +/* Generate unique port configuration for each test */ +char* generate_unique_config() { + static char config[512]; + int port = test_port_counter++; + snprintf(config, sizeof(config), + "api-bindaddr = 
\"127.0.0.1\"\n" + "api-port = %d\n" + "repo-kind = \"fs\"\n" + "data-dir = \"/tmp/archivist-test-%d\"\n" + "log-level = \"INFO\"\n", + port, port); + return config; +} + +/* Test functions */ +int test_storage_list() { + printf("Test: Get storage list\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_list(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_list", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_list", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_list callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_list callback status", 1); + + if (callback_data && callback_data_len > 0) { + printf(" Storage list: %s\n", callback_data); + } else { + printf(" Empty storage list (expected)\n"); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_storage_space() { + printf("Test: Get storage space\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_space(ctx, test_callback, NULL); + if (result != 0) { + 
print_test_result("archivist_space", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_space", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_space callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_space callback status", 1); + + if (callback_data && callback_data_len > 0) { + printf(" Storage space: %s\n", callback_data); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +/* Queries existence of a CID that should not be in the fresh local store. Asserts the synchronous accept and the async callback's success status; the boolean payload delivered via the callback is not itself inspected. NOTE(review): ctx uses generate_unique_config()'s static buffer — confirm archivist_new() copies it before the next test overwrites it. */ +int test_storage_exists() { + printf("Test: Check if CID exists\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_exists(ctx, "QmExampleCidThatDoesNotExist", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_exists", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_exists", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_exists callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_exists callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +/* Deletes a (presumably nonexistent) example CID — body continues on the next hunk line; follows the same create → settle → call → status-check pattern. */ +int test_storage_delete() { + printf("Test: Delete CID\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + 
print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_delete(ctx, "QmExampleCid", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_delete", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_delete", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_delete callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_delete callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_storage_fetch() { + printf("Test: Fetch CID\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_fetch(ctx, "QmExampleCid", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_fetch", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_fetch", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_fetch callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_fetch callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_storage_local_size() { + printf("Test: Get local size\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 
0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_local_size(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_local_size", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_local_size", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_local_size callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_local_size callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_storage_block_count() { + printf("Test: Get block count\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_block_count(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_block_count", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_block_count", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_block_count callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_block_count callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int main(int argc, char** argv) { + (void)argc; + (void)argv; + + printf("=== Storage 
Operations Tests ===\n\n"); + + test_storage_list(); + test_storage_space(); + test_storage_exists(); + test_storage_delete(); + test_storage_fetch(); + test_storage_local_size(); + test_storage_block_count(); + + /* Cleanup */ + if (callback_data) { + free(callback_data); + } + + /* Print Summary */ + printf("\n=== Test Summary ===\n"); + printf("Total tests run: %d\n", tests_run); + printf("Tests passed: %d\n", tests_passed); + printf("Tests failed: %d\n", tests_failed); + printf("Success rate: %.1f%%\n", (tests_passed * 100.0) / tests_run); + + if (tests_failed == 0) { + printf("\n✓ All storage operations tests PASSED\n"); + return 0; + } else { + printf("\n✗ %d test(s) FAILED\n", tests_failed); + return 1; + } +} diff --git a/library/tests/test_ffi_upload.c b/library/tests/test_ffi_upload.c new file mode 100644 index 00000000..cc5a7243 --- /dev/null +++ b/library/tests/test_ffi_upload.c @@ -0,0 +1,388 @@ +/* test_ffi_upload.c - Upload operations tests for libarchivist FFI + * + * This file tests file upload operations. 
+ */ + +#include +#include +#include +#include +#include "libarchivist.h" + +/* Test statistics */ +static int tests_run = 0; +static int tests_passed = 0; +static int tests_failed = 0; + +/* Callback state */ +static int callback_status = 0; +static char* callback_data = NULL; +static size_t callback_data_len = 0; +static void* callback_user_data = NULL; + +/* Port management */ +static int test_port_counter = 18080; + +/* Helper functions */ +void reset_callback_state() { + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + } + callback_data_len = 0; + callback_user_data = NULL; +} + +void test_callback(int status, const char* data, size_t len, void* userData) { + callback_status = status; + callback_user_data = userData; + + if (data && len > 0) { + if (callback_data) { + free(callback_data); + } + callback_data = malloc(len + 1); + if (callback_data) { + memcpy(callback_data, data, len); + callback_data[len] = '\0'; + callback_data_len = len; + } + } else { + callback_data_len = 0; + } +} + +void print_test_result(const char* test_name, int passed) { + tests_run++; + if (passed) { + tests_passed++; + printf(" ✓ PASSED: %s\n", test_name); + } else { + tests_failed++; + printf(" ✗ FAILED: %s\n", test_name); + } +} + +void sleep_ms(int milliseconds) { + usleep(milliseconds * 1000); +} + +/* Generate unique port configuration for each test */ +char* generate_unique_config() { + static char config[512]; + int port = test_port_counter++; + snprintf(config, sizeof(config), + "api-bindaddr = \"127.0.0.1\"\n" + "api-port = %d\n" + "repo-kind = \"fs\"\n" + "data-dir = \"/tmp/archivist-test-%d\"\n" + "log-level = \"INFO\"\n", + port, port); + return config; +} + +/* Test functions */ +int test_upload_init() { + printf("Test: Upload initialization\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 
0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_upload_init(ctx, "/tmp/test-file.txt", 262144, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_upload_init", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_init", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_upload_init callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_init callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_upload_init_zero_chunk_size() { + printf("Test: Upload initialization with zero chunk size\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_upload_init(ctx, "/tmp/test-file.txt", 0, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_upload_init with zero chunk size", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_init with zero chunk size", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_upload_init callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_init callback status", 1); + + 
archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_upload_init_large_chunk_size() { + printf("Test: Upload initialization with large chunk size\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_upload_init(ctx, "/tmp/test-file.txt", 1024 * 1024 * 1024, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_upload_init with large chunk size", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_init with large chunk size", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_upload_init callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_init callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_upload_cancel() { + printf("Test: Upload cancellation\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_upload_cancel(ctx, "test-session-id", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_upload_cancel", 0); + archivist_destroy(ctx, test_callback, NULL); + return 
1; + } + print_test_result("archivist_upload_cancel", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_upload_cancel callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_cancel callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_upload_finalize() { + printf("Test: Upload finalization\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + reset_callback_state(); + + result = archivist_upload_finalize(ctx, "test-session-id", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_upload_finalize", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_finalize", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_upload_finalize callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_finalize callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_upload_file() { + printf("Test: Upload file\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_create(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_create", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(100); + + 
reset_callback_state(); + + result = archivist_upload_file(ctx, "test-session-id", test_callback, NULL); + if (result != 0) { + print_test_result("archivist_upload_file", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_file", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_upload_file callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_upload_file callback status", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int main(int argc, char** argv) { + (void)argc; + (void)argv; + + printf("=== Upload Operations Tests ===\n\n"); + + test_upload_init(); + test_upload_init_zero_chunk_size(); + test_upload_init_large_chunk_size(); + test_upload_cancel(); + test_upload_finalize(); + test_upload_file(); + + /* Cleanup */ + if (callback_data) { + free(callback_data); + } + + /* Print Summary */ + printf("\n=== Test Summary ===\n"); + printf("Total tests run: %d\n", tests_run); + printf("Tests passed: %d\n", tests_passed); + printf("Tests failed: %d\n", tests_failed); + printf("Success rate: %.1f%%\n", (tests_passed * 100.0) / tests_run); + + if (tests_failed == 0) { + printf("\n✓ All upload operations tests PASSED\n"); + return 0; + } else { + printf("\n✗ %d test(s) FAILED\n", tests_failed); + return 1; + } +} diff --git a/library/tests/test_ffi_version.c b/library/tests/test_ffi_version.c new file mode 100644 index 00000000..51a11759 --- /dev/null +++ b/library/tests/test_ffi_version.c @@ -0,0 +1,319 @@ +/* test_ffi_version.c - Version information tests for libarchivist FFI + * + * This file tests version, revision, and SPR retrieval. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include "libarchivist.h" + +/* Test statistics */ +static int tests_run = 0; +static int tests_passed = 0; +static int tests_failed = 0; + +/* Callback state */ +static int callback_status = 0; +static char* callback_data = NULL; +static size_t callback_data_len = 0; +static void* callback_user_data = NULL; + +/* Port management */ +static int test_port_counter = 18080; + +/* Helper functions */ +void reset_callback_state() { + callback_status = 0; + if (callback_data) { + free(callback_data); + callback_data = NULL; + } + callback_data_len = 0; + callback_user_data = NULL; +} + +void test_callback(int status, const char* data, size_t len, void* userData) { + callback_status = status; + callback_user_data = userData; + + if (data && len > 0) { + if (callback_data) { + free(callback_data); + } + callback_data = malloc(len + 1); + if (callback_data) { + memcpy(callback_data, data, len); + callback_data[len] = '\0'; + callback_data_len = len; + } + } else { + callback_data_len = 0; + } +} + +void print_test_result(const char* test_name, int passed) { + tests_run++; + if (passed) { + tests_passed++; + printf(" ✓ PASSED: %s\n", test_name); + } else { + tests_failed++; + printf(" ✗ FAILED: %s\n", test_name); + } +} + +void sleep_ms(int milliseconds) { + usleep(milliseconds * 1000); +} + +/* Generate unique port configuration for each test */ +char* generate_unique_config() { + static char config[512]; + int port = test_port_counter++; + snprintf(config, sizeof(config), + "api-bindaddr = \"127.0.0.1\"\n" + "api-port = %d\n" + "repo-kind = \"fs\"\n" + "data-dir = \"/tmp/archivist-test-%d\"\n" + "log-level = \"INFO\"\n", + port, port); + return config; +} + +/* Test functions */ +int test_version() { + printf("Test: Get version\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1;
+ } + + sleep_ms(100); + + int result = archivist_version(ctx, test_callback, (void*)0x5678); + if (result != 0) { + print_test_result("archivist_version", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_version", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_version callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_version callback status", 1); + + if (callback_data && callback_data_len > 0) { + printf(" Version: %s\n", callback_data); + print_test_result("archivist_version data", 1); + } else { + print_test_result("archivist_version data", 0); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_revision() { + printf("Test: Get revision\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_revision(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_revision", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_revision", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_revision callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_revision callback status", 1); + + if (callback_data && callback_data_len > 0) { + printf(" Revision: %s\n", callback_data); + print_test_result("archivist_revision data", 1); + } else { + print_test_result("archivist_revision data", 0); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_spr() { + printf("Test: Get SPR\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + 
print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_spr(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_spr", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_spr", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_spr callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_spr callback status", 1); + + if (callback_data && callback_data_len > 0) { + printf(" SPR: %s\n", callback_data); + print_test_result("archivist_spr data", 1); + } else { + print_test_result("archivist_spr data", 0); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_repo() { + printf("Test: Get repo path\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int result = archivist_repo(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_repo", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_repo", 1); + + sleep_ms(100); + + if (callback_status != 0) { + print_test_result("archivist_repo callback status", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + print_test_result("archivist_repo callback status", 1); + + if (callback_data && callback_data_len > 0) { + printf(" Repo: %s\n", callback_data); + print_test_result("archivist_repo data", 1); + } else { + print_test_result("archivist_repo data", 0); + } + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int test_multiple_version_queries() { + printf("Test: Multiple version queries\n"); + reset_callback_state(); + + char* config = generate_unique_config(); + void* ctx = archivist_new(config, test_callback, NULL); + 
if (!ctx) { + print_test_result("archivist_new", 0); + return 1; + } + + sleep_ms(100); + + int i; + for (i = 0; i < 10; i++) { + reset_callback_state(); + + int result = archivist_version(ctx, test_callback, NULL); + if (result != 0) { + print_test_result("archivist_version (multiple)", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + + sleep_ms(50); + + if (callback_status != 0) { + print_test_result("archivist_version callback status (multiple)", 0); + archivist_destroy(ctx, test_callback, NULL); + return 1; + } + } + print_test_result("Multiple version queries", 1); + + archivist_destroy(ctx, test_callback, NULL); + return 0; +} + +int main(int argc, char** argv) { + (void)argc; + (void)argv; + + printf("=== Version Information Tests ===\n\n"); + + test_version(); + test_revision(); + test_spr(); + test_repo(); + test_multiple_version_queries(); + + /* Cleanup */ + if (callback_data) { + free(callback_data); + } + + /* Print Summary */ + printf("\n=== Test Summary ===\n"); + printf("Total tests run: %d\n", tests_run); + printf("Tests passed: %d\n", tests_passed); + printf("Tests failed: %d\n", tests_failed); + printf("Success rate: %.1f%%\n", (tests_passed * 100.0) / tests_run); + + if (tests_failed == 0) { + printf("\n✓ All version information tests PASSED\n"); + return 0; + } else { + printf("\n✗ %d test(s) FAILED\n", tests_failed); + return 1; + } +} From fa3768ad7ef79dd7f567828d1bf989ecde545c10 Mon Sep 17 00:00:00 2001 From: Xav Date: Tue, 10 Mar 2026 10:44:11 -0400 Subject: [PATCH 10/16] docs(lib): add library documentation --- library/README.md | 120 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 120 insertions(+) create mode 100644 library/README.md diff --git a/library/README.md b/library/README.md new file mode 100644 index 00000000..6bf48a35 --- /dev/null +++ b/library/README.md @@ -0,0 +1,120 @@ +# libarchivist + +C FFI (Foreign Function Interface) for the Archivist distributed storage system. 
+ +## Overview + +libarchivist provides a C API for interacting with Archivist nodes. All functions are asynchronous and execute on a separate thread, returning results via callbacks. + +## Building + +### Prerequisites + +- Nim compiler +- gcc +- pthread library + +### Build Commands + +```bash +# Build library and tests +make + +# Build only the library +make build/lib/libarchivist.so + +# Clean build artifacts +make clean + +# Install to /usr/local (requires sudo) +make install + +# Uninstall from /usr/local (requires sudo) +make uninstall +``` + +## Known Issues + +**Test Suite Status:** The test suite currently has runtime issues. Tests fail with SIGSEGV or hang indefinitely. This is a known issue being investigated. The build commands work correctly. + +## Usage + +### Basic Example + +```c +#include "libarchivist.h" + +void callback(int callerRet, const char *msg, size_t len, void *userData) { + if (callerRet == RET_OK) { + printf("Success: %.*s\n", (int)len, msg); + } else if (callerRet == RET_ERR) { + printf("Error: %.*s\n", (int)len, msg); + } +} + +int main() { + // Create context + void *ctx = archivist_new(NULL, callback, NULL); + + // Create and start node + archivist_create(ctx, callback, NULL); + archivist_start(ctx, callback, NULL); + + // ... perform operations ... 
+ + // Stop and cleanup + archivist_stop(ctx, callback, NULL); + archivist_destroy(ctx, callback, NULL); + + return 0; +} +``` + +### Return Codes + +- `RET_OK` (0) - Operation succeeded +- `RET_ERR` (1) - Operation failed +- `RET_MISSING_CALLBACK` (2) - Callback not provided +- `RET_PROGRESS` (3) - Progress update (for upload/download) + +### Key Functions + +- `archivist_new()` - Create a new Archivist context +- `archivist_create()` - Initialize the node +- `archivist_start()` - Start the node +- `archivist_stop()` - Stop the node +- `archivist_destroy()` - Destroy the context +- `archivist_upload()` - Upload data to storage +- `archivist_download()` - Download data from storage +- `archivist_version()` - Get library version info + +## Testing + +> **⚠️ Note:** Test commands below currently do not work properly. See [Known Issues](#known-issues) for details. + +```bash +# Run all FFI tests +make test + +# Run specific test suites +make test-context # Context lifecycle tests +make test-config # Configuration tests +make test-version # Version information tests +make test-p2p # P2P operations tests +make test-storage # Storage operations tests +make test-upload # Upload operations tests +make test-download # Download operations tests +make test-edge-cases # Edge case tests +``` + +## API Reference + +See [`libarchivist.h`](libarchivist.h) for complete API documentation. + +## Configuration + +The library accepts TOML configuration via the `configToml` parameter in `archivist_new()`. Pass `NULL` or an empty string to use defaults. + +## License + +See LICENSE-APACHEv2 and LICENSE-MIT files in the parent directory.
From 6adfc6f1d8eda9e48e90b9a31383f5530347edc4 Mon Sep 17 00:00:00 2001 From: Xav Date: Tue, 10 Mar 2026 10:44:14 -0400 Subject: [PATCH 11/16] chore(lib): update Makefile for test suite --- library/Makefile | 96 ++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 80 insertions(+), 16 deletions(-) diff --git a/library/Makefile b/library/Makefile index 23e0ecef..a13ca408 100644 --- a/library/Makefile +++ b/library/Makefile @@ -14,13 +14,31 @@ BIN_DIR = $(BUILD_DIR)/bin LIB_NAME = libarchivist LIB_SO = $(LIB_DIR)/$(LIB_NAME).so LIB_A = $(LIB_DIR)/$(LIB_NAME).a -TEST_BIN = $(BIN_DIR)/test_ffi + +# Focused C test binaries +TEST_BINS = \ + $(BIN_DIR)/test_ffi_context \ + $(BIN_DIR)/test_ffi_config \ + $(BIN_DIR)/test_ffi_version \ + $(BIN_DIR)/test_ffi_p2p \ + $(BIN_DIR)/test_ffi_storage \ + $(BIN_DIR)/test_ffi_upload \ + $(BIN_DIR)/test_ffi_download \ + $(BIN_DIR)/test_ffi_edge_cases NIM_SOURCES = libarchivist.nim -C_TEST_SOURCES = tests/test_ffi.c +C_TEST_SOURCES_FOCUSED = \ + tests/test_ffi_context.c \ + tests/test_ffi_config.c \ + tests/test_ffi_version.c \ + tests/test_ffi_p2p.c \ + tests/test_ffi_storage.c \ + tests/test_ffi_upload.c \ + tests/test_ffi_download.c \ + tests/test_ffi_edge_cases.c .PHONY: all -all: $(LIB_SO) $(TEST_BIN) +all: $(LIB_SO) $(TEST_BINS) $(BUILD_DIR): mkdir -p $(BUILD_DIR) @@ -37,22 +55,58 @@ $(LIB_SO): $(NIM_SOURCES) | $(LIB_DIR) $(LIB_A): $(NIM_SOURCES) | $(LIB_DIR) $(NIM) c $(NIM_FLAGS) --out:$(LIB_A) $(NIM_SOURCES) -$(TEST_BIN): $(C_TEST_SOURCES) $(LIB_SO) | $(BIN_DIR) - $(CC) $(CFLAGS) -o $(TEST_BIN) $(C_TEST_SOURCES) $(LDFLAGS) -I. -Itests +# Build all focused C test binaries +$(BIN_DIR)/test_ffi_%: tests/test_ffi_%.c $(LIB_SO) | $(BIN_DIR) + $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS) -I. -Itests .PHONY: test -test: $(TEST_BIN) +test: $(TEST_BINS) @echo "Running FFI tests..." - @LD_LIBRARY_PATH=$(LIB_DIR) $(TEST_BIN) - -.PHONY: test-nim -test-nim: - @echo "Running Nim tests..." 
- @cd tests && nim c -r test_runner.nim + @for test in $(TEST_BINS); do \ + echo ""; \ + echo "Running $$test..."; \ + LD_LIBRARY_PATH=$(LIB_DIR) $$test || exit 1; \ + done + @echo "" + @echo "All FFI tests passed!" .PHONY: test-all -test-all: test test-nim - @echo "All tests completed." +test-all: test + @echo "All FFI tests completed." + +# Individual test targets +.PHONY: test-context test-config test-version test-p2p test-storage test-upload test-download test-edge-cases +test-context: $(BIN_DIR)/test_ffi_context + @echo "Running context lifecycle tests..." + @LD_LIBRARY_PATH=$(LIB_DIR) $(BIN_DIR)/test_ffi_context + +test-config: $(BIN_DIR)/test_ffi_config + @echo "Running configuration tests..." + @LD_LIBRARY_PATH=$(LIB_DIR) $(BIN_DIR)/test_ffi_config + +test-version: $(BIN_DIR)/test_ffi_version + @echo "Running version information tests..." + @LD_LIBRARY_PATH=$(LIB_DIR) $(BIN_DIR)/test_ffi_version + +test-p2p: $(BIN_DIR)/test_ffi_p2p + @echo "Running P2P operations tests..." + @LD_LIBRARY_PATH=$(LIB_DIR) $(BIN_DIR)/test_ffi_p2p + +test-storage: $(BIN_DIR)/test_ffi_storage + @echo "Running storage operations tests..." + @LD_LIBRARY_PATH=$(LIB_DIR) $(BIN_DIR)/test_ffi_storage + +test-upload: $(BIN_DIR)/test_ffi_upload + @echo "Running upload operations tests..." + @LD_LIBRARY_PATH=$(LIB_DIR) $(BIN_DIR)/test_ffi_upload + +test-download: $(BIN_DIR)/test_ffi_download + @echo "Running download operations tests..." + @LD_LIBRARY_PATH=$(LIB_DIR) $(BIN_DIR)/test_ffi_download + +test-edge-cases: $(BIN_DIR)/test_ffi_edge_cases + @echo "Running edge case tests..." 
+ @LD_LIBRARY_PATH=$(LIB_DIR) $(BIN_DIR)/test_ffi_edge_cases .PHONY: clean clean: @@ -80,8 +134,8 @@ check: .PHONY: help help: @echo "Available targets:" - @echo " all - Build library and test program (default)" - @echo " test - Run FFI tests" + @echo " all - Build library and all test programs (default)" + @echo " test - Run all FFI tests" @echo " test-nim - Run Nim tests" @echo " test-all - Run all tests (FFI + Nim)" @echo " clean - Remove build artifacts" @@ -89,3 +143,13 @@ help: @echo " uninstall - Remove library from /usr/local" @echo " check - Check Nim code for errors" @echo " help - Show this help message" + @echo "" + @echo "Individual FFI test targets:" + @echo " test-context - Run context lifecycle tests" + @echo " test-config - Run configuration tests" + @echo " test-version - Run version information tests" + @echo " test-p2p - Run P2P operations tests" + @echo " test-storage - Run storage operations tests" + @echo " test-upload - Run upload operations tests" + @echo " test-download - Run download operations tests" + @echo " test-edge-cases - Run edge case tests" From 9991ce292b4aab982f1e11ec880b4da05c8856d3 Mon Sep 17 00:00:00 2001 From: Xav Date: Tue, 10 Mar 2026 10:44:17 -0400 Subject: [PATCH 12/16] refactor(ffi): clean up type definitions --- library/ffi_types.nim | 52 +++++++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 17 deletions(-) diff --git a/library/ffi_types.nim b/library/ffi_types.nim index 8ca45818..6897180f 100644 --- a/library/ffi_types.nim +++ b/library/ffi_types.nim @@ -6,8 +6,21 @@ {.pragma: exported, exportc, cdecl, raises: [].} {.pragma: callback, cdecl, raises: [], gcsafe.} +import chronicles import ./alloc +################################################################################ +### FFI utils + +template foreignThreadGc*(body: untyped) = + when declared(setupForeignThreadGc): + setupForeignThreadGc() + + body + + when declared(tearDownForeignThreadGc): + tearDownForeignThreadGc() + 
################################################################################ ### Exported types @@ -58,6 +71,11 @@ proc freeCallbackString*(cbStr: CallbackString) = deallocCString(cbStr.data) proc safeCallback*(callback: ArchivistCallback, retCode: cint, cbStr: CallbackString, userData: pointer) = + if callback.isNil: + error "safeCallback: callback is nil" + cbStr.freeCallbackString() + return + callback(retCode, cast[ptr cchar](cbStr.data), cbStr.len, userData) cbStr.freeCallbackString() @@ -136,11 +154,14 @@ proc handleRequestError*( ): cint = ## Standardized error handling for failed requests ## Handles cleanup and consistent error reporting + # Defer cleanup until after callback if not request.isNil and not cleanupProc.isNil: - cleanupProc(request) + defer: + cleanupProc(request) - let errorMsg = formatErrorMessage(errorCode, context, details) - safeCallback(callback, errorCode, errorMsg, userData) + foreignThreadGc: + let errorMsg = formatErrorMessage(errorCode, context, details) + safeCallback(callback, errorCode, errorMsg, userData) return errorCode proc handleRequestSuccess*( @@ -152,10 +173,19 @@ proc handleRequestSuccess*( ): cint = ## Standardized success handling for completed requests ## Handles cleanup and consistent success reporting + info "handleRequestSuccess: Starting", message = message + + # Defer cleanup until after callback if not request.isNil and not cleanupProc.isNil: - cleanupProc(request) + defer: + info "handleRequestSuccess: Calling cleanupProc" + cleanupProc(request) + info "handleRequestSuccess: cleanupProc completed" - safeCallback(callback, RET_OK, message, userData) + foreignThreadGc: + info "handleRequestSuccess: Calling safeCallback" + safeCallback(callback, RET_OK, message, userData) + info "handleRequestSuccess: safeCallback completed" return RET_OK proc validateContext*(ctx: pointer): cint = @@ -182,16 +212,4 @@ proc validateParams*(ctx: pointer, callback: ArchivistCallback): cint = return RET_OK 
-################################################################################ -### FFI utils - -template foreignThreadGc*(body: untyped) = - when declared(setupForeignThreadGc): - setupForeignThreadGc() - - body - - when declared(tearDownForeignThreadGc): - tearDownForeignThreadGc() - type onDone* = proc() From 636064c29bff0ecd8462b12fbb2814e6709b818d Mon Sep 17 00:00:00 2001 From: Xav Date: Tue, 10 Mar 2026 10:44:21 -0400 Subject: [PATCH 13/16] refactor(ffi): improve FFI implementation --- library/libarchivist.nim | 433 ++++++++++++++++++++++++++++++++------- 1 file changed, 364 insertions(+), 69 deletions(-) diff --git a/library/libarchivist.nim b/library/libarchivist.nim index 51e92f60..eb0a0d5d 100644 --- a/library/libarchivist.nim +++ b/library/libarchivist.nim @@ -31,7 +31,6 @@ import ./archivist_thread_requests/requests/node_download_request import ./archivist_thread_requests/requests/node_storage_request import ./ffi_types import ./alloc -import ./toml_validation logScope: topics = "libarchivist" @@ -46,28 +45,6 @@ template checkLibarchivistParams*( if not isNil(ctx): ctx[].userData = userData -template handleRequestResult*( - result: Result[void, string], - request: pointer, - callback: ArchivistCallback, - userData: pointer, - context: string -): cint = - if result.isErr: - return handleRequestError( - callback, userData, RET_THREAD_ERROR, context, $result.error, request, - proc(req: pointer) {.raises: [].} = - when compiles(req.cleanupRequest()): - req.cleanupRequest() - deallocShared(req) - ) - else: - return handleRequestSuccess(callback, userData, "", request, - proc(req: pointer) {.raises: [].} = - when compiles(req.cleanupRequest()): - req.cleanupRequest() - deallocShared(req) - ) template handleRequestResultNoCleanup*( result: Result[void, string], @@ -111,24 +88,13 @@ proc archivist_new*( ): pointer {.dynlib, exported.} = initializeLibrary() - let validationResult = validateParams(nil, callback) + let validationResult = 
validateCallback(callback) if validationResult != RET_OK: let errorMsg = formatErrorMessage(validationResult, "archivist_new", "Callback validation failed") if not callback.isNil: safeCallback(callback, validationResult, errorMsg, userData) return nil - let tomlValidationResult = validateTomlCString(configToml) - if tomlValidationResult.isErr: - let errorMsg = formatErrorMessage( - RET_INVALID_PARAM, - "archivist_new", - "TOML validation failed: " & formatError(tomlValidationResult.error) - ) - if not callback.isNil: - safeCallback(callback, RET_INVALID_PARAM, errorMsg, userData) - return nil - let safeConfig = if validateCString(configToml): safeStringCopy(configToml, 10000) else: "" var ctx = archivist_context.createArchivistContext().valueOr: @@ -145,7 +111,6 @@ proc archivist_new*( ctx, RequestType.LIFECYCLE, reqContent, callback, userData ).isOkOr: let errorMsg = formatErrorMessage(RET_THREAD_ERROR, "archivist_new", "Failed to send request: " & $error) - reqContent.cleanupRequest() deallocShared(reqContent) safeCallback(callback, RET_THREAD_ERROR, errorMsg, userData) return nil @@ -160,7 +125,18 @@ proc archivist_create*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE, "") let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_create") + + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_create", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_start*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -170,7 +146,18 @@ proc archivist_start*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START, "") 
let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_start") + + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_start", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_stop*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -180,7 +167,18 @@ proc archivist_stop*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP, "") let res = ctx.sendRequestToArchivistThread(RequestType.LIFECYCLE, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_stop") + + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_stop", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_close*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -216,7 +214,18 @@ proc archivist_version*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeInfoRequest.createShared(NodeInfoMsgType.VERSION) let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_version") + + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_version", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_revision*( ctx: pointer, callback: ArchivistCallback, 
userData: pointer @@ -226,7 +235,18 @@ proc archivist_revision*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeInfoRequest.createShared(NodeInfoMsgType.REVISION) let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_revision") + + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_revision", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_repo*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -236,7 +256,18 @@ proc archivist_repo*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeInfoRequest.createShared(NodeInfoMsgType.REPO) let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_repo") + + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_repo", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) ################################################################################ ### Debug Operations @@ -249,7 +280,18 @@ proc archivist_debug*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeDebugRequest.createShared(NodeDebugMsgType.DEBUG) let res = ctx.sendRequestToArchivistThread(RequestType.DEBUG, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_debug") + + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_debug", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return 
handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_spr*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -259,7 +301,18 @@ proc archivist_spr*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeInfoRequest.createShared(NodeInfoMsgType.SPR) let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_spr") + + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_spr", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_peer_id*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -269,7 +322,18 @@ proc archivist_peer_id*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeInfoRequest.createShared(NodeInfoMsgType.PEERID) let res = ctx.sendRequestToArchivistThread(RequestType.INFO, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_peer_id") + + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_peer_id", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_log_level*( ctx: pointer, logLevel: cstring, callback: ArchivistCallback, userData: pointer @@ -282,7 +346,18 @@ proc archivist_log_level*( let req = NodeDebugRequest.createShared(NodeDebugMsgType.LOG_LEVEL, safeLogLevel) let res = ctx.sendRequestToArchivistThread(RequestType.DEBUG, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_log_level") + + if res.isErr: + return handleRequestError( + 
callback, userData, RET_THREAD_ERROR, "archivist_log_level", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) ################################################################################ ### P2P Networking @@ -309,7 +384,17 @@ proc archivist_connect*( let req = NodeP2PRequest.createShared(NodeP2PMsgType.CONNECT, safePeerId, addresses) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_connect") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_connect", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_connected_peers*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -319,7 +404,17 @@ proc archivist_connected_peers*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeP2PRequest.createShared(NodeP2PMsgType.CONNECTED_PEERS) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_connected_peers") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_connected_peers", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_connected_peer_ids*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -329,7 +424,17 @@ proc archivist_connected_peer_ids*( let ctx = cast[ptr ArchivistContext](ctx) let req = 
NodeP2PRequest.createShared(NodeP2PMsgType.CONNECTED_PEER_IDS) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_connected_peer_ids") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_connected_peer_ids", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_find_peer*( ctx: pointer, peerId: cstring, callback: ArchivistCallback, userData: pointer @@ -339,7 +444,17 @@ proc archivist_find_peer*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeP2PRequest.createShared(NodeP2PMsgType.FIND_PEER, $peerId) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_find_peer") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_find_peer", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_disconnect*( ctx: pointer, peerId: cstring, callback: ArchivistCallback, userData: pointer @@ -349,7 +464,17 @@ proc archivist_disconnect*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeP2PRequest.createShared(NodeP2PMsgType.DISCONNECT, $peerId) let res = ctx.sendRequestToArchivistThread(RequestType.P2P, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_disconnect") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_disconnect", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, 
userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) ################################################################################ ### Upload Operations @@ -366,7 +491,17 @@ proc archivist_upload_init*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeUploadRequest.createShared(NodeUploadMsgType.INIT, $filepath, @[], chunkSize.int) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_upload_init") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_upload_init", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_upload_chunk*( ctx: pointer, @@ -386,7 +521,17 @@ proc archivist_upload_chunk*( let req = NodeUploadRequest.createShared(NodeUploadMsgType.CHUNK, $sessionId, chunkData) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_upload_chunk") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_upload_chunk", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_upload_finalize*( ctx: pointer, @@ -399,7 +544,17 @@ proc archivist_upload_finalize*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeUploadRequest.createShared(NodeUploadMsgType.FINALIZE, $sessionId) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_upload_finalize") + if res.isErr: + return handleRequestError( + callback, 
userData, RET_THREAD_ERROR, "archivist_upload_finalize", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_upload_cancel*( ctx: pointer, @@ -412,7 +567,17 @@ proc archivist_upload_cancel*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeUploadRequest.createShared(NodeUploadMsgType.CANCEL, $sessionId) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_upload_cancel") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_upload_cancel", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_upload_file*( ctx: pointer, @@ -425,7 +590,17 @@ proc archivist_upload_file*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeUploadRequest.createShared(NodeUploadMsgType.FILE, $sessionId) let res = ctx.sendRequestToArchivistThread(RequestType.UPLOAD, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_upload_file") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_upload_file", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) ################################################################################ ### Download Operations @@ -443,7 +618,17 @@ proc archivist_download_init*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.INIT, $cid, chunkSize.int, local) let res = 
ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_download_init") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_download_init", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_download_stream*( ctx: pointer, @@ -462,7 +647,17 @@ proc archivist_download_stream*( fp = $filepath let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.STREAM, $cid, chunkSize.int, local, fp) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_download_stream") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_download_stream", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_download_chunk*( ctx: pointer, @@ -475,7 +670,17 @@ proc archivist_download_chunk*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CHUNK, $cid) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_download_chunk") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_download_chunk", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_download_cancel*( ctx: pointer, @@ -488,7 +693,17 @@ proc 
archivist_download_cancel*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CANCEL, $cid) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_download_cancel") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_download_cancel", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_download_manifest*( ctx: pointer, @@ -501,7 +716,17 @@ proc archivist_download_manifest*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.MANIFEST, $cid) let res = ctx.sendRequestToArchivistThread(RequestType.DOWNLOAD, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_download_manifest") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_download_manifest", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) ################################################################################ ### Storage Operations @@ -514,7 +739,17 @@ proc archivist_list*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.LIST) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_list") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_list", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + 
return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_space*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -524,7 +759,17 @@ proc archivist_space*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_space") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_space", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_delete*( ctx: pointer, cid: cstring, callback: ArchivistCallback, userData: pointer @@ -534,7 +779,17 @@ proc archivist_delete*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.DELETE, cid) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_delete") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_delete", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_fetch*( ctx: pointer, cid: cstring, callback: ArchivistCallback, userData: pointer @@ -544,7 +799,17 @@ proc archivist_fetch*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.FETCH, cid) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - return handleRequestResult(res, req, callback, userData, 
"archivist_fetch") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_fetch", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_exists*( ctx: pointer, cid: cstring, callback: ArchivistCallback, userData: pointer @@ -554,7 +819,17 @@ proc archivist_exists*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.EXISTS, cid) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_exists") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_exists", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_local_size*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -564,7 +839,17 @@ proc archivist_local_size*( let ctx = cast[ptr ArchivistContext](ctx) let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_local_size") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_local_size", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) proc archivist_block_count*( ctx: pointer, callback: ArchivistCallback, userData: pointer @@ -574,14 +859,24 @@ proc archivist_block_count*( let ctx = cast[ptr ArchivistContext](ctx) 
let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) let res = ctx.sendRequestToArchivistThread(RequestType.STORAGE, req, callback, userData) - return handleRequestResult(res, req, callback, userData, "archivist_block_count") + if res.isErr: + return handleRequestError( + callback, userData, RET_THREAD_ERROR, "archivist_block_count", $res.error, req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) + else: + return handleRequestSuccess(callback, userData, "", req, + proc(req: pointer) {.raises: [].} = + deallocShared(req) + ) ################################################################################ ### Event Callback proc archivist_set_event_callback*( ctx: pointer, callback: ArchivistCallback, userData: pointer -): cint {.dynlib, exported.} = +): cint {.dynlib, exported, raises: [].} = let validationResult = validateParams(ctx, callback) if validationResult != RET_OK: return validationResult From cf471dd16271f860bcc9ea89f666a879ea84147c Mon Sep 17 00:00:00 2001 From: Xav Date: Tue, 10 Mar 2026 10:44:27 -0400 Subject: [PATCH 14/16] refactor(lib): update context management --- library/archivist_context.nim | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/library/archivist_context.nim b/library/archivist_context.nim index 75e5f8ec..554511b1 100644 --- a/library/archivist_context.nim +++ b/library/archivist_context.nim @@ -17,8 +17,9 @@ import chronos/threadsync import taskpools/channels_spsc_single import ./ffi_types import ./archivist_thread_requests/[archivist_thread_request] +import ./archivist_thread_requests/requests/node_lifecycle_request -from ../archivist/archivist import NodeServer +import ../archivist/archivist logScope: topics = "libarchivist" @@ -42,6 +43,8 @@ type ArchivistContext* = object eventUserData*: pointer running: Atomic[bool] + + archivist: ptr NodeServer template callEventCallback(ctx: ptr ArchivistContext, eventName: string, body: untyped) = if 
isNil(ctx[].eventCallback): @@ -102,6 +105,7 @@ proc sendRequestToArchivistThread*( proc runArchivist(ctx: ptr ArchivistContext) {.async: (raises: []).} = var archivist: NodeServer + ctx.archivist = addr archivist while true: try: @@ -112,6 +116,11 @@ proc runArchivist(ctx: ptr ArchivistContext) {.async: (raises: []).} = continue if ctx.running.load == false: + try: + await archivist.stop() + except Exception as e: + error "runArchivist: Failed to stop archivist", error = e.msg + break var request: ptr ArchivistThreadRequest @@ -121,10 +130,11 @@ proc runArchivist(ctx: ptr ArchivistContext) {.async: (raises: []).} = error "Failure in run Archivist: unable to receive request in Archivist thread." continue + let req = request asyncSpawn ( proc() {.async.} = await sleepAsync(0) - await ArchivistThreadRequest.process(request, addr archivist) + await ArchivistThreadRequest.process(req, addr archivist) )() let fireRes = ctx.reqReceivedSignal.fireSync() @@ -148,7 +158,8 @@ proc createArchivistContext*(): Result[ptr ArchivistContext, string] = ) ctx.lock.initLock() - + + ctx.archivist = nil ctx.running.store(true) try: @@ -164,7 +175,6 @@ proc createArchivistContext*(): Result[ptr ArchivistContext, string] = proc destroyArchivistContext*(ctx: ptr ArchivistContext): Result[void, string] = ctx.running.store(false) - let signaledOnTime = ctx.reqSignal.fireSync().valueOr: return err("Failed to destroy Archivist context: " & $error) From 531f55c8e8a7b89d31dfe5fd14b0bea5fd218893 Mon Sep 17 00:00:00 2001 From: Xav Date: Tue, 10 Mar 2026 10:44:31 -0400 Subject: [PATCH 15/16] refactor(lib): update thread request types --- .../archivist_thread_request.nim | 13 +++++++----- .../requests/node_info_request.nim | 15 ++++++------- .../requests/node_lifecycle_request.nim | 21 ++++++------------- 3 files changed, 20 insertions(+), 29 deletions(-) diff --git a/library/archivist_thread_requests/archivist_thread_request.nim b/library/archivist_thread_requests/archivist_thread_request.nim 
index bedc4339..30bb026f 100644 --- a/library/archivist_thread_requests/archivist_thread_request.nim +++ b/library/archivist_thread_requests/archivist_thread_request.nim @@ -6,6 +6,7 @@ import std/json import results import chronos import ../ffi_types +import ../alloc import ./requests/node_lifecycle_request import ./requests/node_info_request import ./requests/node_debug_request @@ -50,11 +51,8 @@ proc createShared*( # TODO: Look into how to improve callback handling (thread pool/mp channel) proc handleRes[T: string | void | seq[byte]]( - res: Result[T, string], request: ptr ArchivistThreadRequest + res: Result[T, string], request: ptr ArchivistThreadRequest ) = - defer: - deallocShared(request) - if res.isErr(): foreignThreadGc: let msg = $res.error @@ -64,6 +62,8 @@ proc handleRes[T: string | void | seq[byte]]( else: let errorMsg = formatErrorMessage(RET_ERR, "request processing", msg) safeCallback(request[].callback, RET_ERR, errorMsg, request[].userData) + deallocShared(request[].reqContent) + deallocShared(request) return foreignThreadGc: @@ -72,6 +72,9 @@ proc handleRes[T: string | void | seq[byte]]( safeCallback(request[].callback, RET_OK, msg, request[].userData) else: request[].callback(RET_OK, nil, cast[csize_t](0), request[].userData) + + deallocShared(request[].reqContent) + deallocShared(request) return proc process*( @@ -94,7 +97,7 @@ proc process*( of DOWNLOAD: let onChunk = proc(bytes: seq[byte]) = if bytes.len > 0: - let sharedBytes = allocSharedSeq(bytes) + var sharedBytes = allocSharedSeq(bytes) request[].callback( RET_PROGRESS, diff --git a/library/archivist_thread_requests/requests/node_info_request.nim b/library/archivist_thread_requests/requests/node_info_request.nim index 0c671db4..6c320886 100644 --- a/library/archivist_thread_requests/requests/node_info_request.nim +++ b/library/archivist_thread_requests/requests/node_info_request.nim @@ -14,7 +14,7 @@ import results import pkg/libp2p/switch as libp2p_switch import ../../alloc -from 
"../../../archivist/archivist" import NodeServer +import ../../../archivist/archivist from ../../../archivist/node import ArchivistNodeRef, switch, discovery # TODO: Should this really be hardcoded here? @@ -43,26 +43,23 @@ proc destroyShared(self: ptr NodeInfoRequest) = deallocShared(self) proc process*( - self: ptr NodeInfoRequest, archivist: ptr NodeServer + self: ptr NodeInfoRequest, archivist: ptr NodeServer ): Future[Result[string, string]] {.async: (raises: []).} = - defer: - destroyShared(self) - case self.operation of VERSION: return ok(archivistVersion) of REVISION: return ok(archivistRevision) of REPO: - if archivist[].isNil: + if archivist.isNil: return err("Archivist node is not initialized") - return ok(string(archivist[].config.dataDir)) + return ok(archivist[].dataDir()) of PEERID: - if archivist[].isNil: + if archivist.isNil or archivist[].isNil: return err("Archivist node is not initialized") return ok($archivist[].archivistNode.switch.peerInfo.peerId) of SPR: - if archivist[].isNil: + if archivist.isNil or archivist[].isNil: return err("Archivist node is not initialized") let spr = archivist[].archivistNode.discovery().dhtRecord if spr.isNone: diff --git a/library/archivist_thread_requests/requests/node_lifecycle_request.nim b/library/archivist_thread_requests/requests/node_lifecycle_request.nim index 8b24bab1..5595dde2 100644 --- a/library/archivist_thread_requests/requests/node_lifecycle_request.nim +++ b/library/archivist_thread_requests/requests/node_lifecycle_request.nim @@ -15,7 +15,6 @@ import toml_serialization import ../../../archivist/conf import ../../alloc -import ../../toml_validation import ../../../archivist/utils import ../../../archivist/utils/[keyutils, fileutils] import ../../../archivist/units @@ -78,10 +77,6 @@ proc createArchivist( ): Future[Result[NodeServer, string]] {.async: (raises: []).} = var conf: NodeConf - let tomlValidationResult = validateTomlCString(configToml) - if tomlValidationResult.isErr: - return 
err("Failed to create Archivist: TOML validation failed: " & formatError(tomlValidationResult.error)) - try: conf = NodeConf.load( version = nodeFullVersion, @@ -96,7 +91,6 @@ proc createArchivist( ) except ConfigurationError as e: return err("Failed to create Archivist: unable to load configuration: " & e.msg) - conf.setupLogging() try: @@ -104,7 +98,6 @@ proc createArchivist( updateLogLevel(conf.logLevel) except ValueError as err: return err("Failed to create Archivist: invalid value for log level: " & err.msg) - conf.setupMetrics() if not (checkAndCreateDataDir((conf.dataDir).string)): @@ -126,7 +119,6 @@ proc createArchivist( if privateKey.isErr: return err("Failed to create Archivist: unable to get the private key.") let pk = privateKey.get() - let archivist = try: NodeServer.new(conf, pk) @@ -143,13 +135,12 @@ proc process*( case self.operation of CREATE: - archivist[] = ( - await createArchivist( - self.configToml - ) - ).valueOr: - error "Failed to CREATE.", error = error - return err($error) + let createResult = await createArchivist(self.configToml) + if createResult.isErr(): + error "Failed to CREATE.", error = createResult.error + return err(createResult.error) + + archivist[] = createResult.get() of START: try: await archivist[].start() From a1098b2d55219f4779ea7664ab4dc9a09eb5fd83 Mon Sep 17 00:00:00 2001 From: Xav Date: Tue, 10 Mar 2026 10:45:52 -0400 Subject: [PATCH 16/16] fix(archivist): improve shutdown with nil checks and store cleanup --- archivist/archivist.nim | 71 ++++++++++++++++++++++++++++++----------- 1 file changed, 52 insertions(+), 19 deletions(-) diff --git a/archivist/archivist.nim b/archivist/archivist.nim index 13e8479a..ab7505fe 100644 --- a/archivist/archivist.nim +++ b/archivist/archivist.nim @@ -50,6 +50,8 @@ type repoStore: RepoStore maintenance: BlockMaintainer taskpool: Taskpool + started: bool # Track whether the node was started + discoveryStore: Datastore # Store reference to close explicitly NodePrivateKey* = 
libp2p.PrivateKey # alias @@ -118,24 +120,50 @@ proc start*(s: NodeServer) {.async.} = await s.connectMarketplace() await s.archivistNode.start() s.restServer.start() + s.started = true proc stop*(s: NodeServer) {.async.} = notice "Stopping node" - let res = await noCancel allFinishedFailed[void]( - @[ - s.restServer.stop(), - s.archivistNode.switch.stop(), - s.archivistNode.stop(), - s.repoStore.stop(), - s.maintenance.stop(), - ] - ) - - if res.failure.len > 0: - error "Failed to stop node", failures = res.failure.len - raiseAssert "Failed to stop node" - + if not s.started: + # Close the discovery store to release the LevelDB lock + if not s.discoveryStore.isNil: + try: + discard await s.discoveryStore.close() + except Exception as e: + error "Failed to close discovery store", error = e.msg + if not s.taskpool.isNil: + s.taskpool.shutdown() + return + + var futures: seq[Future[void]] = @[] + + if not s.restServer.isNil: + futures.add(s.restServer.stop()) + + if not s.archivistNode.isNil: + futures.add(s.archivistNode.switch.stop()) + futures.add(s.archivistNode.stop()) + + if not s.repoStore.isNil: + futures.add(s.repoStore.stop()) + + if not s.maintenance.isNil: + futures.add(s.maintenance.stop()) + + if futures.len > 0: + let res = await noCancel allFinishedFailed[void](futures) + + if res.failure.len > 0: + error "Failed to stop node", failures = res.failure.len + raiseAssert "Failed to stop node" + + # Close the discovery store to release the LevelDB lock + if not s.discoveryStore.isNil: + try: + discard await s.discoveryStore.close() + except Exception as e: + error "Failed to close discovery store", error = e.msg if not s.taskpool.isNil: s.taskpool.shutdown() @@ -168,24 +196,27 @@ proc new*( except CatchableError as exc: raiseAssert("Failure in tp initialization:" & exc.msg) - info "Threadpool started", numThreads = tp.numThreads - let discoveryDir = config.dataDir / ArchivistDhtNamespace if io2.createPath(discoveryDir).isErr: - trace "Unable to create 
discovery directory for block store", - discoveryDir = discoveryDir raise (ref Defect)( msg: "Unable to create discovery directory for block store: " & discoveryDir ) + let discoveryProvidersDir = config.dataDir / ArchivistDhtProvidersNamespace + if io2.createPath(discoveryProvidersDir).isErr: + raise (ref Defect)( + msg: "Unable to create discovery providers directory: " & discoveryProvidersDir + ) + let discoveryStore = Datastore( - LevelDbDatastore.new(config.dataDir / ArchivistDhtProvidersNamespace).expect( + LevelDbDatastore.new(discoveryProvidersDir).expect( "Should create discovery datastore!" ) ) + let discovery = Discovery.new( switch.peerInfo.privateKey, announceAddrs = config.listenAddrs, @@ -276,4 +307,6 @@ proc new*( repoStore: repoStore, maintenance: maintenance, taskpool: tp, + discoveryStore: discoveryStore, + started: false, )